Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

132

133

134

135

136

137

138

139

140

141

142

143

144

145

146

147

148

149

150

151

152

153

154

155

156

157

158

159

160

161

162

163

164

165

166

167

168

169

170

171

172

173

174

175

176

177

178

179

180

181

182

183

184

185

186

187

188

189

190

191

192

193

194

195

196

197

198

199

200

201

202

203

204

205

206

207

208

209

210

211

212

213

214

215

216

217

218

219

220

221

222

223

224

225

226

227

228

229

230

231

232

233

234

235

236

237

238

239

240

241

242

243

244

245

246

247

248

249

250

251

252

253

254

255

256

257

258

259

260

261

262

263

264

265

266

267

268

269

270

271

272

273

274

275

276

277

278

279

280

281

282

283

284

285

286

287

288

289

290

291

292

293

294

295

296

#!/usr/bin/env python 

# 

# Original work copyright (C) Citrix systems 

# Modified work copyright (C) Vates SAS and XCP-ng community 

# 

# This program is free software; you can redistribute it and/or modify 

# it under the terms of the GNU Lesser General Public License as published 

# by the Free Software Foundation; version 2.1 only. 

# 

# This program is distributed in the hope that it will be useful, 

# but WITHOUT ANY WARRANTY; without even the implied warranty of 

# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

# GNU Lesser General Public License for more details. 

# 

# You should have received a copy of the GNU Lesser General Public License 

# along with this program; if not, write to the Free Software Foundation, Inc., 

# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA 

# 

# CEPHFSSR: Based on FileSR, mounts ceph fs share 

 

import errno 

import os 

import syslog as _syslog 

import xmlrpclib 

from syslog import syslog 

 

# careful with the import order here 

# FileSR has a circular dependency: 

# FileSR -> blktap2 -> lvutil -> EXTSR -> FileSR 

# importing in this order seems to avoid triggering the issue. 

import SR 

import SRCommand 

import FileSR 

# end of careful 

import cleanup 

import util 

import vhdutil 

import xs_errors 

from lock import Lock 

 

# Capabilities advertised to xapi for this SR driver.
CAPABILITIES = ["SR_PROBE", "SR_UPDATE",
                "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
                "VDI_UPDATE", "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR",
                "VDI_GENERATE_CONFIG",
                "VDI_RESET_ON_BOOT/2", "ATOMIC_PAUSE"]

# Device-config keys accepted at SR creation time, as [name, description] pairs.
CONFIGURATION = [
    ['server', 'Ceph server(s) (required, ex: "192.168.0.12" or "10.10.10.10,10.10.10.26")'],
    ['serverpath', 'Ceph FS path (required, ex: "/")'],
    ['serverport', 'ex: 6789'],
    ['options', 'Ceph FS client name, and secretfile (required, ex: "name=admin,secretfile=/etc/ceph/admin.secret")']
]

# Driver metadata reported to xapi.
DRIVER_INFO = {
    'name': 'CephFS VHD',
    'description': 'SR plugin which stores disks as VHD files on a CephFS storage',
    'vendor': 'Vates SAS',
    'copyright': '(C) 2020 Vates SAS',
    'driver_version': '1.0',
    'required_api_version': '1.0',
    'capabilities': CAPABILITIES,
    'configuration': CONFIGURATION
}

# Allows a VDI to be attached by tapdisk straight from the config blob
# produced by generate_config (see CephFSFileVDI.generate_config below).
DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True}

# The mountpoint for the directory when performing an sr_probe.  All probes
# are guaranteed to be serialised by xapi, so this single mountpoint is fine.
PROBE_MOUNTPOINT = os.path.join(SR.MOUNT_BASE, "probe")

 

 

class CephFSException(Exception):
    """Raised for CephFS mount/umount and share-management failures.

    The error description is kept in ``errstr`` (read by callers such as
    ``CephFSSR.attach``) and is also passed to the ``Exception`` base class
    so that ``str(exc)`` yields the message — the original implementation
    skipped the base-class init, leaving ``str(exc)`` empty.
    """

    def __init__(self, errstr):
        super(CephFSException, self).__init__(errstr)
        self.errstr = errstr

 

 

# mountpoint = /var/run/sr-mount/CephFS/uuid
# linkpath = mountpoint/uuid - path to SR directory on share
# path = /var/run/sr-mount/uuid - symlink to SR directory on share
class CephFSSR(FileSR.FileSR):
    """Ceph file-based storage repository.

    Stores VHD files on a CephFS share.  Uses ``except X as e`` and
    ``key in dict`` throughout (the original ``except X, e`` /
    ``dict.has_key()`` forms are invalid in Python 3; the replacements
    behave identically on Python 2.6+).
    """

    DRIVER_TYPE = 'cephfs'

    def handles(sr_type):
        """Return True if this driver services 'sr_type'.

        'smb' is also accepted — a fudge, because the parent class
        (FileSR) checks for smb to alter its behavior.
        """
        return sr_type == CephFSSR.DRIVER_TYPE or sr_type == 'smb'

    handles = staticmethod(handles)

    def load(self, sr_uuid):
        """Read device-config and compute the paths used by this SR.

        Raises:
            xs_errors.XenError('SRUnavailable'): ceph tools not installed.
            xs_errors.XenError('ConfigServerMissing'): no 'server' key.
        """
        if not self._is_ceph_available():
            raise xs_errors.XenError(
                'SRUnavailable',
                opterr='ceph is not installed'
            )

        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.driver_config = DRIVER_CONFIG
        if 'server' not in self.dconf:
            raise xs_errors.XenError('ConfigServerMissing')
        self.remoteserver = self.dconf['server']
        # NOTE(review): 'serverpath' is documented as required but a missing
        # key raises a bare KeyError here — confirm xapi validates it first.
        self.remotepath = self.dconf['serverpath']
        # If serverport is not specified, use the default Ceph monitor port.
        self.remoteport = self.dconf.get('serverport', "6789")
        if self.sr_ref and self.session is not None:
            self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        else:
            self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
        self.mountpoint = os.path.join(SR.MOUNT_BASE, 'CephFS', sr_uuid)
        self.linkpath = os.path.join(self.mountpoint, sr_uuid or "")
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        self._check_o_direct()

    def checkmount(self):
        """Return True when the share is mounted and the SR symlink exists."""
        return util.ioretry(lambda: ((util.pathexists(self.mountpoint) and
                                      util.ismount(self.mountpoint)) and
                                     util.pathexists(self.path)))

    def mount(self, mountpoint=None):
        """Mount the remote ceph export at 'mountpoint'.

        Defaults to self.mountpoint.  Raises CephFSException on directory
        creation failure, mount failure, or when the mounted share is not
        even readable (in which case it is unmounted again first).
        """
        if mountpoint is None:
            mountpoint = self.mountpoint
        elif not util.is_string(mountpoint) or mountpoint == "":
            raise CephFSException("mountpoint not a string object")

        try:
            if not util.ioretry(lambda: util.isdir(mountpoint)):
                util.ioretry(lambda: util.makedirs(mountpoint))
        except util.CommandException as inst:
            raise CephFSException("Failed to make directory: code is %d" % inst.code)

        try:
            options = []
            if 'options' in self.dconf:
                options.append(self.dconf['options'])
            if options:
                options = ['-o', ','.join(options)]
            # mount.ceph device syntax: mon_host:port:/path
            command = ["mount", '-t', 'ceph', self.remoteserver + ":" + self.remoteport + ":" + self.remotepath, mountpoint] + options
            util.ioretry(lambda: util.pread(command), errlist=[errno.EPIPE, errno.EIO], maxretry=2, nofail=True)
        except util.CommandException as inst:
            syslog(_syslog.LOG_ERR, 'CephFS mount failed ' + inst.__str__())
            raise CephFSException("mount failed with return code %d" % inst.code)

        # Sanity check to ensure that the user has at least RO access to the
        # mounted share. Windows sharing and security settings can be tricky.
        try:
            util.listdir(mountpoint)
        except util.CommandException:
            try:
                self.unmount(mountpoint, True)
            except CephFSException:
                util.logException('CephFSSR.unmount()')
            raise CephFSException("Permission denied. Please check user privileges.")

    def unmount(self, mountpoint, rmmountpoint):
        """Unmount 'mountpoint', optionally removing the directory after.

        Raises CephFSException if either the umount or the rmdir fails.
        """
        try:
            util.pread(["umount", mountpoint])
        except util.CommandException as inst:
            raise CephFSException("umount failed with return code %d" % inst.code)
        if rmmountpoint:
            try:
                os.rmdir(mountpoint)
            except OSError as inst:
                raise CephFSException("rmdir failed with error '%s'" % inst.strerror)

    def attach(self, sr_uuid):
        """Mount the share and create the SR symlink if not already done."""
        if not self.checkmount():
            try:
                self.mount()
                os.symlink(self.linkpath, self.path)
            except CephFSException as exc:
                raise SR.SROSError(12, exc.errstr)
        self.attached = True

    def probe(self):
        """Mount at PROBE_MOUNTPOINT and report the SR uuids found there."""
        try:
            self.mount(PROBE_MOUNTPOINT)
            sr_list = filter(util.match_uuid, util.listdir(PROBE_MOUNTPOINT))
            self.unmount(PROBE_MOUNTPOINT, True)
        except (util.CommandException, xs_errors.XenError):
            # No recovery possible; let the caller report the failure.
            raise
        # Create a dictionary from the SR uuids to feed SRtoXML()
        sr_dict = {sr_uuid: {} for sr_uuid in sr_list}
        return util.SRtoXML(sr_dict)

    def detach(self, sr_uuid):
        """Stop GC, unmount the share and remove the SR symlink."""
        if not self.checkmount():
            return
        util.SMlog("Aborting GC/coalesce")
        cleanup.abort(self.uuid)
        # Change directory to avoid unmount conflicts
        os.chdir(SR.MOUNT_BASE)
        self.unmount(self.mountpoint, True)
        os.unlink(self.path)
        self.attached = False

    def create(self, sr_uuid, size):
        """Create the SR directory on the share; leaves the SR detached.

        Raises SROSError 113 (already mounted), 111 (mount error),
        116 (remote directory creation error) or XenError('SRExists').
        """
        if self.checkmount():
            raise SR.SROSError(113, 'CephFS mount point already attached')

        try:
            self.mount()
        except CephFSException as exc:
            # noinspection PyBroadException
            try:
                os.rmdir(self.mountpoint)
            except Exception:
                # we have no recovery strategy (narrowed from a bare
                # 'except:' so KeyboardInterrupt/SystemExit still propagate)
                pass
            raise SR.SROSError(111, "CephFS mount error [opterr=%s]" % exc.errstr)

        if util.ioretry(lambda: util.pathexists(self.linkpath)):
            if len(util.ioretry(lambda: util.listdir(self.linkpath))) != 0:
                self.detach(sr_uuid)
                raise xs_errors.XenError('SRExists')
        else:
            try:
                util.ioretry(lambda: util.makedirs(self.linkpath))
                os.symlink(self.linkpath, self.path)
            except util.CommandException as inst:
                if inst.code != errno.EEXIST:
                    try:
                        self.unmount(self.mountpoint, True)
                    except CephFSException:
                        util.logException('CephFSSR.unmount()')
                    raise SR.SROSError(116,
                                       "Failed to create CephFS SR. remote directory creation error: {}".format(
                                           os.strerror(inst.code)))
        self.detach(sr_uuid)

    def delete(self, sr_uuid):
        """Delete SR contents and remove the SR directory from the share."""
        # try to remove/delete non VDI contents first
        super(CephFSSR, self).delete(sr_uuid)
        try:
            if self.checkmount():
                self.detach(sr_uuid)
            self.mount()
            if util.ioretry(lambda: util.pathexists(self.linkpath)):
                util.ioretry(lambda: os.rmdir(self.linkpath))
            util.SMlog(str(self.unmount(self.mountpoint, True)))
        except util.CommandException as inst:
            self.detach(sr_uuid)
            if inst.code != errno.ENOENT:
                raise SR.SROSError(114, "Failed to remove CephFS mount point")

    def vdi(self, uuid, loadLocked=False):
        """Return the VDI object for 'uuid'."""
        return CephFSFileVDI(self, uuid)

    @staticmethod
    def _is_ceph_available():
        """Return the path of the 'ceph' executable, or None if absent.

        NOTE(review): distutils is deprecated (removed in Python 3.12);
        shutil.which is the modern equivalent when this file moves to Py3.
        """
        import distutils.spawn
        return distutils.spawn.find_executable('ceph')

 

class CephFSFileVDI(FileSR.FileVDI):
    """VDI stored as a VHD file on a CephFS-backed SR."""

    def attach(self, sr_uuid, vdi_uuid):
        """Attach the VDI, tagging xenstore with the storage type."""
        if not hasattr(self, 'xenstore_data'):
            self.xenstore_data = {}

        self.xenstore_data['storage-type'] = CephFSSR.DRIVER_TYPE

        return super(CephFSFileVDI, self).attach(sr_uuid, vdi_uuid)

    def generate_config(self, sr_uuid, vdi_uuid):
        """Return an XMLRPC blob allowing the VDI to be attached from config.

        Raises XenError('VDIUnavailable') if the VHD file is missing.
        """
        # Log tag fixed: previously said 'SMBFileVDI' (copy-paste from the
        # SMB driver this file was derived from).
        util.SMlog("CephFSFileVDI.generate_config")
        if not util.pathexists(self.path):
            raise xs_errors.XenError('VDIUnavailable')
        resp = {'device_config': self.sr.dconf,
                'sr_uuid': sr_uuid,
                'vdi_uuid': vdi_uuid,
                'sr_sm_config': self.sr.sm_config,
                'command': 'vdi_attach_from_config'}
        # Return the 'config' encoded within a normal XMLRPC response so that
        # we can use the regular response/error parsing code.
        config = xmlrpclib.dumps(tuple([resp]), "vdi_attach_from_config")
        return xmlrpclib.dumps((config,), "", True)

    def attach_from_config(self, sr_uuid, vdi_uuid):
        """Mount the SR if needed before an attach-from-config operation.

        Any failure is logged and converted to XenError('SRUnavailable').
        """
        try:
            if not util.pathexists(self.sr.path):
                self.sr.attach(sr_uuid)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed into SRUnavailable.  Log tag fixed from the
            # copy-pasted 'SMBFileVDI'.
            util.logException("CephFSFileVDI.attach_from_config")
            raise xs_errors.XenError('SRUnavailable',
                                     opterr='Unable to attach from config')

 

 

# Extraction artifact removed: a stray line number ('294') had been fused
# onto the 'if __name__' line in the dump.
if __name__ == '__main__':
    # Invoked directly by xapi as an SM plugin command.
    SRCommand.run(CephFSSR, DRIVER_INFO)
else:
    # Imported (e.g. by the GC/probe machinery): just register the driver.
    SR.registerSR(CephFSSR)