Coverage for drivers/LVHDSR : 12%

#!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
#
# LVHDSR: VHD on LVM storage repository
#
        UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
        READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
        requiresUpgrade, LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
        METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
"VDI_CREATE","VDI_DELETE","VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR", "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT", "VDI_ACTIVATE", "VDI_DEACTIVATE"]
'name': 'Local VHD on LVM', 'description': 'SR plugin which represents disks as VHD disks on ' + \ 'Logical Volumes within a locally-attached Volume Group', 'vendor': 'XenSource Inc', 'copyright': '(C) 2008 XenSource Inc', 'driver_version': '1.0', 'required_api_version': '1.0', 'capabilities': CAPABILITIES, 'configuration': CONFIGURATION }
"sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan", "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot", "vdi_clone" ]
# Log if snapshot pauses VM for more than this many seconds
TEST_MODE_VHD_FAIL_REPARENT_BEGIN: "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN", TEST_MODE_VHD_FAIL_REPARENT_LOCATOR: "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR", TEST_MODE_VHD_FAIL_REPARENT_END: "VHD_UTIL_TEST_FAIL_REPARENT_END", TEST_MODE_VHD_FAIL_RESIZE_BEGIN: "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN", TEST_MODE_VHD_FAIL_RESIZE_DATA: "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED", TEST_MODE_VHD_FAIL_RESIZE_METADATA: "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED", TEST_MODE_VHD_FAIL_RESIZE_END: "VHD_UTIL_TEST_FAIL_RESIZE_END" }
"""Returns True if this SR class understands the given dconf string""" # we can pose as LVMSR or EXTSR for compatibility purposes if __name__ == '__main__': name = sys.argv[0] else: name = __name__ if name.endswith("LVMSR"): return type == "lvm" elif name.endswith("EXTSR"): return type == "ext" return type == LVHDSR.DRIVER_TYPE
self.isMaster = True
        except:
            raise xs_errors.XenError('SRUnavailable', \
                    opterr='Failed to initialise the LVMCache')

        # Test for thick vs thin provisioning conf parameter
        if self.dconf.has_key('allocation'):
            if self.dconf['allocation'] in self.PROVISIONING_TYPES:
                self.provision = self.dconf['allocation']
            else:
                raise xs_errors.XenError('InvalidArg', \
                        opterr='Allocation parameter must be one of %s' %
                        self.PROVISIONING_TYPES)
self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref) if self.other_conf.get(self.TEST_MODE_KEY): self.testMode = self.other_conf[self.TEST_MODE_KEY] self._prepareTestMode()
self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref) # sm_config flag overrides PBD, if any if self.sm_config.get('allocation') in self.PROVISIONING_TYPES: self.provision = self.sm_config.get('allocation')
if self.sm_config.get(self.FLAG_USE_VHD) == "true": self.legacyMode = False
        if lvutil._checkVG(self.vgname):
            if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",
                    "vdi_activate", "vdi_deactivate"]:
                self._undoAllJournals()
            if not self.cmd in ["sr_attach", "sr_probe"]:
                self._checkMetadataVolume()
self.mdexists = False
        # get a VDI -> TYPE map from the storage
        contains_uuid_regex = \
            re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
        self.storageVDIs = {}

        for key in self.lvmCache.lvs.keys():
            # if the lvname has a uuid in it
            type = None
            if contains_uuid_regex.search(key) != None:
                if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
                    type = vhdutil.VDI_TYPE_VHD
                    vdi = key[len(lvhdutil.LV_PREFIX[type]):]
                elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
                    type = vhdutil.VDI_TYPE_RAW
                    vdi = key[len(lvhdutil.LV_PREFIX[type]):]
                else:
                    continue

                if type != None:
                    self.storageVDIs[vdi] = type

        # check if metadata volume exists
        try:
            self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
        except:
            pass
# we don't need to hold the lock to dec refcounts of activated LVs if not self.lvActivator.deactivateAll(): raise util.SMException("failed to deactivate LVs")
try: # Add SR specific SR metadata sr_info = \ { ALLOCATION_TAG: allocation, UUID_TAG: self.uuid, NAME_LABEL_TAG: util.to_plain_string\ (self.session.xenapi.SR.get_name_label(self.sr_ref)), NAME_DESCRIPTION_TAG: util.to_plain_string\ (self.session.xenapi.SR.get_name_description(self.sr_ref)) }
vdi_info = {} for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref): vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
# Create the VDI entry in the SR metadata vdi_info[vdi_uuid] = \ { UUID_TAG: vdi_uuid, NAME_LABEL_TAG: util.to_plain_string\ (self.session.xenapi.VDI.get_name_label(vdi)), NAME_DESCRIPTION_TAG: util.to_plain_string\ (self.session.xenapi.VDI.get_name_description(vdi)), IS_A_SNAPSHOT_TAG: \ int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)), SNAPSHOT_OF_TAG: \ self.session.xenapi.VDI.get_snapshot_of(vdi), SNAPSHOT_TIME_TAG: \ self.session.xenapi.VDI.get_snapshot_time(vdi), TYPE_TAG: \ self.session.xenapi.VDI.get_type(vdi), VDI_TYPE_TAG: \ self.session.xenapi.VDI.get_sm_config(vdi)\ ['vdi_type'], READ_ONLY_TAG: \ int(self.session.xenapi.VDI.get_read_only(vdi)), METADATA_OF_POOL_TAG: \ self.session.xenapi.VDI.get_metadata_of_pool(vdi), MANAGED_TAG: \ int(self.session.xenapi.VDI.get_managed(vdi)) } LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
except Exception, e: raise xs_errors.XenError('MetadataError', \ opterr='Error upgrading SR Metadata: %s' % str(e))
try: # if a VDI is present in the metadata but not in the storage # then delete it from the metadata vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1] for vdi in vdi_info.keys(): update_map = {} if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): # delete this from metadata LVMMetadataHandler(self.mdpath).\ deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG]) else: # search for this in the metadata, compare types # self.storageVDIs is a map of vdi_uuid to vdi_type if vdi_info[vdi][VDI_TYPE_TAG] != \ self.storageVDIs[vdi_info[vdi][UUID_TAG]]: # storage type takes authority update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \ = METADATA_OBJECT_TYPE_VDI update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG] update_map[VDI_TYPE_TAG] = \ self.storageVDIs[vdi_info[vdi][UUID_TAG]] LVMMetadataHandler(self.mdpath)\ .updateMetadata(update_map) else: # This should never happen pass
except Exception, e: raise xs_errors.XenError('MetadataError', \ opterr='Error synching SR Metadata and storage: %s' % str(e))
try: # get metadata (sr_info, vdi_info) = \ LVMMetadataHandler(self.mdpath, False).getMetadata()
# First synch SR parameters self.update(self.uuid)
            # Now update the VDI information in the metadata if required
            for vdi_offset in vdi_info.keys():
                try:
                    vdi_ref = \
                        self.session.xenapi.VDI.get_by_uuid(\
                            vdi_info[vdi_offset][UUID_TAG])
                except:
                    # maybe the VDI is not in XAPI yet, don't bother
                    continue
new_name_label = util.to_plain_string\ (self.session.xenapi.VDI.get_name_label(vdi_ref)) new_name_description = util.to_plain_string\ (self.session.xenapi.VDI.get_name_description(vdi_ref))
if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \ vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \ new_name_description: update_map = {} update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \ METADATA_OBJECT_TYPE_VDI update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG] update_map[NAME_LABEL_TAG] = new_name_label update_map[NAME_DESCRIPTION_TAG] = new_name_description LVMMetadataHandler(self.mdpath)\ .updateMetadata(update_map) except Exception, e: raise xs_errors.XenError('MetadataError', \ opterr='Error synching SR Metadata and XAPI: %s' % str(e))
util.SMlog("Entering _checkMetadataVolume") self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME) if self.isMaster: if self.mdexists and self.cmd == "sr_attach": try: # activate the management volume # will be deactivated at detach time self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME) self._synchSmConfigWithMetaData() if requiresUpgrade(self.mdpath): util.SMlog("This SR requires metadata upgrade.") self.updateSRMetadata( \ self.session.xenapi.SR.get_sm_config(self.sr_ref)\ ['allocation'] ) else: util.SMlog("SR metadata upgrade not required.") util.SMlog("Sync SR metadata and the state on the storage.") self.syncMetadataAndStorage() self.syncMetadataAndXapi() except Exception, e: util.SMlog("Exception in _checkMetadataVolume, " \ "Error: %s." % str(e)) elif not self.mdexists and not self.legacyMode: self._introduceMetaDataVolume()
if self.mdexists: self.legacyMode = False
util.SMlog("Synching sm-config with metadata volume")
        try:
            # get SR info from metadata
            sr_info = {}
            map = {}
            try:
                # First try old metadata format
                # CHECKME: this can be removed once we stop supporting upgrade
                # from pre-6.0 pools
                xml = retrieveXMLfromFile(self.mdpath)
                sr_info = _parseXML(xml)
            except Exception, e:
                # That didn't work, try the new format (valid 6.0 onwards)
                util.SMlog("Could not read SR info from metadata using old " \
                           "format, trying new format. Error: %s" % str(e))
                sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]

            if sr_info == {}:
                raise Exception("Failed to get SR information from metadata.")

            if sr_info.has_key("allocation"):
                self.provision = sr_info.get("allocation")
                map['allocation'] = sr_info.get("allocation")
            else:
                raise Exception("Allocation key not found in SR metadata. " \
                                "SR info found: %s" % sr_info)

        except Exception, e:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error reading SR params from ' \
                    'metadata Volume: %s' % str(e))
        try:
            map[self.FLAG_USE_VHD] = 'true'
            self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
        except:
            raise xs_errors.XenError('MetadataError', \
                    opterr='Error updating sm_config key')
util.SMlog("Creating Metadata volume") try: map = {} self.lvmCache.create(self.MDVOLUME_NAME, 4*1024*1024)
# activate the management volume, will be deactivated at detach time self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
name_label = util.to_plain_string(\ self.session.xenapi.SR.get_name_label(self.sr_ref)) name_description = util.to_plain_string(\ self.session.xenapi.SR.get_name_description(self.sr_ref)) map[self.FLAG_USE_VHD] = "true" map['allocation'] = self.provision self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
# Add the SR metadata self.updateSRMetadata(self.provision) except Exception, e: raise xs_errors.XenError('MetadataError', \ opterr='Error introducing Metadata Volume: %s' % str(e))
if self.mdexists: try: self.lvmCache.remove(self.MDVOLUME_NAME) except: raise xs_errors.XenError('MetadataError', \ opterr='Failed to delete MGT Volume')
""" Refreshs the size of the backing device. Return true if all paths/devices agree on the same size. """ if hasattr(self, 'SCSIid'): # LVHDoHBASR, LVHDoISCSISR return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid')) else: # LVHDSR devices = self.root.split(',') scsiutil.refreshdev(devices) return True
""" Expands the size of the SR by growing into additional availiable space, if extra space is availiable on the backing device. Needs to be called after a successful call of _refresh_size. """ currentvgsize = lvutil._getVGstats(self.vgname)['physical_size'] # We are comparing PV- with VG-sizes that are aligned. Need a threshold resizethreshold = 100*1024*1024 # 100MB devices = self.root.split(',') totaldevicesize = 0 for device in devices: totaldevicesize = totaldevicesize + scsiutil.getsize(device) if totaldevicesize >= (currentvgsize + resizethreshold): try: if hasattr(self, 'SCSIid'): # LVHDoHBASR, LVHDoISCSISR might have slaves scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session, getattr(self, 'SCSIid')) util.SMlog("LVHDSR._expand_size for %s will resize the pv." % self.uuid) for device in devices: lvutil.resizePV(device) except: util.logException("LVHDSR._expand_size for %s failed to resize" " the PV" % self.uuid)
def create(self, uuid, size): util.SMlog("LVHDSR.create for %s" % self.uuid) if not self.isMaster: util.SMlog('sr_create blocked for non-master') raise xs_errors.XenError('LVMMaster')
if lvutil._checkVG(self.vgname): raise xs_errors.XenError('SRExists')
# Check none of the devices already in use by other PBDs if util.test_hostPBD_devs(self.session, uuid, self.root): raise xs_errors.XenError('SRInUse')
# Check serial number entry in SR records for dev in self.root.split(','): if util.test_scsiserial(self.session, dev): raise xs_errors.XenError('SRInUse')
lvutil.createVG(self.root, self.vgname)
#Update serial number string scsiutil.add_serial_record(self.session, self.sr_ref, \ scsiutil.devlist_to_serialstring(self.root.split(',')))
# since this is an SR.create turn off legacy mode self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \ self.FLAG_USE_VHD, 'true')
util.SMlog("LVHDSR.delete for %s" % self.uuid) if not self.isMaster: raise xs_errors.XenError('LVMMaster') cleanup.gc_force(self.session, self.uuid)
success = True for fileName in \ filter(lambda x: util.extractSRFromDevMapper(x) == self.uuid, \ glob.glob(DEV_MAPPER_ROOT + '*')): if util.doesFileHaveOpenHandles(fileName): util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \ "handles" % fileName) success = False continue
# Now attempt to remove the dev mapper entry if not lvutil.removeDevMapperEntry(fileName, False): success = False continue
try: lvname = os.path.basename(fileName.replace('-','/').\ replace('//', '-')) lpath = os.path.join(self.path, lvname) os.unlink(lpath) except OSError, e: if e.errno != errno.ENOENT: util.SMlog("LVHDSR.delete: failed to remove the symlink for " \ "file %s. Error: %s" % (fileName, str(e))) success = False
if success: try: if util.pathexists(self.path): os.rmdir(self.path) except Exception, e: util.SMlog("LVHDSR.delete: failed to remove the symlink " \ "directory %s. Error: %s" % (self.path, str(e))) success = False
self._removeMetadataVolume() self.lvmCache.refresh() if len(lvhdutil.getLVInfo(self.lvmCache)) > 0: raise xs_errors.XenError('SRNotEmpty')
if not success: raise Exception("LVHDSR delete failed, please refer to the log " \ "for details.")
lvutil.removeVG(self.root, self.vgname) self._cleanup()
util.SMlog("LVHDSR.attach for %s" % self.uuid)
self._cleanup(True) # in case of host crashes, if detach wasn't called
if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): raise xs_errors.XenError('SRUnavailable', \ opterr='no such volume group: %s' % self.vgname)
# Refresh the metadata status self._checkMetadataVolume()
refreshsizeok = self._refresh_size()
if self.isMaster: if refreshsizeok: self._expand_size() #Update SCSIid string util.SMlog("Calling devlist_to_serial") scsiutil.add_serial_record(self.session, self.sr_ref, \ scsiutil.devlist_to_serialstring(self.root.split(',')))
# Test Legacy Mode Flag and update if VHD volumes exist if self.isMaster and self.legacyMode: vdiInfo = lvhdutil.getVDIInfo(self.lvmCache) for uuid, info in vdiInfo.iteritems(): if info.vdiType == vhdutil.VDI_TYPE_VHD: self.legacyMode = False map = self.session.xenapi.SR.get_sm_config(self.sr_ref) self._introduceMetaDataVolume() break
# Set the block scheduler for dev in self.root.split(','): self.block_setscheduler(dev)
util.SMlog("LVHDSR.detach for %s" % self.uuid) cleanup.abort(self.uuid)
# Do a best effort cleanup of the dev mapper entries # go through all devmapper entries for this VG success = True for fileName in \ filter(lambda x: util.extractSRFromDevMapper(x) == self.uuid, \ glob.glob(DEV_MAPPER_ROOT + '*')): # check if any file has open handles if util.doesFileHaveOpenHandles(fileName): # if yes, log this and signal failure util.SMlog("LVHDSR.detach: The dev mapper entry %s has open " \ "handles" % fileName) success = False continue
# Now attempt to remove the dev mapper entry if not lvutil.removeDevMapperEntry(fileName, False): success = False continue
# also remove the symlinks from /dev/VG-XenStorage-SRUUID/* try: lvname = os.path.basename(fileName.replace('-','/').\ replace('//', '-')) lvname = os.path.join(self.path, lvname) util.force_unlink(lvname) except Exception, e: util.SMlog("LVHDSR.detach: failed to remove the symlink for " \ "file %s. Error: %s" % (fileName, str(e))) success = False
# now remove the directory where the symlinks are # this should pass as the directory should be empty by now if success: try: if util.pathexists(self.path): os.rmdir(self.path) except Exception, e: util.SMlog("LVHDSR.detach: failed to remove the symlink " \ "directory %s. Error: %s" % (self.path, str(e))) success = False
if not success: raise Exception("SR detach failed, please refer to the log " \ "for details.")
# Don't delete lock files on the master as it will break the locking # between SM and any GC thread that survives through SR.detach. # However, we should still delete lock files on slaves as it is the # only place to do so. self._cleanup(self.isMaster)
if not self.legacyMode: LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid) super(LVHDSR, self).forget_vdi(uuid)
try: lvname = '' activated = True util.SMlog("LVHDSR.scan for %s" % self.uuid) if not self.isMaster: util.SMlog('sr_scan blocked for non-master') raise xs_errors.XenError('LVMMaster')
if self._refresh_size(): self._expand_size() self.lvmCache.refresh() cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG) self._loadvdis() stats = lvutil._getVGstats(self.vgname) self.physical_size = stats['physical_size'] self.physical_utilisation = stats['physical_utilisation']
# Now check if there are any VDIs in the metadata, which are not in # XAPI if self.mdexists: vdiToSnaps = {} # get VDIs from XAPI vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref) vdi_uuids = set([]) for vdi in vdis: vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
for vdi in Dict.keys(): vdi_uuid = Dict[vdi][UUID_TAG] if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): if vdiToSnaps.has_key(Dict[vdi][SNAPSHOT_OF_TAG]): vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid) else: vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
if vdi_uuid not in vdi_uuids: util.SMlog("Introduce VDI %s as it is present in " \ "metadata and not in XAPI." % vdi_uuid) sm_config = {} sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG] lvname = "%s%s" % \ (lvhdutil.LV_PREFIX[sm_config['vdi_type']],vdi_uuid) self.lvmCache.activateNoRefcount(lvname) activated = True lvPath = os.path.join(self.path, lvname)
if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW: size = self.lvmCache.getSize( \ lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \ vdi_uuid) utilisation = \ util.roundup(lvutil.LVM_SIZE_INCREMENT, long(size)) else: parent = \ vhdutil._getVHDParentNoCheck(lvPath)
if parent != None: sm_config['vhd-parent'] = parent[len( \ lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):] size = vhdutil.getSizeVirt(lvPath) if self.provision == "thin": utilisation = \ util.roundup(lvutil.LVM_SIZE_INCREMENT, vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) else: utilisation = lvhdutil.calcSizeVHDLV(long(size))
vdi_ref = self.session.xenapi.VDI.db_introduce( vdi_uuid, Dict[vdi][NAME_LABEL_TAG], Dict[vdi][NAME_DESCRIPTION_TAG], self.sr_ref, Dict[vdi][TYPE_TAG], False, bool(int(Dict[vdi][READ_ONLY_TAG])), {}, vdi_uuid, {}, sm_config)
self.session.xenapi.VDI.set_managed(vdi_ref, bool(int(Dict[vdi][MANAGED_TAG]))) self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(size)) self.session.xenapi.VDI.set_physical_utilisation( \ vdi_ref, str(utilisation)) self.session.xenapi.VDI.set_is_a_snapshot( \ vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG]))) if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): self.session.xenapi.VDI.set_snapshot_time( \ vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG])) if Dict[vdi][TYPE_TAG] == 'metadata': self.session.xenapi.VDI.set_metadata_of_pool( \ vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG])
# Update CBT status of disks either just added # or already in XAPI cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG) if cbt_logname in cbt_vdis: vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid) self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True) # For existing VDIs, update local state too # Scan in base class SR updates existing VDIs # again based on local states if self.vdis.has_key(vdi_uuid): self.vdis[vdi_uuid].cbt_enabled = True cbt_vdis.remove(cbt_logname)
# Now set the snapshot statuses correctly in XAPI for srcvdi in vdiToSnaps.keys(): try: srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi) except: # the source VDI no longer exists, continue continue
for snapvdi in vdiToSnaps[srcvdi]: try: # this might fail in cases where its already set snapref = \ self.session.xenapi.VDI.get_by_uuid(snapvdi) self.session.xenapi.VDI.set_snapshot_of(snapref, srcref) except Exception, e: util.SMlog("Setting snapshot failed. "\ "Error: %s" % str(e))
if cbt_vdis: # If we have items remaining in this list, # they are cbt_metadata VDI that XAPI doesn't know about # Add them to self.vdis and they'll get added to the DB for cbt_vdi in cbt_vdis: cbt_uuid = cbt_vdi.split(".")[0] new_vdi = self.vdi(cbt_uuid) new_vdi.ty = "cbt_metadata" new_vdi.cbt_enabled = True self.vdis[cbt_uuid] = new_vdi
ret = super(LVHDSR, self).scan(uuid) self._kickGC() return ret
finally: if lvname != '' and activated: self.lvmCache.deactivateNoRefcount(lvname)
if not lvutil._checkVG(self.vgname): return self._updateStats(uuid, 0)
if self.legacyMode: return
# synch name_label in metadata with XAPI update_map = {} update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \ METADATA_OBJECT_TYPE_SR, NAME_LABEL_TAG: util.to_plain_string( \ self.session.xenapi.SR.get_name_label(self.sr_ref)), NAME_DESCRIPTION_TAG: util.to_plain_string( \ self.session.xenapi.SR.get_name_description(self.sr_ref)) } LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) self.virtual_allocation = valloc + virtAllocDelta util.SMlog("Setting virtual_allocation of SR %s to %d" % (uuid, self.virtual_allocation)) stats = lvutil._getVGstats(self.vgname) self.physical_size = stats['physical_size'] self.physical_utilisation = stats['physical_utilisation'] self._db_update()
def probe(self): return lvutil.srlist_toxml( lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.root), lvhdutil.VG_PREFIX, (self.srcmd.params['sr_sm_config'].has_key('metadata') and \ self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
continue raise xs_errors.XenError('VDIUnavailable', \ opterr='Error scanning VDI %s' % uuid)
if self.vdis.has_key(vdi.parent): self.vdis[vdi.parent].read_only = True if geneology.has_key(vdi.parent): geneology[vdi.parent].append(uuid) else: geneology[vdi.parent] = [uuid]
# Now remove all hidden leaf nodes to avoid introducing records that # will be GC'ed util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) del self.vdis[uuid]
space_available = lvutil._getVGstats(self.vgname)['freespace'] if (space_available < amount_needed): util.SMlog("Not enough space! free space: %d, need: %d" % \ (space_available, amount_needed)) raise xs_errors.XenError('SRNoSpace')
entries = self.journaler.getAll(LVHDVDI.JRN_CLONE) for uuid, val in entries.iteritems(): util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone",self.uuid) self._handleInterruptedCloneOp(uuid, val) util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone",self.uuid) self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)
entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF) if len(entries) > 0: util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***") cleanup.gc_force(self.session, self.uuid) self.lvmCache.refresh()
"""Either roll back or finalize the interrupted snapshot/clone operation. Rolling back is unsafe if the leaf VHDs have already been in use and written to. However, it is always safe to roll back while we're still in the context of the failed snapshot operation since the VBD is paused for the duration of the operation""" util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval)) lvs = lvhdutil.getLVInfo(self.lvmCache) baseUuid, clonUuid = jval.split("_")
# is there a "base copy" VDI? if not lvs.get(baseUuid): # no base copy: make sure the original is there if lvs.get(origUuid): util.SMlog("*** INTERRUPTED CLONE OP: nothing to do") return raise util.SMException("base copy %s not present, "\ "but no original %s found" % (baseUuid, origUuid))
if forceUndo: util.SMlog("Explicit revert") self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) return
if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)): util.SMlog("One or both leaves missing => revert") self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) return
vdis = lvhdutil.getVDIInfo(self.lvmCache) if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError): util.SMlog("One or both leaves invalid => revert") self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) return
orig = vdis[origUuid] base = vdis[baseUuid] self.lvActivator.activate(baseUuid, base.lvName, False) self.lvActivator.activate(origUuid, orig.lvName, False) if orig.parentUuid != baseUuid: parent = vdis[orig.parentUuid] self.lvActivator.activate(parent.uuid, parent.lvName, False) origPath = os.path.join(self.path, orig.lvName) if not vhdutil.check(origPath): util.SMlog("Orig VHD invalid => revert") self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) return
if clonUuid: clon = vdis[clonUuid] clonPath = os.path.join(self.path, clon.lvName) self.lvActivator.activate(clonUuid, clon.lvName, False) if not vhdutil.check(clonPath): util.SMlog("Clon VHD invalid => revert") self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid) return
util.SMlog("Snapshot appears valid, will not roll back") self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)
base = lvs[baseUuid] basePath = os.path.join(self.path, base.name)
# make the parent RW if base.readonly: self.lvmCache.setReadonly(base.name, False)
ns = lvhdutil.NS_PREFIX_LVM + self.uuid origRefcountBinary = RefCounter.check(origUuid, ns)[1] origRefcountNormal = 0
# un-hide the parent if base.vdiType == vhdutil.VDI_TYPE_VHD: self.lvActivator.activate(baseUuid, base.name, False) origRefcountNormal = 1 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False) if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden: vhdutil.setHidden(basePath, False) elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden: self.lvmCache.setHidden(base.name, False)
# remove the child nodes if clonUuid and lvs.get(clonUuid): if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD: raise util.SMException("clone %s not VHD" % clonUuid) self.lvmCache.remove(lvs[clonUuid].name) if self.lvActivator.get(clonUuid, False): self.lvActivator.remove(clonUuid, False) if lvs.get(origUuid): self.lvmCache.remove(lvs[origUuid].name)
# inflate the parent to fully-allocated size if base.vdiType == vhdutil.VDI_TYPE_VHD: fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)
# rename back origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid self.lvmCache.rename(base.name, origLV) RefCounter.reset(baseUuid, ns) if self.lvActivator.get(baseUuid, False): self.lvActivator.replace(baseUuid, origUuid, origLV, False) RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
# At this stage, tapdisk and SM vdi will be in paused state. Remove # flag to facilitate vm deactivate origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid) self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
# update LVM metadata on slaves slaves = util.get_slaves_attached_on(self.session, [origUuid]) lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname, origLV, origUuid, slaves)
util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
"""Finalize the interrupted snapshot/clone operation. This must not be called from the live snapshot op context because we attempt to pause/ unpause the VBD here (the VBD is already paused during snapshot, so it would cause a deadlock)""" base = vdis[baseUuid] clon = None if clonUuid: clon = vdis[clonUuid]
cleanup.abort(self.uuid)
# make sure the parent is hidden and read-only if not base.hidden: if base.vdiType == vhdutil.VDI_TYPE_RAW: self.lvmCache.setHidden(base.lvName) else: basePath = os.path.join(self.path, base.lvName) vhdutil.setHidden(basePath) if not base.lvReadonly: self.lvmCache.setReadonly(base.lvName, True)
# NB: since this snapshot-preserving call is only invoked outside the # snapshot op context, we assume the LVM metadata on the involved slave # has by now been refreshed and do not attempt to do it here
# Update the original record try: vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid) sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) type = self.session.xenapi.VDI.get_type(vdi_ref) sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD sm_config['vhd-parent'] = baseUuid self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) except XenAPI.Failure: util.SMlog("ERROR updating the orig record")
# introduce the new VDI records if clonUuid: try: clon_vdi = VDI.VDI(self, clonUuid) clon_vdi.read_only = False clon_vdi.location = clonUuid clon_vdi.utilisation = clon.sizeLV clon_vdi.sm_config = { "vdi_type": vhdutil.VDI_TYPE_VHD, "vhd-parent": baseUuid }
if not self.legacyMode: LVMMetadataHandler(self.mdpath).\ ensureSpaceIsAvailableForVdis(1)
clon_vdi_ref = clon_vdi._db_introduce() util.SMlog("introduced clon VDI: %s (%s)" % \ (clon_vdi_ref, clonUuid))
vdi_info = { UUID_TAG: clonUuid, NAME_LABEL_TAG: clon_vdi.label, NAME_DESCRIPTION_TAG: clon_vdi.description, IS_A_SNAPSHOT_TAG: 0, SNAPSHOT_OF_TAG: '', SNAPSHOT_TIME_TAG: '', TYPE_TAG: type, VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'], READ_ONLY_TAG: int(clon_vdi.read_only), MANAGED_TAG: int(clon_vdi.managed), METADATA_OF_POOL_TAG: '' }
if not self.legacyMode: LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
except XenAPI.Failure: util.SMlog("ERROR introducing the clon record")
try: base_vdi = VDI.VDI(self, baseUuid) # readonly parent base_vdi.label = "base copy" base_vdi.read_only = True base_vdi.location = baseUuid base_vdi.size = base.sizeVirt base_vdi.utilisation = base.sizeLV base_vdi.managed = False base_vdi.sm_config = { "vdi_type": vhdutil.VDI_TYPE_VHD, "vhd-parent": baseUuid }
if not self.legacyMode: LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
base_vdi_ref = base_vdi._db_introduce() util.SMlog("introduced base VDI: %s (%s)" % \ (base_vdi_ref, baseUuid))
vdi_info = { UUID_TAG: baseUuid, NAME_LABEL_TAG: base_vdi.label, NAME_DESCRIPTION_TAG: base_vdi.description, IS_A_SNAPSHOT_TAG: 0, SNAPSHOT_OF_TAG: '', SNAPSHOT_TIME_TAG: '', TYPE_TAG: type, VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'], READ_ONLY_TAG: int(base_vdi.read_only), MANAGED_TAG: int(base_vdi.managed), METADATA_OF_POOL_TAG: '' }
if not self.legacyMode: LVMMetadataHandler(self.mdpath).addVdi(vdi_info) except XenAPI.Failure: util.SMlog("ERROR introducing the base record")
util.SMlog("*** INTERRUPTED CLONE OP: complete")
"""Undo all VHD & SM interrupted journaled operations. This call must be serialized with respect to all operations that create journals""" # undoing interrupted inflates must be done first, since undoing VHD # ops might require inflations self.lock.acquire() try: self._undoAllInflateJournals() self._undoAllVHDJournals() self._handleInterruptedCloneOps() self._handleInterruptedCoalesceLeaf() finally: self.lock.release() self.cleanup()
return (uuid, vdi.path, val)) self.lvmCache.setReadonly(vdi.lvname, False) vhdutil.VHD_FOOTER_SIZE) self.lvmCache.setReadonly(vdi.lvname, True) lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid)
"""check if there are VHD journals in existence and revert them""" journals = lvhdutil.getAllVHDJournals(self.lvmCache) if len(journals) == 0: return self._loadvdis() for uuid, jlvName in journals: vdi = self.vdis[uuid] util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path)) self.lvActivator.activate(uuid, vdi.lvname, False) self.lvmCache.activateNoRefcount(jlvName) fullSize = lvhdutil.calcSizeVHDLV(vdi.size) lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize) try: jFile = os.path.join(self.path, jlvName) vhdutil.revert(vdi.path, jFile) except util.CommandException: util.logException("VHD journal revert") vhdutil.check(vdi.path) util.SMlog("VHD revert failed but VHD ok: removing journal") # Attempt to reclaim unused space vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False) NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt) if NewSize < fullSize: lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize)) lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid, self.vgname, vdi.lvname, uuid) self.lvmCache.remove(jlvName) delattr(self,"vdiInfo") delattr(self,"allVDIs")
masterRef = util.get_this_host_ref(self.session) args = {"vgName": self.vgname, "action1": "deactivateNoRefcount", "lvName1": origOldLV} for hostRef in hostRefs: if hostRef == masterRef: continue util.SMlog("Deactivate VDI on %s" % hostRef) rv = eval(self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args)) util.SMlog("call-plugin returned: %s" % rv) if not rv: raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
baseUuid, baseLV): """We need to reactivate the original LV on each slave (note that the name for the original LV might change), as well as init the refcount for the base LV""" args = {"vgName" : self.vgname, "action1": "refresh", "lvName1": origLV, "action2": "activate", "ns2" : lvhdutil.NS_PREFIX_LVM + self.uuid, "lvName2": baseLV, "uuid2" : baseUuid}
masterRef = util.get_this_host_ref(self.session) for hostRef in hostRefs: if hostRef == masterRef: continue util.SMlog("Updating %s, %s, %s on slave %s" % \ (origOldLV, origLV, baseLV, hostRef)) rv = eval(self.session.xenapi.host.call_plugin( hostRef, self.PLUGIN_ON_SLAVE, "multi", args)) util.SMlog("call-plugin returned: %s" % rv) if not rv: raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
"""Reactivate and refresh CBT log file on slaves""" args = {"vgName" : self.vgname, "action1": "deactivateNoRefcount", "lvName1": cbtlog, "action2": "refresh", "lvName2": cbtlog}
masterRef = util.get_this_host_ref(self.session) for hostRef in hostRefs: if hostRef == masterRef: continue util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef)) rv = eval(self.session.xenapi.host.call_plugin( hostRef, self.PLUGIN_ON_SLAVE, "multi", args)) util.SMlog("call-plugin returned: %s" % rv) if not rv: raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
"""Tell the slave we deleted the base image""" args = {"vgName" : self.vgname, "action1": "cleanupLockAndRefcount", "uuid1": baseUuid, "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid}
masterRef = util.get_this_host_ref(self.session) for hostRef in hostRefs: if hostRef == masterRef: continue util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef)) rv = eval(self.session.xenapi.host.call_plugin( hostRef, self.PLUGIN_ON_SLAVE, "multi", args)) util.SMlog("call-plugin returned: %s" % rv) if not rv: raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
"""delete stale refcounter, flag, and lock files""" RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid) IPCFlag(self.uuid).clearAll() if not skipLockCleanup: Lock.cleanupAll(self.uuid) Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
util.SMlog("Test mode: %s" % self.testMode) if self.ENV_VAR_VHD_TEST.get(self.testMode): os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes" util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
        # don't bother if an instance already running (this is just an
        # optimization to reduce the overhead of forking a new process if we
        # don't have to, but the process will check the lock anyways)
        lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
        if not lockRunning.acquireNoblock():
            if cleanup.should_preempt(self.session, self.uuid):
                util.SMlog("Aborting currently-running coalesce of garbage "
                           "VDI")
                try:
                    if not cleanup.abort(self.uuid, soft=True):
                        util.SMlog("The GC has already been scheduled to "
                                   "re-start")
                except util.CommandException, e:
                    if e.code != errno.ETIMEDOUT:
                        raise
                    util.SMlog('failed to abort the GC')
            else:
                util.SMlog("A GC instance already running, not kicking")
                return
        else:
            lockRunning.release()
util.SMlog("Kicking GC") cleanup.gc(self.session, self.uuid, True)
# Ensure we have space for at least one LV self._ensureSpaceAvailable(self.journaler.LV_SIZE)
self.sm_config_override['vhd-parent'] = self.parent else:
# scan() didn't run: determine the type of the VDI manually if self._determineType(): return
        # the VDI must be in the process of being created
        self.exists = False

        if self.sr.srcmd.params.has_key("vdi_sm_config") and \
                self.sr.srcmd.params["vdi_sm_config"].has_key("type"):
            type = self.sr.srcmd.params["vdi_sm_config"]["type"]
            if type == PARAM_RAW:
                self.vdi_type = vhdutil.VDI_TYPE_RAW
            elif type == PARAM_VHD:
                self.vdi_type = vhdutil.VDI_TYPE_VHD
                if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
                    raise xs_errors.XenError('VDICreate', \
                            opterr='Cannot create VHD type disk in legacy mode')
            else:
                raise xs_errors.XenError('VDICreate', opterr='bad type')
        self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
        self.path = os.path.join(self.sr.path, self.lvname)
util.SMlog("LVHDVDI.create for %s" % self.uuid) if not self.sr.isMaster: raise xs_errors.XenError('LVMMaster') if self.exists: raise xs_errors.XenError('VDIExists')
size = vhdutil.validate_and_round_vhd_size(long(size))
util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" %\ (self.vdi_type, self.path, size)) lvSize = 0 self.sm_config = self.sr.srcmd.params["vdi_sm_config"] if self.vdi_type == vhdutil.VDI_TYPE_RAW: lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, long(size)) else: if self.sr.provision == "thin": lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) elif self.sr.provision == "thick": lvSize = lvhdutil.calcSizeVHDLV(long(size))
self.sr._ensureSpaceAvailable(lvSize)
try: self.sr.lvmCache.create(self.lvname, lvSize) if self.vdi_type == vhdutil.VDI_TYPE_RAW: self.size = self.sr.lvmCache.getSize(self.lvname) else: vhdutil.create(self.path, long(size), False, lvhdutil.MSIZE_MB) self.size = vhdutil.getSizeVirt(self.path) self.sr.lvmCache.deactivateNoRefcount(self.lvname) except util.CommandException, e: util.SMlog("Unable to create VDI"); self.sr.lvmCache.remove(self.lvname) raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
self.utilisation = lvSize self.sm_config["vdi_type"] = self.vdi_type
if not self.sr.legacyMode: LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
self.ref = self._db_introduce() self.sr._updateStats(self.sr.uuid, self.size)
vdi_info = { UUID_TAG: self.uuid, NAME_LABEL_TAG: util.to_plain_string(self.label), NAME_DESCRIPTION_TAG: util.to_plain_string(self.description), IS_A_SNAPSHOT_TAG: 0, SNAPSHOT_OF_TAG: '', SNAPSHOT_TIME_TAG: '', TYPE_TAG: self.ty, VDI_TYPE_TAG: self.vdi_type, READ_ONLY_TAG: int(self.read_only), MANAGED_TAG: int(self.managed), METADATA_OF_POOL_TAG: '' }
if not self.sr.legacyMode: LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
return VDI.VDI.get_params(self)
util.SMlog("LVHDVDI.delete for %s" % self.uuid) try: self._loadThis() except SR.SRException, e: # Catch 'VDI doesn't exist' exception if e.errno == 46: return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only) raise
vdi_ref = self.sr.srcmd.params['vdi_ref'] if not self.session.xenapi.VDI.get_managed(vdi_ref): raise xs_errors.XenError("VDIDelete", \ opterr="Deleting non-leaf node not permitted")
if not self.hidden: self._markHidden()
if not data_only: # Remove from XAPI and delete from MGT self._db_forget() else: # If this is a data_destroy call, don't remove from XAPI db # Only delete from MGT if not self.sr.legacyMode: LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
# deactivate here because it might be too late to do it in the "final" # step: GC might have removed the LV by then if self.sr.lvActivator.get(self.uuid, False): self.sr.lvActivator.deactivate(self.uuid, False)
try: self.sr.lvmCache.remove(self.lvname) self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid) self.sr.lock.cleanupAll(vdi_uuid) except SR.SRException, e: util.SMlog( "Failed to remove the volume (maybe is leaf coalescing) " "for %s err:%d" % (self.uuid, e.errno))
self.sr._updateStats(self.sr.uuid, -self.size) self.sr._kickGC() return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
util.SMlog("LVHDVDI.attach for %s" % self.uuid) if self.sr.journaler.hasJournals(self.uuid): raise xs_errors.XenError('VDIUnavailable', opterr='Interrupted operation detected on this VDI, ' 'scan SR first to trigger auto-repair')
        writable = ('args' not in self.sr.srcmd.params) or \
                (self.sr.srcmd.params['args'][0] == "true")
        needInflate = True
        if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable:
            needInflate = False
        else:
            self._loadThis()
            if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size):
                needInflate = False
if needInflate: try: self._prepareThin(True) except: util.logException("attach") raise xs_errors.XenError('LVMProvisionAttach')
try: return self._attach() finally: if not self.sr.lvActivator.deactivateAll(): util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
util.SMlog("LVHDVDI.detach for %s" % self.uuid) self._loadThis() already_deflated = (self.utilisation < \ lvhdutil.calcSizeVHDLV(self.size)) needDeflate = True if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated: needDeflate = False elif self.sr.provision == "thick": needDeflate = False # except for snapshots, which are always deflated vdi_ref = self.sr.srcmd.params['vdi_ref'] snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref) if snap: needDeflate = True
if needDeflate: try: self._prepareThin(False) except: util.logException("_prepareThin") raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
try: self._detach() finally: if not self.sr.lvActivator.deactivateAll(): raise xs_errors.XenError("SMGeneral", opterr="deactivation")
# We only support offline resize util.SMlog("LVHDVDI.resize for %s" % self.uuid) if not self.sr.isMaster: raise xs_errors.XenError('LVMMaster')
self._loadThis() if self.hidden: raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
if size < self.size: util.SMlog('vdi_resize: shrinking not supported: ' + \ '(current size: %d, new size: %d)' % (self.size, size)) raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
size = vhdutil.validate_and_round_vhd_size(long(size))
if size == self.size: return VDI.VDI.get_params(self)
        if self.vdi_type == vhdutil.VDI_TYPE_RAW:
            lvSizeOld = self.size
            lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
        else:
            lvSizeOld = self.utilisation
            lvSizeNew = lvhdutil.calcSizeVHDLV(size)
            if self.sr.provision == "thin":
                # VDI is currently deflated, so keep it deflated
                lvSizeNew = lvSizeOld
        assert(lvSizeNew >= lvSizeOld)
        spaceNeeded = lvSizeNew - lvSizeOld
        self.sr._ensureSpaceAvailable(spaceNeeded)
oldSize = self.size if self.vdi_type == vhdutil.VDI_TYPE_RAW: self.sr.lvmCache.setSize(self.lvname, lvSizeNew) self.size = self.sr.lvmCache.getSize(self.lvname) self.utilisation = self.size else: if lvSizeNew != lvSizeOld: lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid, lvSizeNew) vhdutil.setSizeVirtFast(self.path, size) self.size = vhdutil.getSizeVirt(self.path) self.utilisation = self.sr.lvmCache.getSize(self.lvname)
vdi_ref = self.sr.srcmd.params['vdi_ref'] self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size)) self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, str(self.utilisation)) self.sr._updateStats(self.sr.uuid, self.size - oldSize) super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) return VDI.VDI.get_params(self)
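# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the driver): resize() above derives the
# new LV size from the VDI type and the SR's provisioning mode before
# checking free space. A simplified, hypothetical version of that choice,
# where roundup/calc_vhd_lv_size stand in for the util/lvhdutil helpers:
def _new_lv_size(is_raw, provision, cur_utilisation, new_virt_size,
                 roundup, calc_vhd_lv_size):
    if is_raw:
        return roundup(new_virt_size)        # raw: LV tracks virtual size
    if provision == "thin":
        return cur_utilisation               # thin: stay deflated for now
    return calc_vhd_lv_size(new_virt_size)   # thick: fully-inflated VHD LV
# ----------------------------------------------------------------------------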
return self._do_snapshot( sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1)) if self.vdi_type != vhdutil.VDI_TYPE_VHD: raise xs_errors.XenError('Unimplemented')
parent_uuid = vdi1 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid assert(self.sr.lvmCache.checkLV(parent_lvname)) parent_path = os.path.join(self.sr.path, parent_lvname)
self.sr.lvActivator.activate(self.uuid, self.lvname, False) self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
vhdutil.setParent(self.path, parent_path, False) vhdutil.setHidden(parent_path) self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid, True): raise util.SMException("failed to refresh VDI %s" % self.uuid)
util.SMlog("Compose done")
util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid) if self.vdi_type != vhdutil.VDI_TYPE_VHD: raise xs_errors.XenError('Unimplemented')
self.sr.lvActivator.activate(self.uuid, self.lvname, False)
        # safety check
        if not vhdutil.hasParent(self.path):
            raise util.SMException(("ERROR: VDI %s has no parent, "
                                    "will not reset contents") % self.uuid)
vhdutil.killData(self.path)
self._chainSetActive(True, True, True) if not util.pathexists(self.path): raise xs_errors.XenError('VDIUnavailable', \ opterr='Could not find: %s' % self.path)
if not hasattr(self,'xenstore_data'): self.xenstore_data = {}
self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \ scsiutil.gen_synthetic_page_data(self.uuid)))
self.xenstore_data['storage-type']='lvm' self.xenstore_data['vdi-type']=self.vdi_type
self.attached = True self.sr.lvActivator.persist() return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
self._chainSetActive(False, True) self.attached = False
cloneOp=False, secondary=None, cbtlog=None): # If cbt enabled, save file consistency state if cbtlog is not None: if blktap2.VDI.tap_status(self.session, vdi_uuid): consistency_state = False else: consistency_state = True util.SMlog("Saving log consistency state of %s for vdi: %s" % (consistency_state, vdi_uuid)) else: consistency_state = None
pause_time = time.time() if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        snapResult = None
        try:
            snapResult = self._snapshot(snapType, cloneOp, cbtlog,
                                        consistency_state)
        except Exception, e1:
            try:
                blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
                                        secondary=None)
            except Exception, e2:
                util.SMlog('WARNING: failed to clean up failed snapshot: '
                           '%s (error ignored)' % e2)
            raise
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
        unpause_time = time.time()
        if (unpause_time - pause_time) > LONG_SNAPTIME:
            util.SMlog('WARNING: snapshot paused VM for %s seconds' %
                       (unpause_time - pause_time))
        return snapResult
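# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the driver): _do_snapshot() above measures
# how long the VDI stayed tap-paused and warns past LONG_SNAPTIME. The same
# timing pattern in isolation, with a caller-supplied warn() callback:
import time

def _run_while_paused(op, warn_after_secs, warn):
    start = time.time()
    try:
        return op()
    finally:
        elapsed = time.time() - start
        if elapsed > warn_after_secs:
            warn("operation held the VDI paused for %.1f seconds" % elapsed)
# ----------------------------------------------------------------------------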
util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
if not self.sr.isMaster: raise xs_errors.XenError('LVMMaster') if self.sr.legacyMode: raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
self._loadThis() if self.hidden: raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
self.sm_config = self.session.xenapi.VDI.get_sm_config( \ self.sr.srcmd.params['vdi_ref']) if self.sm_config.has_key("type") and self.sm_config['type']=='raw': if not util.fistpoint.is_active("testsm_clone_allow_raw"): raise xs_errors.XenError('Unimplemented', \ opterr='Raw VDI, snapshot or clone not permitted')
# we must activate the entire VHD chain because the real parent could # theoretically be anywhere in the chain if all VHDs under it are empty self._chainSetActive(True, False) if not util.pathexists(self.path): raise xs_errors.XenError('VDIUnavailable', \ opterr='VDI unavailable: %s' % (self.path))
if self.vdi_type == vhdutil.VDI_TYPE_VHD: depth = vhdutil.getDepth(self.path) if depth == -1: raise xs_errors.XenError('VDIUnavailable', \ opterr='failed to get VHD depth') elif depth >= vhdutil.MAX_CHAIN_SIZE: raise xs_errors.XenError('SnapshotChainTooLong')
self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \ self.sr.srcmd.params['vdi_ref'])
fullpr = lvhdutil.calcSizeVHDLV(self.size) thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \ vhdutil.calcOverheadEmpty(lvhdutil.MSIZE)) lvSizeOrig = thinpr lvSizeClon = thinpr
hostRefs = [] if self.sr.cmd == "vdi_snapshot": hostRefs = util.get_hosts_attached_on(self.session, [self.uuid]) if hostRefs: lvSizeOrig = fullpr if self.sr.provision == "thick": if not self.issnap: lvSizeOrig = fullpr if self.sr.cmd != "vdi_snapshot": lvSizeClon = fullpr
if (snapType == VDI.SNAPSHOT_SINGLE or snapType == VDI.SNAPSHOT_INTERNAL): lvSizeClon = 0
        # the space required must include 2 journal LVs: a clone journal and
        # an inflate journal (for the failure handling)
        size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
        lvSizeBase = self.size
        if self.vdi_type == vhdutil.VDI_TYPE_VHD:
            lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT,
                    vhdutil.getSizePhys(self.path))
            size_req -= (self.utilisation - lvSizeBase)
        self.sr._ensureSpaceAvailable(size_req)
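        # Illustrative note (not part of the original comments), with
        # hypothetical numbers: for a thick-provisioned, fully-inflated VHD
        # leaf,
        #   size_req = lvSizeOrig + lvSizeClon + 2 * journal LV size
        #              - (current utilisation - deflated base size)
        # i.e. space for the two new leaves and two journals, minus what the
        # base copy frees up when it is deflated to its physical VHD size.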
if hostRefs: self.sr._updateSlavesPreClone(hostRefs, self.lvname)
baseUuid = util.gen_uuid() origUuid = self.uuid clonUuid = "" if snapType == VDI.SNAPSHOT_DOUBLE: clonUuid = util.gen_uuid() jval = "%s_%s" % (baseUuid, clonUuid) with lvutil.LvmLockContext(): # This makes multiple LVM calls so take the lock early self.sr.journaler.create(self.JRN_CLONE, origUuid, jval) util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
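# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the driver): the clone journal written
# above packs both new UUIDs into a single value, which
# _handleInterruptedCloneOp() later splits back apart. The round trip in
# isolation (UUIDs contain hyphens, never underscores, so "_" is a safe
# separator; clonUuid may be empty for single snapshots):
def _pack_clone_journal(base_uuid, clon_uuid):
    return "%s_%s" % (base_uuid, clon_uuid)

def _unpack_clone_journal(jval):
    base_uuid, clon_uuid = jval.split("_")
    return base_uuid, clon_uuid

assert _unpack_clone_journal(_pack_clone_journal("base", "clon")) == \
        ("base", "clon")
# ----------------------------------------------------------------------------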
try: with lvutil.LvmLockContext(): # self becomes the "base vdi" origOldLV = self.lvname baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid self.sr.lvmCache.rename(self.lvname, baseLV) self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False) RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) self.uuid = baseUuid self.lvname = baseLV self.path = os.path.join(self.sr.path, baseLV) self.label = "base copy" self.read_only = True self.location = self.uuid self.managed = False
# shrink the base copy to the minimum - we do it before creating # the snapshot volumes to avoid requiring double the space if self.vdi_type == vhdutil.VDI_TYPE_VHD: lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase) self.utilisation = lvSizeBase util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
snapVDI = self._createSnap(origUuid, lvSizeOrig, False) util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid) snapVDI2 = None if snapType == VDI.SNAPSHOT_DOUBLE: snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True) # If we have CBT enabled on the VDI, # set CBT status for the new snapshot disk if cbtlog: snapVDI2.cbt_enabled = True util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
# note: it is important to mark the parent hidden only AFTER the # new VHD children have been created, which are referencing it; # otherwise we would introduce a race with GC that could reclaim # the parent before we snapshot it if self.vdi_type == vhdutil.VDI_TYPE_RAW: self.sr.lvmCache.setHidden(self.lvname) else: vhdutil.setHidden(self.path) util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
# set the base copy to ReadOnly # Do this outside the LvmLockContext to avoid deadlock self.sr.lvmCache.setReadonly(self.lvname, True) util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
if hostRefs: self.sr._updateSlavesOnClone(hostRefs, origOldLV, snapVDI.lvname, self.uuid, self.lvname)
# Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog: snapVDI._cbt_snapshot(clonUuid, cbt_consistency) if hostRefs: cbtlog_file = self._get_cbt_logname(snapVDI.uuid) try: self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file) except: alert_name = "VDI_CBT_SNAPSHOT_FAILED" alert_str = ("Creating CBT snapshot for {} failed" .format(snapVDI.uuid)) snapVDI._disable_cbt_on_error(alert_name, alert_str) pass
except (util.SMException, XenAPI.Failure), e: util.logException("LVHDVDI._snapshot") self._failClone(origUuid, jval, str(e)) util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal",self.sr.uuid)
with lvutil.LvmLockContext(): # This makes multiple LVM calls so take the lock early self.sr.journaler.remove(self.JRN_CLONE, origUuid)
return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
"""Snapshot self and return the snapshot VDI object""" snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid snapPath = os.path.join(self.sr.path, snapLV) self.sr.lvmCache.create(snapLV, long(snapSizeLV)) util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid) if isNew: RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid) self.sr.lvActivator.add(snapUuid, snapLV, False) parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW) vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB) snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid)
snapVDI = LVHDVDI(self.sr, snapUuid) snapVDI.read_only = False snapVDI.location = snapUuid snapVDI.size = self.size snapVDI.utilisation = snapSizeLV snapVDI.sm_config = dict() for key, val in self.sm_config.iteritems(): if key not in ["type", "vdi_type", "vhd-parent", "paused"] and \ not key.startswith("host_"): snapVDI.sm_config[key] = val snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD snapVDI.sm_config["vhd-parent"] = snapParent snapVDI.lvname = snapLV return snapVDI
if snapType is not VDI.SNAPSHOT_INTERNAL: self.sr._updateStats(self.sr.uuid, self.size) basePresent = True
        # Verify parent locator field of both children and delete basePath if
        # unused
        snapParent = snapVDI.sm_config["vhd-parent"]
        snap2Parent = ""
        if snapVDI2:
            snap2Parent = snapVDI2.sm_config["vhd-parent"]
        if snapParent != self.uuid and \
                (not snapVDI2 or snap2Parent != self.uuid):
            util.SMlog("%s != %s != %s => deleting unused base %s" % \
                    (snapParent, self.uuid, snap2Parent, self.lvname))
            RefCounter.put(self.uuid, False,
                    lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
            self.sr.lvmCache.remove(self.lvname)
            self.sr.lvActivator.remove(self.uuid, False)
            if hostRefs:
                self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
            basePresent = False
        else:
            # assign the _binary_ refcount of the original VDI to the new base
            # VDI (but as the normal refcount, since binary refcounts are only
            # for leaf nodes). The normal refcount of the child is not
            # transferred to the base VDI because normal refcounts are
            # incremented and decremented individually, and not based on the
            # VHD chain (i.e., the child's normal refcount will be decremented
            # independently of its parent situation). Add 1 for this clone op.
            # Note that we do not need to protect the refcount operations
            # below with per-VDI locking like we do in lvutil because at this
            # point we have exclusive access to the VDIs involved. Other SM
            # operations are serialized by the Agent or with the SR lock, and
            # any coalesce activations are serialized with the SR lock. (The
            # coalesce activates the coalesced VDI pair in the beginning, which
            # cannot affect the VDIs here because they cannot possibly be
            # involved in coalescing at this point, and at the relinkSkip step
            # that activates the children, which takes the SR lock.)
            ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid
            (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
            RefCounter.set(self.uuid, bcnt + 1, 0, ns)
# the "paused" and "host_*" sm-config keys are special and must stay on # the leaf without being inherited by anyone else for key in filter(lambda x: x == "paused" or x.startswith("host_"), self.sm_config.keys()): snapVDI.sm_config[key] = self.sm_config[key] del self.sm_config[key]
# Introduce any new VDI records & update the existing one type = self.session.xenapi.VDI.get_type( \ self.sr.srcmd.params['vdi_ref']) if snapVDI2: LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1) vdiRef = snapVDI2._db_introduce() if cloneOp: vdi_info = { UUID_TAG: snapVDI2.uuid, NAME_LABEL_TAG: util.to_plain_string(\ self.session.xenapi.VDI.get_name_label( \ self.sr.srcmd.params['vdi_ref'])), NAME_DESCRIPTION_TAG: util.to_plain_string(\ self.session.xenapi.VDI.get_name_description\ (self.sr.srcmd.params['vdi_ref'])), IS_A_SNAPSHOT_TAG: 0, SNAPSHOT_OF_TAG: '', SNAPSHOT_TIME_TAG: '', TYPE_TAG: type, VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], READ_ONLY_TAG: 0, MANAGED_TAG: int(snapVDI2.managed), METADATA_OF_POOL_TAG: '' } else: util.SMlog("snapshot VDI params: %s" % \ self.session.xenapi.VDI.get_snapshot_time(vdiRef)) vdi_info = { UUID_TAG: snapVDI2.uuid, NAME_LABEL_TAG: util.to_plain_string(\ self.session.xenapi.VDI.get_name_label( \ self.sr.srcmd.params['vdi_ref'])), NAME_DESCRIPTION_TAG: util.to_plain_string(\ self.session.xenapi.VDI.get_name_description\ (self.sr.srcmd.params['vdi_ref'])), IS_A_SNAPSHOT_TAG: 1, SNAPSHOT_OF_TAG: snapVDI.uuid, SNAPSHOT_TIME_TAG: '', TYPE_TAG: type, VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'], READ_ONLY_TAG: 0, MANAGED_TAG: int(snapVDI2.managed), METADATA_OF_POOL_TAG: '' }
    LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
    util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
            (vdiRef, snapVDI2.uuid))
if basePresent:
    LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
    vdiRef = self._db_introduce()
    vdi_info = {UUID_TAG: self.uuid,
                NAME_LABEL_TAG: self.label,
                NAME_DESCRIPTION_TAG: self.description,
                IS_A_SNAPSHOT_TAG: 0,
                SNAPSHOT_OF_TAG: '',
                SNAPSHOT_TIME_TAG: '',
                TYPE_TAG: type,
                VDI_TYPE_TAG: self.sm_config['vdi_type'],
                READ_ONLY_TAG: 1,
                MANAGED_TAG: 0,
                METADATA_OF_POOL_TAG: ''
               }
    LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
    util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
            (vdiRef, self.uuid))
# Update the original record
vdi_ref = self.sr.srcmd.params['vdi_ref']
self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
        str(snapVDI.utilisation))
# Return the info on the new snap VDI
snap = snapVDI2
if not snap:
    snap = self
    if not basePresent:
        # a single-snapshot of an empty VDI will be a noop, resulting
        # in no new VDIs, so return the existing one. The GC wouldn't
        # normally try to single-snapshot an empty VHD of course, but
        # if an external snapshot operation manages to sneak in right
        # before a snapshot-coalesce phase, we would get here
        snap = snapVDI
return snap.get_params()
    self.managed = False
else:
    self.sm_config_override = {'vdi_type': self.vdi_type}
self.vdi_type = lvInfo.vdiType
self.lvname = lvInfo.name
self.size = lvInfo.size
self.utilisation = lvInfo.size
self.hidden = lvInfo.hidden
self.active = lvInfo.active
self.readonly = lvInfo.readonly
self.parent = ''
self.path = os.path.join(self.sr.path, self.lvname)
if hasattr(self, "sm_config_override"):
    self.sm_config_override["vdi_type"] = self.vdi_type
else:
    self.sm_config_override = {'vdi_type': self.vdi_type}
if self.vdi_type == vhdutil.VDI_TYPE_RAW:
    self.loaded = True
self.size = vhdInfo.sizeVirt
self.parent = vhdInfo.parentUuid
self.hidden = vhdInfo.hidden
self.loaded = True
"""Determine whether this is a raw or a VHD VDI""" if self.sr.srcmd.params.has_key("vdi_ref"): vdi_ref = self.sr.srcmd.params["vdi_ref"] sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) if sm_config.get("vdi_type"): self.vdi_type = sm_config["vdi_type"] prefix = lvhdutil.LV_PREFIX[self.vdi_type] self.lvname = "%s%s" % (prefix, self.uuid) self.path = os.path.join(self.sr.path, self.lvname) self.sm_config_override = sm_config return True
# LVM commands can be costly, so check the file directly first in case
# the LV is active
found = False
for t in lvhdutil.VDI_TYPES:
    lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid)
    path = os.path.join(self.sr.path, lvname)
    if util.pathexists(path):
        if found:
            raise xs_errors.XenError('VDILoad',
                    opterr="multiple VDIs: uuid %s" % self.uuid)
        found = True
        self.vdi_type = t
        self.lvname = lvname
        self.path = path
if found:
    return True
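# For illustration (LV name prefixes assumed from lvhdutil.LV_PREFIX):
# for a VDI with uuid <uuid>, the loop above probes device paths such as
#   <sr.path>/VHD-<uuid>   (VHD vdi_type)
#   <sr.path>/LV-<uuid>    (raw vdi_type)
# and accepts whichever node already exists, erroring out if both do.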
# now list all LVs
if not lvutil._checkVG(self.sr.vgname):
    # when doing attach_from_config, the VG won't be there yet
    return False
lvs = lvhdutil.getLVInfo(self.sr.lvmCache)
if lvs.get(self.uuid):
    self._initFromLVInfo(lvs[self.uuid])
    return True
return False
"""Load VDI info for this VDI and activate the LV if it's VHD. We don't do it in VDI.load() because not all VDI operations need it.""" if self.loaded: if self.vdi_type == vhdutil.VDI_TYPE_VHD: self.sr.lvActivator.activate(self.uuid, self.lvname, False) return try: lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname) except util.CommandException, e: raise xs_errors.XenError('VDIUnavailable', opterr= '%s (LV scan error)' % os.strerror(abs(e.code))) if not lvs.get(self.uuid): raise xs_errors.XenError('VDIUnavailable', opterr='LV not found') self._initFromLVInfo(lvs[self.uuid]) if self.vdi_type == vhdutil.VDI_TYPE_VHD: self.sr.lvActivator.activate(self.uuid, self.lvname, False) vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False) if not vhdInfo: raise xs_errors.XenError('VDIUnavailable', \ opterr='getVHDInfo failed') self._initFromVHDInfo(vhdInfo) self.loaded = True
if binary:
    (count, bcount) = RefCounter.checkLocked(self.uuid,
            lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
    if (active and bcount > 0) or (not active and bcount == 0):
        return # this is a redundant activation/deactivation call
vdiList = {self.uuid: self.lvname}
if self.vdi_type == vhdutil.VDI_TYPE_VHD:
    vdiList = vhdutil.getParentChain(self.lvname,
            lvhdutil.extractUuid, self.sr.vgname)
for uuid, lvName in vdiList.iteritems():
    binaryParam = binary
    if uuid != self.uuid:
        binaryParam = False # binary param only applies to leaf nodes
    if active:
        self.sr.lvActivator.activate(uuid, lvName, binaryParam,
                persistent)
    else:
        # just add the LVs for deactivation in the final (cleanup)
        # step. The LVs must not have been activated during the current
        # operation
        self.sr.lvActivator.add(uuid, lvName, binaryParam)
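# Sketch of the effect (uuids hypothetical): for a VHD chain
#   leaf aaaa... -> parent bbbb... -> grandparent cccc...
# getParentChain() yields all three uuid -> LV-name pairs; every LV in
# the chain is activated (or queued for deactivation), but the binary
# attach-refcount is only taken on the leaf aaaa..., since binaryParam
# is forced to False for every non-leaf node above.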
try:
    self.sr._handleInterruptedCloneOp(uuid, jval, True)
    self.sr.journaler.remove(self.JRN_CLONE, uuid)
except Exception, e:
    util.SMlog('WARNING: failed to clean up failed snapshot: ' \
            '%s (error ignored)' % e)
raise xs_errors.XenError('VDIClone', opterr=msg)
if self.vdi_type == vhdutil.VDI_TYPE_RAW:
    self.sr.lvmCache.setHidden(self.lvname)
else:
    vhdutil.setHidden(self.path)
self.hidden = 1
origUtilisation = self.sr.lvmCache.getSize(self.lvname)
if self.sr.isMaster:
    # the master can prepare the VDI locally
    if attach:
        lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid)
    else:
        lvhdutil.detachThin(self.session, self.sr.lvmCache,
                self.sr.uuid, self.uuid)
else:
    fn = "attach"
    if not attach:
        fn = "detach"
    pools = self.session.xenapi.pool.get_all()
    master = self.session.xenapi.pool.get_master(pools[0])
    rv = self.session.xenapi.host.call_plugin(
            master, self.sr.THIN_PLUGIN, fn,
            {"srUuid": self.sr.uuid, "vdiUuid": self.uuid})
    util.SMlog("call-plugin returned: %s" % rv)
    if not rv:
        raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
    # refresh to pick up the size change on this slave
    self.sr.lvmCache.activateNoRefcount(self.lvname, True)
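# On a slave this boils down to a XenAPI host.call_plugin() to the pool
# master; an illustrative CLI equivalent (plugin name taken from
# self.sr.THIN_PLUGIN) would be:
#   xe host-call-plugin host-uuid=<master-uuid> plugin=<THIN_PLUGIN> \
#       fn=attach args:srUuid=<sr-uuid> args:vdiUuid=<vdi-uuid>
# The plugin returns a non-empty string on success, which is all that is
# checked here.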
self.utilisation = self.sr.lvmCache.getSize(self.lvname)
if origUtilisation != self.utilisation:
    vdi_ref = self.sr.srcmd.params['vdi_ref']
    self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
            str(self.utilisation))
    stats = lvutil._getVGstats(self.sr.vgname)
    sr_utilisation = stats['physical_utilisation']
    self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
            str(sr_utilisation))
if self.sr.legacyMode:
    return
# Synch the name_label of this VDI on storage with the name_label in XAPI
vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
update_map = {}
update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
        METADATA_OBJECT_TYPE_VDI
update_map[UUID_TAG] = self.uuid
update_map[NAME_LABEL_TAG] = util.to_plain_string( \
        self.session.xenapi.VDI.get_name_label(vdi_ref))
update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
        self.session.xenapi.VDI.get_name_description(vdi_ref))
update_map[SNAPSHOT_TIME_TAG] = \
        self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
update_map[METADATA_OF_POOL_TAG] = \
        self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
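# Example update_map (values hypothetical) as passed to
# LVMMetadataHandler.updateMetadata() for the SR's metadata volume:
#   {METADATA_UPDATE_OBJECT_TYPE_TAG: METADATA_OBJECT_TYPE_VDI,
#    UUID_TAG: '<vdi-uuid>',
#    NAME_LABEL_TAG: 'my-disk', NAME_DESCRIPTION_TAG: '',
#    SNAPSHOT_TIME_TAG: '20240101T00:00:00Z',
#    METADATA_OF_POOL_TAG: 'OpaqueRef:NULL'}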
self.sr.ensureCBTSpace()
logname = self._get_cbt_logname(self.uuid)
self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG)
logpath = super(LVHDVDI, self)._create_cbt_log()
self.sr.lvmCache.deactivateNoRefcount(logname)
return logpath
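# The CBT bitmap is kept in its own small LV (name assumed to come from
# _get_cbt_logname(), e.g. "<vdi-uuid>.cbtlog"); it is created via the
# generic VDI._create_cbt_log() path and then deactivated again right
# away, so it is only activated around actual bitmap reads/writes (see
# _activate_cbt_log/_deactivate_cbt_log below).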
logpath = self._get_cbt_logpath(self.uuid)
if self._cbt_log_exists(logpath):
    logname = self._get_cbt_logname(self.uuid)
    self.sr.lvmCache.remove(logname)
oldname = os.path.basename(oldpath)
newname = os.path.basename(newpath)
self.sr.lvmCache.rename(oldname, newname)
self.sr.lvmCache.refresh()
if not self.sr.lvmCache.is_active(lv_name):
    try:
        self.sr.lvmCache.activateNoRefcount(lv_name)
        return True
    except Exception, e:
        util.SMlog("Exception in _activate_cbt_log, "
                "Error: %s." % str(e))
else:
    return False
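# Note on the return value of the block above: True means the log LV was
# activated by this call, False means it was already active, and a
# fall-through after a failed activateNoRefcount() yields None (the
# exception itself is only logged, not re-raised).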
try:
    self.sr.lvmCache.deactivateNoRefcount(lv_name)
except Exception, e:
    util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
return lvutil.exists(logpath)
    SRCommand.run(LVHDSR, DRIVER_INFO)
else: