def clone(self, sr_uuid, vdi_uuid): dest = util.gen_uuid () args = [] args.append("vdi_clone") args.append(sr_uuid) args.append(vdi_uuid) args.append(dest) if self.hidden: raise xs_errors.XenError('VDIClone', opterr='hidden VDI') depth = vhdutil.getDepth(self.path) if depth == -1: raise xs_errors.XenError('VDIUnavailable', \ opterr='failed to get VHD depth') elif depth >= vhdutil.MAX_CHAIN_SIZE: raise xs_errors.XenError('SnapshotChainTooLong') # Test the amount of actual disk space if ENFORCE_VIRT_ALLOC: self.sr._loadvdis() reserved = self.sr.virtual_allocation sr_size = self.sr._getsize() if (sr_size - reserved) < \ ((self.size + VDI.VDIMetadataSize(SR.DEFAULT_TAP, self.size))*2): raise xs_errors.XenError('SRNoSpace') newuuid = util.gen_uuid() src = self.path dst = os.path.join(self.sr.path, "%s.%s" % (dest,self.vdi_type)) newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid,self.vdi_type)) newsrcname = "%s.%s" % (newuuid,self.vdi_type) if not self._checkpath(src): raise xs_errors.XenError('VDIUnavailable', \ opterr='VDI %s unavailable %s' % (vdi_uuid, src)) # wkcfix: multiphase util.start_log_entry(self.sr.path, self.path, args) # We assume the filehandle has been released try: try: util.ioretry(lambda: os.rename(src,newsrc)) except util.CommandException, inst: if inst.code != errno.ENOENT: # failed to rename, simply raise error util.end_log_entry(self.sr.path, self.path, ["error"]) raise try: util.ioretry(lambda: self._dualsnap(src, dst, newsrcname)) # mark the original file (in this case, its newsrc) # as hidden so that it does not show up in subsequent scans util.ioretry(lambda: self._mark_hidden(newsrc)) except util.CommandException, inst: if inst.code != errno.EIO: raise
def probe(self):
    """Probe for OCFS SRs behind this iSCSI target.

    Attaches the iSCSI session under a throwaway uuid, runs the
    underlying OCFSSR probe, then detaches.

    Raises:
        xs_errors.XenError('SRInUse'): the multipath map is already active.
        xs_errors.XenError('InvalidDev'): the LUN could not be detected.
    """
    self.uuid = util.gen_uuid()
    if self.mpath == 'true' and 'SCSIid' in self.dconf:
        # When multipathing is enabled, since we don't refcount the
        # multipath maps, we should not attempt to do the iscsi.attach/
        # detach when the map is already present, as this will remove it
        # (which may well be in use).
        maps = []
        try:
            maps = mpath_cli.list_maps()
        except Exception:
            # best effort: an unreadable map list is treated as empty
            pass
        if self.dconf['SCSIid'] in maps:
            raise xs_errors.XenError('SRInUse')
    self.iscsi.attach(self.uuid)
    if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
        util.SMlog("Unable to detect LUN")
        raise xs_errors.XenError('InvalidDev')
    self._pathrefresh(OCFSoISCSISR)
    out = OCFSSR.OCFSSR.probe(self)
    self.iscsi.detach(self.uuid)
    return out
def probe(self):
    """Probe the LVHD-over-iSCSI target for existing SRs.

    Temporarily attaches the iSCSI session and LUN, delegates to the
    LVHDSR probe, then detaches.  Refuses to probe when the device map
    is already active, since attach/detach would tear down an in-use map.

    Raises:
        xs_errors.XenError('SRInUse'): the device map is already active.
        xs_errors.XenError('InvalidDev'): the LUN could not be detected.
    """
    self.uuid = util.gen_uuid()
    # When multipathing is enabled, since we don't refcount the multipath
    # maps, we should not attempt to do the iscsi.attach/detach when the
    # map is already present, as this will remove it (which may well be
    # in use).
    if self.mpath == 'true' and 'SCSIid' in self.dconf:
        maps = []
        mpp_lun = False
        # BUG FIX: initialise link so a glob/is_RdacLun failure after
        # mpp_lun is set cannot leave it unbound at len(link) below.
        link = []
        try:
            if mpp_luncheck.is_RdacLun(self.dconf['SCSIid']):
                mpp_lun = True
                link = glob.glob('/dev/disk/mpInuse/%s-*' % self.dconf['SCSIid'])
            else:
                maps = mpath_cli.list_maps()
        except Exception:
            # best effort: failure to inspect maps means "not in use"
            pass
        if mpp_lun:
            if len(link):
                raise xs_errors.XenError('SRInUse')
        else:
            if self.dconf['SCSIid'] in maps:
                raise xs_errors.XenError('SRInUse')
    self.iscsi.attach(self.uuid)
    if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
        util.SMlog("Unable to detect LUN")
        raise xs_errors.XenError('InvalidDev')
    self._pathrefresh(LVHDoISCSISR)
    out = LVHDSR.LVHDSR.probe(self)
    self.iscsi.detach(self.uuid)
    return out
def parse(self): if len(sys.argv) <> 2: util.SMlog( "Failed to parse commandline; wrong number of arguments; argv = %s" % (repr(sys.argv))) raise xs_errors.XenError('BadRequest') try: params, methodname = xmlrpclib.loads(sys.argv[1]) self.cmd = methodname params = params[0] # expect a single struct self.params = params # params is a dictionary self.dconf = params['device_config'] if params.has_key('sr_uuid'): self.sr_uuid = params['sr_uuid'] if params.has_key('vdi_uuid'): self.vdi_uuid = params['vdi_uuid'] elif self.cmd == "vdi_create": self.vdi_uuid = util.gen_uuid() except Exception, e: util.SMlog( "Failed to parse commandline; exception = %s argv = %s" % (str(e), repr(sys.argv))) raise xs_errors.XenError('BadRequest')
def clone(self, vdi, vdi_info):
    """Clone [vdi]: give it a fresh VHD leaf and return metadata for a
    new sibling leaf sharing the same parent."""
    meta = read_metadata(self.path, vdi)
    parent = data_path(self.path, meta["data"])
    # Build two new vhd leaves on top of [vdi]'s current data
    leaves = []
    for _ in range(2):
        leaf = self.make_fresh_data_name()
        vhd.make_leaf(self.path + "/" + data_dir + "/" + leaf + vhd_suffix, parent)
        leaves.append(leaf)
    left, right = leaves
    # Re-point the original [vdi] at the first leaf
    parent_info = read_metadata(self.path, vdi)
    parent_info["data"] = left
    write_metadata(self.path, vdi, parent_info)
    # The cloned vdi's metadata points at the second leaf
    clone = make_fresh_metadata_name(self.path, self.hostname, vdi_info["name_label"])
    vdi_info["vdi"] = clone
    vdi_info["data"] = right
    vdi_info["virtual_size"] = parent_info["virtual_size"]
    vdi_info["content_id"] = parent_info["content_id"]
    if vdi_info["content_id"] == "":
        vdi_info["content_id"] = util.gen_uuid()
    vdi_info["read_only"] = parent_info["read_only"]
    write_metadata(self.path, clone, vdi_info)
    return vdi_info
def exec(self, *args, **kwargs):
    """Send a mail: args[0] is the recipient username; kwargs carry
    'subject' and 'content'."""
    try:
        username = args[0]
        subject = kwargs['subject']
        content = kwargs['content']
    except (IndexError, KeyError):
        raise BadArgsException
    try:
        recipient = User.get('username', username)
    except ObjectNotExist:
        self.write('{} does not exist.'.format(username))
        return
    body = content.replace('<br>', '\n')
    mail_uuid = 'mail-{}'.format(gen_uuid())
    Mail.create(self.user, recipient, mail_uuid, subject)
    payload = {
        'username': username,
        'uuid': mail_uuid,
        'subject': subject,
        'content': body,
    }
    self.write(json.dumps(payload))
def probe(self):
    """Probe the LVHD-over-iSCSI target for existing SRs.

    Temporarily attaches the iSCSI session and LUN, delegates to the
    LVHDSR probe, then detaches.  Refuses to probe when the device map
    is already active, since attach/detach would tear down an in-use map.

    Raises:
        xs_errors.XenError('SRInUse'): the device map is already active.
        xs_errors.XenError('InvalidDev'): the LUN could not be detected.
    """
    self.uuid = util.gen_uuid()
    # When multipathing is enabled, since we don't refcount the multipath
    # maps, we should not attempt to do the iscsi.attach/detach when the
    # map is already present, as this will remove it (which may well be
    # in use).
    if self.mpath == 'true' and 'SCSIid' in self.dconf:
        maps = []
        mpp_lun = False
        # BUG FIX: initialise link so a glob/is_RdacLun failure after
        # mpp_lun is set cannot leave it unbound at len(link) below.
        link = []
        try:
            if mpp_luncheck.is_RdacLun(self.dconf['SCSIid']):
                mpp_lun = True
                link = glob.glob('/dev/disk/mpInuse/%s-*' % self.dconf['SCSIid'])
            else:
                maps = mpath_cli.list_maps()
        except Exception:
            # best effort: failure to inspect maps means "not in use"
            pass
        if mpp_lun:
            if len(link):
                raise xs_errors.XenError('SRInUse')
        else:
            if self.dconf['SCSIid'] in maps:
                raise xs_errors.XenError('SRInUse')
    self.iscsi.attach(self.uuid)
    if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
        util.SMlog("Unable to detect LUN")
        raise xs_errors.XenError('InvalidDev')
    self._pathrefresh(LVHDoISCSISR)
    out = LVHDSR.LVHDSR.probe(self)
    self.iscsi.detach(self.uuid)
    return out
def spaceAvailableForVdis(self, count):
    """Check the metadata area can accept another VDI record.

    Works by writing a throw-away VDI record and deleting it again; any
    IOError raised by the write propagates to the caller unchanged.

    NOTE(review): `count` is currently unused -- a single dummy record is
    written regardless; confirm whether `count` records were intended.
    """
    created = False
    uuid = util.gen_uuid()
    try:
        # The easiest way to do this is to create a dummy vdi and write it.
        # (The former `except IOError: raise` was a no-op re-raise and has
        # been removed; behaviour is unchanged.)
        vdi_info = {UUID_TAG: uuid,
                    NAME_LABEL_TAG: 'dummy vdi for space check',
                    NAME_DESCRIPTION_TAG: 'dummy vdi for space check',
                    IS_A_SNAPSHOT_TAG: 0,
                    SNAPSHOT_OF_TAG: '',
                    SNAPSHOT_TIME_TAG: '',
                    TYPE_TAG: 'user',
                    VDI_TYPE_TAG: 'vhd',
                    READ_ONLY_TAG: 0,
                    MANAGED_TAG: 0,
                    'metadata_of_pool': ''}
        created = self.addVdiInternal(vdi_info)
    finally:
        if created:
            # Now delete the dummy VDI created above
            self.deleteVdi(uuid)
    return
def parse(self): if len(sys.argv) <> 2: util.SMlog( "Failed to parse commandline; wrong number of arguments; argv = %s" % (repr(sys.argv))) raise xs_errors.XenError('BadRequest') # Debug logging of the actual incoming command from the caller. # util.SMlog( "" ) # util.SMlog( "SM.parse: DEBUG: args = %s,\n%s" % \ # ( sys.argv[0], \ # util.splitXmlText( util.hideMemberValuesInXmlParams( \ # sys.argv[1] ), showContd=True ) ), \ # priority=util.LOG_DEBUG ) try: params, methodname = xmlrpclib.loads(sys.argv[1]) self.cmd = methodname params = params[0] # expect a single struct self.params = params # params is a dictionary self.dconf = params['device_config'] if params.has_key('sr_uuid'): self.sr_uuid = params['sr_uuid'] if params.has_key('vdi_uuid'): self.vdi_uuid = params['vdi_uuid'] elif self.cmd == "vdi_create": self.vdi_uuid = util.gen_uuid() except Exception, e: util.SMlog( "Failed to parse commandline; exception = %s argv = %s" % (str(e), repr(sys.argv))) raise xs_errors.XenError('BadRequest')
def _db_introduce(self):
    """Register this VDI in the XAPI database, defaulting any missing
    optional attributes, and return the new VDI ref."""
    defaults = (
        ("uuid", lambda: util.gen_uuid()),
        ("sm_config", lambda: {}),
        ("ty", lambda: "user"),
        ("is_a_snapshot", lambda: False),
        ("metadata_of_pool", lambda: "OpaqueRef:NULL"),
        ("snapshot_time", lambda: "19700101T00:00:00Z"),
        ("snapshot_of", lambda: "OpaqueRef:NULL"),
    )
    vals = {}
    for attr, fallback in defaults:
        vals[attr] = util.default(self, attr, fallback)
    return self.sr.session.xenapi.VDI.db_introduce(
        vals["uuid"], self.label, self.description, self.sr.sr_ref,
        vals["ty"], self.shareable, self.read_only, {}, self.location, {},
        vals["sm_config"], self.managed, str(self.size),
        str(self.utilisation), vals["metadata_of_pool"],
        vals["is_a_snapshot"], xmlrpclib.DateTime(vals["snapshot_time"]),
        vals["snapshot_of"])
def delete(self, sr_uuid, vdi_uuid):
    """Delete (or logically rename) an RBD-backed VDI.

    If the VDI still has snapshots it cannot be removed: the base image
    is renamed to a fresh uuid and each snapshot is tagged so a later
    rollback can find it.  Otherwise the VDI (or snapshot record) is
    deleted, clones are flattened first, and SR stats are updated.
    """
    util.SMlog("RBDVDI.delete for %s" % self.uuid)
    vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
    clones_uuids = set([])
    has_a_snapshot = False
    has_a_clone = False
    # Scan the SR for VDIs that reference this one
    for tmp_vdi in vdis:
        tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)
        tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
        if "snapshot-of" in tmp_sm_config:
            if tmp_sm_config["snapshot-of"] == vdi_uuid:
                has_a_snapshot = True
        elif "clone-of" in tmp_sm_config:
            if tmp_sm_config["clone-of"] == vdi_uuid:
                has_a_clone = True
                clones_uuids.add(tmp_vdi_uuid)
    if has_a_snapshot == True:
        # reverting of VM snapshot
        self_vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
        new_uuid = util.gen_uuid()
        self.snaps = self.session.xenapi.VDI.get_snapshots(self_vdi_ref)
        # renaming base image
        self._rename_image(vdi_uuid, new_uuid)
        for snap in self.snaps:
            util.SMlog("RBDVDI.delete set snapshot_of = %s for %s"
                       % (self.uuid, self.session.xenapi.VDI.get_uuid(snap)))
            self.session.xenapi.VDI.add_to_sm_config(
                snap, 'new_uuid', new_uuid)
            self.session.xenapi.VDI.add_to_sm_config(
                snap, 'rollback', 'true')
    else:
        # deleting of VDI
        self_vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
        self_sm_config = self.session.xenapi.VDI.get_sm_config(
            self_vdi_ref)
        if "snapshot-of" in self_sm_config:
            if has_a_clone == True:
                for clone_uuid in clones_uuids:
                    # BUG FIX: look up the clone itself (clone_uuid), not
                    # the VDI being deleted, before stripping "clone-of".
                    clone_vdi_ref = self.session.xenapi.VDI.get_by_uuid(
                        clone_uuid)
                    self.session.xenapi.VDI.remove_from_sm_config(
                        clone_vdi_ref, "clone-of")
                    self._flatten_clone(clone_uuid)
            if "compose" in self_sm_config:
                self._delete_snapshot(self_sm_config["compose_vdi1"], vdi_uuid)
                self._delete_vdi(self_sm_config["compose_vdi1"])
                self.sr.forget_vdi(self_sm_config["compose_vdi1"])
            else:
                self._delete_snapshot(self_sm_config["snapshot-of"], vdi_uuid)
        else:
            self._delete_vdi(vdi_uuid)
        self.size = int(
            self.session.xenapi.VDI.get_virtual_size(self_vdi_ref))
        self.sr._updateStats(self.sr.uuid, -self.size)
        self._db_forget()
def __init__(self, services, block, blockList):
    """Client thread: unique id plus a Proposer over the given block(s)."""
    Thread.__init__(self)
    self.clientid = str(util.gen_uuid())
    self.latency = 0
    self.master = False
    self.proposer = Proposer(self.clientid, services, block, blockList)
    dbg.dbg("Client %s" % self.clientid)
def parse(self): if len(sys.argv) <> 2: util.SMlog("Failed to parse commandline; wrong number of arguments; argv = %s" % (repr(sys.argv))) raise xs_errors.XenError('BadRequest') # Debug logging of the actual incoming command from the caller. # util.SMlog( "" ) # util.SMlog( "SM.parse: DEBUG: args = %s,\n%s" % \ # ( sys.argv[0], \ # util.splitXmlText( util.hideMemberValuesInXmlParams( \ # sys.argv[1] ), showContd=True ) ), \ # priority=syslog.LOG_DEBUG ) try: params, methodname = xmlrpclib.loads(sys.argv[1]) self.cmd = methodname params = params[0] # expect a single struct self.params = params # params is a dictionary self.dconf = params['device_config'] if params.has_key('sr_uuid'): self.sr_uuid = params['sr_uuid'] if params.has_key('vdi_uuid'): self.vdi_uuid = params['vdi_uuid'] elif self.cmd == "vdi_create": self.vdi_uuid = util.gen_uuid () except Exception, e: util.SMlog("Failed to parse commandline; exception = %s argv = %s" % (str(e), repr(sys.argv))) raise xs_errors.XenError('BadRequest')
def __init__(self, services, path):
    """Client thread: seed the RNG, mint an id, build a Proposer."""
    Thread.__init__(self)
    random.seed(time.time())
    self.clientid = str(util.gen_uuid())
    self.latency = 0
    self.master = False
    self.proposer = Proposer(self.clientid, services, path)
    dbg.dbg("Client %s" % self.clientid)
def probe(self):
    """Attach the LUN under a throwaway uuid, probe the base SR type,
    then detach again."""
    self.uuid = util.gen_uuid()
    self.iscsi.attach(self.uuid)
    if self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
        result = super(LVMoISCSISR, self).probe()
        self.iscsi.detach(self.uuid)
        return result
    util.SMlog("Unable to detect LUN")
    raise xs_errors.XenError('InvalidDev')
def cmd_init(metasync, args, opts):
    "initialize the repo (e.g., metasync init [namespace])"
    # Use the caller-supplied namespace, or mint a random one to avoid
    # conflicts with other repositories.
    if args:
        ns = args[0]
    else:
        ns = str(util.gen_uuid())
    if not metasync.cmd_init(ns):
        dbg.err("Can't initialize the repository")
        return -1
def _db_introduce(self):
    """Introduce the VDI using the legacy introduce-then-set call style
    and return the new VDI ref."""
    new_uuid = util.default(self, "uuid", lambda: util.gen_uuid())
    config = util.default(self, "sm_config", lambda: {})
    xenapi = self.sr.session.xenapi
    vdi = xenapi.VDI.db_introduce(
        new_uuid, self.label, self.description, self.sr.sr_ref, "user",
        self.shareable, self.read_only, {}, self.location, {}, {})
    # Older introduce signature: the remaining fields are set one by one.
    xenapi.VDI.set_sm_config(vdi, config)
    xenapi.VDI.set_managed(vdi, self.managed)
    xenapi.VDI.set_virtual_size(vdi, str(self.size))
    xenapi.VDI.set_physical_utilisation(vdi, str(self.utilisation))
    return vdi
def create(self, sr_uuid, size):
    """Re-sign an existing LVHD-over-iSCSI SR under new UUIDs.

    Attaches the iSCSI device, regenerates UUIDs for the VG and every
    VHD LV, rewrites the SR metadata accordingly, drops all snapshots,
    then deactivates the volumes and logs out of the target.

    NOTE(review): this method always ends by raising XenError -- the
    "success" path is reported to the caller as that final error whose
    text says the resign succeeded; confirm callers expect this.
    """
    try:
        # attach the device
        util.SMlog("Trying to attach iscsi disk")
        self.iscsi.attach(sr_uuid)
        if not self.iscsi.attached:
            raise xs_errors.XenError('SRNotAttached')
        util.SMlog("Attached iscsi disk at %s \n" % self.iscsi.path)
        try:
            # generate new UUIDs for VG and LVs
            old_vg_name = self._getVgName(self.dconf['device'])
            lvm_config_dict = self._getLvmInfo(old_vg_name)
            lvUuidMap = {}  # Maps old lv uuids to new uuids
            for lv_name in lvm_config_dict[old_vg_name]['logical_volumes']:
                if lv_name == MDVOLUME_NAME:
                    # the metadata volume keeps its name
                    continue
                oldUuid = lv_name[4:]  # remove the VHD-
                lvUuidMap[oldUuid] = util.gen_uuid()
            new_vg_name = VG_PREFIX + sr_uuid
            self._resignLvm(sr_uuid, old_vg_name, lvUuidMap, lvm_config_dict)
            # causes creation of nodes and activates the lvm volumes
            LVHDSR.LVHDSR.load(self, sr_uuid)
            new_vdi_info = self._resignSrMetadata(new_vg_name, self.uuid, lvUuidMap)
            self._resignVdis(new_vg_name, lvUuidMap)
            self._deleteAllSnapshots(new_vdi_info)
            # Detach LVM
            self.lvmCache.deactivateNoRefcount(MDVOLUME_NAME)
            for newUuid in lvUuidMap.values():
                new_lv_name = self.LV_VHD_PREFIX + newUuid
                self.lvmCache.deactivateNoRefcount(new_lv_name)
        except:
            util.logException("RESIGN_CREATE")
            raise
    finally:
        # always log out of the target, even on failure
        iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN, all=True)
    raise xs_errors.XenError(
        "The SR has been successfully resigned. Use the lvmoiscsi type to attach it"
    )
def _loadvdis(self):
    """Scan the location directory and register one SHMVDI per entry.

    No-op when self.vdis is already populated.  The scan is best-effort:
    an unreadable location simply yields no VDIs.
    """
    if self.vdis:
        return
    try:
        for name in util.listdir(self.dconf['location']):
            if name != "":
                self.vdis[name] = SHMVDI(self, util.gen_uuid(), name)
    except Exception:
        # was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; deliberate best-effort behaviour kept
        pass
def deactivate(self, vdi):
    """Detach [vdi]: stamp a fresh content_id if it has none, then close
    its tapdisk and re-open a dummy vhd in its place."""
    meta = read_metadata(self.path, vdi)
    if meta["content_id"] == "":
        meta["content_id"] = util.gen_uuid()
        write_metadata(self.path, vdi, meta)
    data = meta["data"]
    try:
        tapdisk = self.tapdisks[data]
    except KeyError:
        raise Backend_error("VDI_NOT_ATTACHED", [ vdi ])
    tapdisk.close()
    self.open_dummy_vhd(meta, tapdisk)
def create(self, sr_uuid, size):
    """Re-sign an existing LVHD-over-iSCSI SR under new UUIDs.

    Attaches the iSCSI device, regenerates UUIDs for the VG and every
    VHD LV, rewrites the SR metadata accordingly, drops all snapshots,
    then deactivates the volumes and logs out of the target.

    NOTE(review): this method always ends by raising XenError -- the
    "success" path is reported to the caller as that final error whose
    text says the resign succeeded; confirm callers expect this.
    """
    try:
        # attach the device
        util.SMlog("Trying to attach iscsi disk")
        self.iscsi.attach(sr_uuid)
        if not self.iscsi.attached:
            raise xs_errors.XenError('SRNotAttached')
        util.SMlog("Attached iscsi disk at %s \n" % self.iscsi.path)
        try:
            # generate new UUIDs for VG and LVs
            old_vg_name = self._getVgName(self.dconf['device'])
            lvm_config_dict = self._getLvmInfo(old_vg_name)
            lvUuidMap = {}  # Maps old lv uuids to new uuids
            for lv_name in lvm_config_dict[old_vg_name]['logical_volumes']:
                if lv_name == MDVOLUME_NAME:
                    # the metadata volume keeps its name
                    continue
                oldUuid = lv_name[4:]  # remove the VHD-
                lvUuidMap[oldUuid] = util.gen_uuid()
            new_vg_name = VG_PREFIX + sr_uuid
            self._resignLvm(sr_uuid, old_vg_name, lvUuidMap, lvm_config_dict)
            # causes creation of nodes and activates the lvm volumes
            LVHDSR.LVHDSR.load(self, sr_uuid)
            new_vdi_info = self._resignSrMetadata(new_vg_name, self.uuid, lvUuidMap)
            self._resignVdis(new_vg_name, lvUuidMap)
            self._deleteAllSnapshots(new_vdi_info)
            # Detach LVM
            self.lvmCache.deactivateNoRefcount(MDVOLUME_NAME)
            for newUuid in lvUuidMap.values():
                new_lv_name = self.LV_VHD_PREFIX + newUuid
                self.lvmCache.deactivateNoRefcount(new_lv_name)
        except:
            util.logException("RESIGN_CREATE")
            raise
    finally:
        # always log out of the target, even on failure
        iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN, all=True)
    raise xs_errors.XenError("The SR has been successfully resigned. Use the lvmoiscsi type to attach it")
def clone(self, sr_uuid, vdi_uuid):
    """Dummy clone: introduce a zero-sized writable VDI under a fresh
    uuid, run the corner-case tests, and return its params."""
    self.sr._assertValues(['sr_uuid','args','host_ref','device_config','command','sr_ref'])
    assert(len(self.sr.srcmd.params['args'])==0)
    clone_uuid = util.gen_uuid()
    clone_vdi = VDI.VDI(self.sr, clone_uuid)
    clone_vdi.read_only = False
    clone_vdi.location = clone_uuid
    clone_vdi.size = 0
    clone_vdi.utilisation = 0
    clone_vdi._db_introduce()
    self.run_corner_cases_tests()
    return clone_vdi.get_params()
def probe(self):
    """Dummy probe: fabricate a single SR record and return it as XML."""
    # N.B. There are no SR references
    self._assertValues(['args','host_ref','session_ref','device_config','command'])
    assert(len(self.srcmd.params['args'])==0)
    # One fake SR entry with a fixed size, keyed by a fresh uuid
    dummy_srs = {util.gen_uuid(): {'size': 1024}}
    # Return the Probe XML
    return util.SRtoXML(dummy_srs)
def delete(self, sr_uuid, vdi_uuid):
    """Delete (or logically rename) an RBD-backed VDI.

    If the VDI still has snapshots it cannot be removed: the base image
    is renamed to a fresh uuid and each snapshot is tagged so a later
    rollback can find it.  Otherwise the VDI (or snapshot record) is
    deleted, clones are flattened first, and SR stats are updated.
    """
    util.SMlog("RBDVDI.delete for %s" % self.uuid)
    vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
    clones_uuids = set([])
    has_a_snapshot = False
    has_a_clone = False
    # Scan the SR for VDIs that reference this one
    for tmp_vdi in vdis:
        tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)
        tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
        if "snapshot-of" in tmp_sm_config:
            if tmp_sm_config["snapshot-of"] == vdi_uuid:
                has_a_snapshot = True
        elif "clone-of" in tmp_sm_config:
            if tmp_sm_config["clone-of"] == vdi_uuid:
                has_a_clone = True
                clones_uuids.add(tmp_vdi_uuid)
    if has_a_snapshot == True:
        # reverting of VM snapshot
        self_vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
        new_uuid = util.gen_uuid()
        self.snaps = self.session.xenapi.VDI.get_snapshots(self_vdi_ref)
        # renaming base image
        self._rename_image(vdi_uuid, new_uuid)
        for snap in self.snaps:
            util.SMlog("RBDVDI.delete set snapshot_of = %s for %s" % (self.uuid, self.session.xenapi.VDI.get_uuid(snap)))
            self.session.xenapi.VDI.add_to_sm_config(snap, 'new_uuid', new_uuid)
            self.session.xenapi.VDI.add_to_sm_config(snap, 'rollback', 'true')
    else:
        # deleting of VDI
        self_vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
        self_sm_config = self.session.xenapi.VDI.get_sm_config(self_vdi_ref)
        if "snapshot-of" in self_sm_config:
            if has_a_clone == True:
                for clone_uuid in clones_uuids:
                    # BUG FIX: look up the clone itself (clone_uuid), not
                    # the VDI being deleted, before stripping "clone-of".
                    clone_vdi_ref = self.session.xenapi.VDI.get_by_uuid(clone_uuid)
                    self.session.xenapi.VDI.remove_from_sm_config(clone_vdi_ref, "clone-of")
                    self._flatten_clone(clone_uuid)
            if "compose" in self_sm_config:
                self._delete_snapshot(self_sm_config["compose_vdi1"], vdi_uuid)
                self._delete_vdi(self_sm_config["compose_vdi1"])
                self.sr.forget_vdi(self_sm_config["compose_vdi1"])
            else:
                self._delete_snapshot(self_sm_config["snapshot-of"], vdi_uuid)
        else:
            self._delete_vdi(vdi_uuid)
        self.size = int(self.session.xenapi.VDI.get_virtual_size(self_vdi_ref))
        self.sr._updateStats(self.sr.uuid, -self.size)
        self._db_forget()
def _db_introduce(self):
    """Introduce this VDI record into the XAPI database.

    Missing optional attributes are defaulted; whitelisted fields from
    the caller-supplied vdi_sm_config are passed through into sm_config.

    Returns:
        The new VDI ref.
    """
    uuid = util.default(self, "uuid", lambda: util.gen_uuid())
    sm_config = util.default(self, "sm_config", lambda: {})
    # was: has_key(...) -- dict.has_key is removed in Python 3
    if "vdi_sm_config" in self.sr.srcmd.params:
        # Only pass through whitelisted keys
        for key in SM_CONFIG_PASS_THROUGH_FIELDS:
            val = self.sr.srcmd.params["vdi_sm_config"].get(key)
            if val:
                sm_config[key] = val
    ty = util.default(self, "ty", lambda: "user")
    is_a_snapshot = util.default(self, "is_a_snapshot", lambda: False)
    metadata_of_pool = util.default(self, "metadata_of_pool", lambda: "OpaqueRef:NULL")
    snapshot_time = util.default(self, "snapshot_time", lambda: "19700101T00:00:00Z")
    snapshot_of = util.default(self, "snapshot_of", lambda: "OpaqueRef:NULL")
    vdi = self.sr.session.xenapi.VDI.db_introduce(
        uuid, self.label, self.description, self.sr.sr_ref, ty,
        self.shareable, self.read_only, {}, self.location, {}, sm_config,
        self.managed, str(self.size), str(self.utilisation),
        metadata_of_pool, is_a_snapshot,
        xmlrpclib.DateTime(snapshot_time), snapshot_of)
    return vdi
def _snapshot(self, sr_uuid, vdi_uuid):
    """Take an RBD snapshot of vdi_uuid and introduce it as a new VDI.

    Returns:
        The new snapshot RBDVDI object (already introduced in the DB).
    """
    util.SMlog("RBDVDI._snapshot: sr_uuid=%s, vdi_uuid=%s" % (sr_uuid, vdi_uuid))
    #secondary = None
    #if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
    #    raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    # NOTE(review): sm_config is fetched but never used below
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    base_uuid = vdi_uuid
    snap_uuid = util.gen_uuid()
    vdi_meta = self._get_vdi_meta(vdi_uuid)
    # was: has_key(...) -- dict.has_key is removed in Python 3
    if 'VDI_LABEL' in vdi_meta:
        orig_label = vdi_meta['VDI_LABEL']
    else:
        orig_label = ''
    snapVDI = RBDVDI(self.sr, snap_uuid, "%s%s" % (orig_label, " (snapshot)"))
    self._do_snapshot(base_uuid, snap_uuid)
    #snapVDI.path = self.sr._get_snap_path(base_uuid, snap_uuid)
    snapVDI.path = self.sr._get_path(snap_uuid)
    snapVDI.issnap = True
    snapVDI.read_only = True
    snapVDI.location = snapVDI.uuid
    snapVDI.snapshot_of = vdi_ref
    snapVDI.size = self.session.xenapi.VDI.get_virtual_size(vdi_ref)
    snapVDI.sm_config["vdi_type"] = 'aio'
    snapVDI.sm_config["snapshot-of"] = base_uuid
    snap_vdi_ref = snapVDI._db_introduce()
    # mirror the parent's size/utilisation on the snapshot record
    self.session.xenapi.VDI.set_physical_utilisation(
        snap_vdi_ref,
        self.session.xenapi.VDI.get_physical_utilisation(vdi_ref))
    self.session.xenapi.VDI.set_virtual_size(
        snap_vdi_ref, self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    self.sr._updateStats(self.sr.uuid, self.size)
    #blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
    return snapVDI
def clone(self, sr_uuid, vdi_uuid, dest): args = [] args.append("vdi_clone") args.append(sr_uuid) args.append(vdi_uuid) args.append(dest) # Test the amount of actual disk space if ENFORCE_VIRT_ALLOC: self.sr._loadvdis() reserved = self.sr.virtual_allocation sr_size = self.sr._getsize() if (sr_size - reserved) < \ ((self.size + VDI.VDIMetadataSize(SR.DEFAULT_TAP, self.size))*2): raise xs_errors.XenError('SRNoSpace') newuuid = util.gen_uuid() src = self.path dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type)) newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type)) if not self._checkpath(src): raise xs_errors.XenError('VDIUnavailable', \ opterr='VDI %s unavailable %s' % (vdi_uuid, src)) # wkcfix: multiphase util.start_log_entry(self.sr.path, self.path, args) # We assume the filehandle has been released try: try: util.ioretry(lambda: os.rename(src, newsrc)) except util.CommandException, inst: if inst.code != errno.ENOENT: self._clonecleanup(src, dst, newsrc) util.end_log_entry(self.sr.path, self.path, ["error"]) raise try: util.ioretry(lambda: self._dualsnap(src, dst, newsrc)) # mark the original file (in this case, its newsrc) # as hidden so that it does not show up in subsequent scans util.ioretry(lambda: self._mark_hidden(newsrc)) except util.CommandException, inst: if inst.code != errno.EIO: self._clonecleanup(src, dst, newsrc) util.end_log_entry(self.sr.path, self.path, ["error"]) raise
def _db_introduce(self):
    """Introduce this VDI into the XAPI DB, filling unset optional
    attributes with their conventional defaults; returns the VDI ref."""
    vdi_uuid = util.default(self, "uuid", lambda: util.gen_uuid())
    config = util.default(self, "sm_config", lambda: {})
    vdi_type = util.default(self, "ty", lambda: "user")
    snap_flag = util.default(self, "is_a_snapshot", lambda: False)
    pool_meta = util.default(self, "metadata_of_pool", lambda: "OpaqueRef:NULL")
    snap_time = util.default(self, "snapshot_time", lambda: "19700101T00:00:00Z")
    snap_src = util.default(self, "snapshot_of", lambda: "OpaqueRef:NULL")
    return self.sr.session.xenapi.VDI.db_introduce(
        vdi_uuid, self.label, self.description, self.sr.sr_ref, vdi_type,
        self.shareable, self.read_only, {}, self.location, {}, config,
        self.managed, str(self.size), str(self.utilisation), pool_meta,
        snap_flag, xmlrpclib.DateTime(snap_time), snap_src)
def exec(self, *args, **kwargs):
    """Create a post: args[0] names the board; kwargs carry 'title'
    and 'content'."""
    try:
        board_name = args[0]
        title = kwargs['title']
        content = kwargs['content']
    except (IndexError, KeyError):
        raise BadArgsException
    try:
        board = Board.get('name', board_name)
    except ObjectNotExist:
        self.write('Board does not exist.')
        return
    post_uuid = 'post-{}'.format(gen_uuid())
    body = content.replace('<br>', '\n')
    Post.create(board.id, title, post_uuid, self.user)
    self.write(json.dumps({'uuid': post_uuid, 'content': body}))
def _snapshot(self, sr_uuid, vdi_uuid):
    """Take an RBD snapshot of vdi_uuid and introduce it as a new VDI.

    Returns:
        The new snapshot RBDVDI object (already introduced in the DB).
    """
    util.SMlog("RBDVDI.snapshot for %s" % (vdi_uuid))
    #secondary = None
    #if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
    #    raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    # NOTE(review): sm_config is fetched but never used below
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    base_uuid = vdi_uuid
    snap_uuid = util.gen_uuid()
    vdi_info = self._get_vdi_info(vdi_uuid)
    # was: has_key(...) -- dict.has_key is removed in Python 3
    if 'VDI_LABEL' in vdi_info:
        orig_label = vdi_info['VDI_LABEL']
    else:
        orig_label = ''
    snapVDI = RBDVDI(self.sr, snap_uuid, "%s%s" % (orig_label, " (snapshot)"))
    self._do_snapshot(base_uuid, snap_uuid)
    snapVDI.path = self.sr._get_snap_path(base_uuid, snap_uuid)
    snapVDI.issnap = True
    snapVDI.read_only = True
    snapVDI.location = snapVDI.uuid
    snapVDI.snapshot_of = vdi_ref
    snapVDI.size = self.session.xenapi.VDI.get_virtual_size(vdi_ref)
    snapVDI.sm_config["vdi_type"] = 'aio'
    snapVDI.sm_config["snapshot-of"] = base_uuid
    snap_vdi_ref = snapVDI._db_introduce()
    # mirror the parent's size/utilisation on the snapshot record
    self.session.xenapi.VDI.set_physical_utilisation(
        snap_vdi_ref,
        self.session.xenapi.VDI.get_physical_utilisation(vdi_ref))
    self.session.xenapi.VDI.set_virtual_size(
        snap_vdi_ref, self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    self.sr._updateStats(self.sr.uuid, self.size)
    #blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
    return snapVDI
def parse(self): if len(sys.argv) <> 2: util.SMlog("Failed to parse commandline; wrong number of arguments; argv = %s" % (repr(sys.argv))) raise xs_errors.XenError("BadRequest") try: params, methodname = xmlrpclib.loads(sys.argv[1]) self.cmd = methodname params = params[0] # expect a single struct self.params = params # params is a dictionary self.dconf = params["device_config"] if params.has_key("sr_uuid"): self.sr_uuid = params["sr_uuid"] if params.has_key("vdi_uuid"): self.vdi_uuid = params["vdi_uuid"] elif self.cmd == "vdi_create": self.vdi_uuid = util.gen_uuid() except Exception, e: util.SMlog("Failed to parse commandline; exception = %s argv = %s" % (str(e), repr(sys.argv))) raise xs_errors.XenError("BadRequest")
def load(self, sr_uuid):
    """Initialise the iSCSI-backed LVM SR driver state.

    Be extremely careful not to throw exceptions here since this
    function is the main one used by all operations including probing
    and creating.  Handles the Rio upgrade case by deriving and
    persisting a SCSIid on the PBD.

    Raises:
        xs_errors.XenError('ConfigSCSIid'): no SCSIid configured or derivable.
    """
    if not sr_uuid:
        # This is a probe call, generate a temp sr_uuid
        sr_uuid = util.gen_uuid()
    driver = SR.driver('iscsi')
    self.iscsi = driver(self.original_srcmd, sr_uuid)
    pbd = None
    try:
        pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
    except Exception:
        # No PBD yet (e.g. during create/probe) -- that is fine.
        pass
    # was: has_key/<> -- both removed in Python 3
    if 'SCSIid' not in self.dconf and 'LUNid' in self.dconf and pbd is not None:
        # UPGRADE FROM RIO: add SCSIid key to device_config
        util.SMlog("Performing upgrade from Rio")
        scsiid = self._getSCSIid_from_LUN(sr_uuid)
        device_config = self.session.xenapi.PBD.get_device_config(pbd)
        device_config['SCSIid'] = scsiid
        device_config['upgraded_from_rio'] = 'true'
        self.session.xenapi.PBD.set_device_config(pbd, device_config)
        self.dconf['SCSIid'] = scsiid
    # Apart from the upgrade case, user must specify a SCSIid
    if 'SCSIid' not in self.dconf:
        self._LUNprint(sr_uuid)
        raise xs_errors.XenError('ConfigSCSIid')
    self.SCSIid = self.dconf['SCSIid']
    self._pathrefresh(LVMoISCSISR)
    super(LVMoISCSISR, self).load(sr_uuid)
def _db_introduce(self):
    """Introduce this VDI record into the XAPI database.

    Missing optional attributes are defaulted; whitelisted fields from
    the caller-supplied vdi_sm_config are passed through into sm_config.

    Returns:
        The new VDI ref.
    """
    uuid = util.default(self, "uuid", lambda: util.gen_uuid())
    sm_config = util.default(self, "sm_config", lambda: {})
    # was: has_key(...) -- dict.has_key is removed in Python 3
    if "vdi_sm_config" in self.sr.srcmd.params:
        # Only pass through whitelisted keys
        for key in SM_CONFIG_PASS_THROUGH_FIELDS:
            val = self.sr.srcmd.params["vdi_sm_config"].get(key)
            if val:
                sm_config[key] = val
    ty = util.default(self, "ty", lambda: "user")
    is_a_snapshot = util.default(self, "is_a_snapshot", lambda: False)
    metadata_of_pool = util.default(self, "metadata_of_pool", lambda: "OpaqueRef:NULL")
    snapshot_time = util.default(self, "snapshot_time", lambda: "19700101T00:00:00Z")
    snapshot_of = util.default(self, "snapshot_of", lambda: "OpaqueRef:NULL")
    vdi = self.sr.session.xenapi.VDI.db_introduce(
        uuid, self.label, self.description, self.sr.sr_ref, ty,
        self.shareable, self.read_only, {}, self.location, {}, sm_config,
        self.managed, str(self.size), str(self.utilisation),
        metadata_of_pool, is_a_snapshot,
        xmlrpclib.DateTime(snapshot_time), snapshot_of)
    return vdi
def exec(self, *args):
    """Add a comment to post args[0]; the rest of the raw command line
    is the comment body."""
    if len(args) < 2:
        raise BadArgsException
    # Strip the command word, then the post id, from the raw command line
    body = self.raw_command.strip()[8:].strip()
    body = body[len(args[0]):].strip()
    try:
        post = Post.get('id', args[0])
    except ObjectNotExist:
        self.write('Post does not exist.')
        return
    comment_uuid = 'comment-{}'.format(gen_uuid())
    author_name = User.get('id', post.author_id).username
    Comment.create(post, self.user, comment_uuid)
    reply = {
        'username': author_name,
        'uuid': comment_uuid,
        'content': body,
    }
    self.write(json.dumps(reply))
def load(self, sr_uuid):
    """Initialise the iSCSI-backed LVM SR driver state.

    Be extremely careful not to throw exceptions here since this
    function is the main one used by all operations including probing
    and creating.  Handles the Rio upgrade case by deriving and
    persisting a SCSIid on the PBD.

    Raises:
        xs_errors.XenError('ConfigSCSIid'): no SCSIid configured or derivable.
    """
    if not sr_uuid:
        # This is a probe call, generate a temp sr_uuid
        sr_uuid = util.gen_uuid()
    driver = SR.driver('iscsi')
    self.iscsi = driver(self.original_srcmd, sr_uuid)
    pbd = None
    try:
        pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
    except Exception:
        # No PBD yet (e.g. during create/probe) -- that is fine.
        pass
    # was: has_key/<> -- both removed in Python 3
    if 'SCSIid' not in self.dconf and 'LUNid' in self.dconf and pbd is not None:
        # UPGRADE FROM RIO: add SCSIid key to device_config
        util.SMlog("Performing upgrade from Rio")
        scsiid = self._getSCSIid_from_LUN(sr_uuid)
        device_config = self.session.xenapi.PBD.get_device_config(pbd)
        device_config['SCSIid'] = scsiid
        device_config['upgraded_from_rio'] = 'true'
        self.session.xenapi.PBD.set_device_config(pbd, device_config)
        self.dconf['SCSIid'] = scsiid
    # Apart from the upgrade case, user must specify a SCSIid
    if 'SCSIid' not in self.dconf:
        self._LUNprint(sr_uuid)
        raise xs_errors.XenError('ConfigSCSIid')
    self.SCSIid = self.dconf['SCSIid']
    self._pathrefresh(LVMoISCSISR)
    super(LVMoISCSISR, self).load(sr_uuid)
def load(self, sr_uuid):
    """Initialise the LVHD-over-iSCSI SR.

    Builds one iSCSI base-driver instance per target session (multi-target /
    wildcard-IQN configs fan out to several), persists the discovered
    session list into the PBD's 'multiSession' key, performs the legacy
    Rio LUNid->SCSIid upgrade, and finally delegates to LVHDSR.load.
    """
    if not sr_uuid:
        # This is a probe call, generate a temp sr_uuid
        sr_uuid = util.gen_uuid()
    driver = SR.driver('iscsi')
    if self.original_srcmd.dconf.has_key('target'):
        self.original_srcmd.dconf['targetlist'] = self.original_srcmd.dconf['target']
    iscsi = driver(self.original_srcmd, sr_uuid)
    self.iscsiSRs = []
    self.iscsiSRs.append(iscsi)
    # NOTE(review): find(',') == 0 is only true when 'target' *begins* with
    # a comma -- this mirrors the upstream convention for multi-target
    # configs; confirm before changing.
    if self.dconf['target'].find(',') == 0 or self.dconf['targetIQN'] == "*":
        # Instantiate multiple sessions
        self.iscsiSRs = []
        if self.dconf['targetIQN'] == "*":
            IQN = "any"
        else:
            IQN = self.dconf['targetIQN']
        dict = {}
        try:
            if self.dconf.has_key('multiSession'):
                # Reuse the previously discovered session list.
                IQNs = self.dconf['multiSession'].split("|")
                for IQN in IQNs:
                    if IQN:
                        dict[IQN] = ""
            else:
                # Discover sessions afresh from each configured target.
                for tgt in self.dconf['target'].split(','):
                    try:
                        tgt_ip = util._convertDNS(tgt)
                    except:
                        raise xs_errors.XenError('DNSError')
                    iscsilib.ensure_daemon_running_ok(iscsi.localIQN)
                    map = iscsilib.discovery(tgt_ip,iscsi.port,iscsi.chapuser,iscsi.chappassword,targetIQN=IQN)
                    util.SMlog("Discovery for IP %s returned %s" % (tgt,map))
                    for i in range(0,len(map)):
                        (portal,tpgt,iqn) = map[i]
                        (ipaddr,port) = portal.split(',')[0].split(':')
                        key = "%s,%s,%s" % (ipaddr,port,iqn)
                        dict[key] = ""
            # Compose the IQNstring first
            IQNstring = ""
            for key in dict.iterkeys():
                IQNstring += "%s|" % key
            # Now load the individual iSCSI base classes
            for key in dict.iterkeys():
                (ipaddr,port,iqn) = key.split(',')
                srcmd_copy = copy.deepcopy(self.original_srcmd)
                srcmd_copy.dconf['target'] = ipaddr
                srcmd_copy.dconf['targetIQN'] = iqn
                srcmd_copy.dconf['multiSession'] = IQNstring
                util.SMlog("Setting targetlist: %s" % srcmd_copy.dconf['targetlist'])
                self.iscsiSRs.append(driver(srcmd_copy, sr_uuid))
            # Persist the discovered session list on the PBD, first time only.
            pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
            if pbd <> None and not self.dconf.has_key('multiSession'):
                dconf = self.session.xenapi.PBD.get_device_config(pbd)
                dconf['multiSession'] = IQNstring
                self.session.xenapi.PBD.set_device_config(pbd, dconf)
        except:
            # Best-effort: log and fall through with whatever sessions we have.
            util.logException("LVHDoISCSISR.load")
    self.iscsi = self.iscsiSRs[0]
    # Be extremely careful not to throw exceptions here since this function
    # is the main one used by all operations including probing and creating
    pbd = None
    try:
        pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
    except:
        pass
    if not self.dconf.has_key('SCSIid') and self.dconf.has_key('LUNid') and pbd <> None:
        # UPGRADE FROM RIO: add SCSIid key to device_config
        util.SMlog("Performing upgrade from Rio")
        scsiid = self._getSCSIid_from_LUN(sr_uuid)
        device_config = self.session.xenapi.PBD.get_device_config(pbd)
        device_config['SCSIid'] = scsiid
        device_config['upgraded_from_rio'] = 'true'
        self.session.xenapi.PBD.set_device_config(pbd, device_config)
        self.dconf['SCSIid'] = scsiid
    # Apart from the upgrade case, user must specify a SCSIid
    if not self.dconf.has_key('SCSIid'):
        self._LUNprint(sr_uuid)
        raise xs_errors.XenError('ConfigSCSIid')
    self.SCSIid = self.dconf['SCSIid']
    self._pathrefresh(LVHDoISCSISR)
    LVHDSR.LVHDSR.load(self, sr_uuid)
def gen_rdmfile():
    """Return a fresh, unique scratch-file path under /tmp."""
    return "/tmp/" + util.gen_uuid()
def clone(self, sr_uuid, snap_uuid):
    """Clone an RBD snapshot VDI.

    Three paths:
      * sharable source VDI: return its params unchanged;
      * snapshot flagged with sm_config['rollback'] == 'true': revert the
        VM to the snapshot (rollback) and introduce the reverted base VDI;
      * otherwise: produce a regular RBD clone of the snapshot.
    Returns the params string of the resulting VDI.
    """
    util.SMlog("RBDVDI.clone for %s snapshot"% (snap_uuid))
    snap_vdi_ref = self.session.xenapi.VDI.get_by_uuid(snap_uuid)
    if self.session.xenapi.VDI.get_sharable(snap_vdi_ref):
        return snap_vdi_ref.get_params()
    snap_sm_config = self.session.xenapi.VDI.get_sm_config(snap_vdi_ref)
    if snap_sm_config.has_key("snapshot-of"):
        base_uuid = snap_sm_config["snapshot-of"]
    else:
        # Source is not itself a snapshot: snapshot it first, then clone
        # from that snapshot.
        snapVDI = self._snapshot(sr_uuid, snap_uuid)
        base_uuid = snap_uuid
        snap_uuid = snapVDI.uuid
        self.sr.scan(self.sr.uuid)
    util.SMlog("RBDVDI.clone base_uuid = %s"% (base_uuid))
    if snap_sm_config.has_key("rollback"):
        if snap_sm_config["rollback"] == 'true':
            util.SMlog("RBDVDI.clone reverting %s to %s"% (snap_uuid, base_uuid))
            # executing rollback of snapshot (reverting VM to snapshot)
            new_uuid = snap_sm_config["new_uuid"]
            self._rollback_snapshot(new_uuid, snap_uuid)
            baseVDI = RBDVDI(self.sr, new_uuid, self.session.xenapi.VDI.get_name_label(snap_vdi_ref))
            baseVDI.path = self.sr._get_path(new_uuid)
            baseVDI.location = baseVDI.uuid
            baseVDI.size = self.session.xenapi.VDI.get_virtual_size(snap_vdi_ref)
            baseVDI.sm_config["vdi_type"] = 'aio'
            baseVDI.sm_config["reverted"] = 'true'
            base_vdi_ref = baseVDI._db_introduce()
            # Re-point every sibling snapshot of the rolled-back VDI at the
            # new base uuid and clear its rollback markers.
            vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
            for tmp_vdi in vdis:
                tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)
                tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
                if tmp_sm_config.has_key("rollback"):
                    if tmp_sm_config.has_key("new_uuid"):
                        if tmp_sm_config["new_uuid"] == new_uuid:
                            sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
                            del sm_config['snapshot-of']
                            sm_config['snapshot-of'] = new_uuid
                            del sm_config['rollback']
                            del sm_config['new_uuid']
                            self.session.xenapi.VDI.set_sm_config(tmp_vdi, sm_config)
            return baseVDI.get_params()
        # NOTE(review): if 'rollback' is present but not 'true' this method
        # falls through and returns None -- confirm callers tolerate that.
    else:
        # Regular clone path.
        base_vdi_info = self._get_vdi_info(base_uuid)
        base_vdi_ref = self.session.xenapi.VDI.get_by_uuid(base_uuid)
        if base_vdi_info.has_key('VDI_LABEL'):
            base_vdi_label = base_vdi_info['VDI_LABEL']
        else:
            base_vdi_label = ''
        clone_uuid = util.gen_uuid()
        cloneVDI = RBDVDI(self.sr, clone_uuid, base_vdi_label)
        self._do_clone(base_uuid, snap_uuid, clone_uuid, base_vdi_label)
        cloneVDI.path = self.sr._get_path(clone_uuid)
        cloneVDI.location = cloneVDI.uuid
        cloneVDI.sm_config["vdi_type"] = 'aio'
        cloneVDI.sm_config["clone-of"] = snap_uuid
        clone_vdi_ref = cloneVDI._db_introduce()
        # Mirror size/utilisation from the base VDI onto the clone record.
        self.session.xenapi.VDI.set_physical_utilisation(clone_vdi_ref,
                self.session.xenapi.VDI.get_physical_utilisation(base_vdi_ref))
        self.session.xenapi.VDI.set_virtual_size(clone_vdi_ref,
                self.session.xenapi.VDI.get_virtual_size(base_vdi_ref))
        self.sr._updateStats(self.sr.uuid,
                self.session.xenapi.VDI.get_virtual_size(base_vdi_ref))
        return cloneVDI.get_params()
def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
    """Snapshot/clone this file-backed VHD VDI.

    The current file becomes the hidden read-only base copy (renamed to a
    fresh uuid); a new leaf is snapped on top for the original uuid, and
    for SNAPSHOT_DOUBLE a second user-visible leaf ('dest') is created.
    Both children are introduced into the XAPI database.

    snap_type: one of VDI.SNAPSHOT_SINGLE / SNAPSHOT_DOUBLE / SNAPSHOT_INTERNAL.
    cbtlog: when set, the new snapshot leaf is marked CBT-enabled.
    Raises VDIClone / VDIUnavailable / SnapshotChainTooLong / SRNoSpace.
    """
    util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

    args = []
    args.append("vdi_clone")
    args.append(self.sr.uuid)
    args.append(self.uuid)

    dest = None
    dst = None
    if snap_type == VDI.SNAPSHOT_DOUBLE:
        # Second, user-visible leaf only exists for double snapshots.
        dest = util.gen_uuid()
        dst = os.path.join(self.sr.path, "%s.%s" % (dest,self.vdi_type))
        args.append(dest)

    if self.hidden:
        raise xs_errors.XenError('VDIClone', opterr='hidden VDI')

    depth = vhdutil.getDepth(self.path)
    if depth == -1:
        raise xs_errors.XenError('VDIUnavailable', \
              opterr='failed to get VHD depth')
    elif depth >= vhdutil.MAX_CHAIN_SIZE:
        raise xs_errors.XenError('SnapshotChainTooLong')

    # Test the amount of actual disk space
    if ENFORCE_VIRT_ALLOC:
        self.sr._loadvdis()
        reserved = self.sr.virtual_allocation
        sr_size = self.sr._getsize()
        # A double snapshot creates two new VHDs; single/internal only one.
        num_vdis = 2
        if (snap_type == VDI.SNAPSHOT_SINGLE or
                snap_type == VDI.SNAPSHOT_INTERNAL):
            num_vdis = 1
        if (sr_size - reserved) < ((self.size + VDI.VDIMetadataSize( \
                vhdutil.VDI_TYPE_VHD, self.size)) * num_vdis):
            raise xs_errors.XenError('SRNoSpace')

    # newuuid names the hidden base copy the current file will become.
    newuuid = util.gen_uuid()
    src = self.path
    newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid,self.vdi_type))
    newsrcname = "%s.%s" % (newuuid,self.vdi_type)

    if not self._checkpath(src):
        raise xs_errors.XenError('VDIUnavailable', \
              opterr='VDI %s unavailable %s' % (self.uuid, src))

    # wkcfix: multiphase
    util.start_log_entry(self.sr.path, self.path, args)

    # We assume the filehandle has been released
    try:
        util.ioretry(lambda: os.rename(src,newsrc))

        # Create the snapshot under a temporary name, then rename
        # it afterwards. This avoids a small window where it exists
        # but is invalid. We do not need to do this for
        # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed
        # before so nobody will try to query it.
        tmpsrc = "%s.%s" % (src, "new")
        util.ioretry(lambda: self._snap(tmpsrc, newsrcname))
        util.ioretry(lambda: os.rename(tmpsrc, src))
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            util.ioretry(lambda: self._snap(dst, newsrcname))
        # mark the original file (in this case, its newsrc)
        # as hidden so that it does not show up in subsequent scans
        util.ioretry(lambda: self._mark_hidden(newsrc))

        #Verify parent locator field of both children and delete newsrc if unused
        introduce_parent = True
        try:
            srcparent = util.ioretry(lambda: self._query_p_uuid(src))
            dstparent = None
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                dstparent = util.ioretry(lambda: self._query_p_uuid(dst))
            # If no child actually points at newuuid, the base copy is
            # unused -- remove it and skip introducing it.
            if srcparent != newuuid and \
                    (snap_type == VDI.SNAPSHOT_SINGLE or \
                    snap_type == VDI.SNAPSHOT_INTERNAL or \
                    dstparent != newuuid):
                util.ioretry(lambda: os.unlink(newsrc))
                introduce_parent = False
        except:
            pass

        # Introduce the new VDI records
        leaf_vdi = None
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            leaf_vdi = VDI.VDI(self.sr, dest) # user-visible leaf VDI
            leaf_vdi.read_only = False
            leaf_vdi.location = dest
            leaf_vdi.size = self.size
            leaf_vdi.utilisation = self.utilisation
            leaf_vdi.sm_config = {}
            leaf_vdi.sm_config['vhd-parent'] = dstparent
            # If the parent is encrypted set the key_hash
            # for the new snapshot disk
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            if "key_hash" in sm_config:
                leaf_vdi.sm_config['key_hash'] = sm_config['key_hash']
            # If we have CBT enabled on the VDI,
            # set CBT status for the new snapshot disk
            if cbtlog:
                leaf_vdi.cbt_enabled = True

        base_vdi = None
        if introduce_parent:
            base_vdi = VDI.VDI(self.sr, newuuid) # readonly parent
            base_vdi.label = "base copy"
            base_vdi.read_only = True
            base_vdi.location = newuuid
            base_vdi.size = self.size
            base_vdi.utilisation = self.utilisation
            base_vdi.sm_config = {}
            grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc))
            # Only record a vhd-parent if the base copy itself has one.
            if grandparent.find("no parent") == -1:
                base_vdi.sm_config['vhd-parent'] = grandparent

        try:
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                leaf_vdi_ref = leaf_vdi._db_introduce()
                util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \
                        (leaf_vdi_ref,dest))
            if introduce_parent:
                base_vdi_ref = base_vdi._db_introduce()
                # Base copies are unmanaged: invisible to the user.
                self.session.xenapi.VDI.set_managed(base_vdi_ref, False)
                util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref,newuuid))
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            sm_config['vhd-parent'] = srcparent
            self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
        except Exception, e:
            util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e)))
            # Note it's too late to actually clean stuff up here: the base disk has
            # been marked as deleted already.
            util.end_log_entry(self.sr.path, self.path, ["error"])
            raise
    except util.CommandException, inst:
        # XXX: it might be too late if the base disk has been marked as deleted!
        self._clonecleanup(src,dst,newsrc)
        util.end_log_entry(self.sr.path, self.path, ["error"])
        raise xs_errors.XenError('VDIClone',
              opterr='VDI clone failed error %d' % inst.code)
def load(self, sr_uuid):
    """Initialise the OCFS-over-iSCSI SR.

    Fans out to one iSCSI base driver per session (multi-target or
    wildcard IQN), then verifies that the configured SCSIid is reachable
    through the first target; if not, it probes the remaining targets and
    reorders iscsiSRs so the matching one leads. Finally delegates to
    OCFSSR.load.
    """
    if not sr_uuid:
        # This is a probe call, generate a temp sr_uuid
        sr_uuid = util.gen_uuid()
    driver = SR.driver('iscsi')
    if self.original_srcmd.dconf.has_key('target'):
        self.original_srcmd.dconf[
            'targetlist'] = self.original_srcmd.dconf['target']
    iscsi = driver(self.original_srcmd, sr_uuid)
    self.iscsiSRs = []
    self.iscsiSRs.append(iscsi)
    # NOTE(review): find(',') == 0 is only true when 'target' begins with a
    # comma -- matches the upstream multi-target convention; confirm.
    if self.dconf['target'].find(
            ',') == 0 or self.dconf['targetIQN'] == "*":
        # Instantiate multiple sessions
        self.iscsiSRs = []
        if self.dconf['targetIQN'] == "*":
            IQN = "any"
        else:
            IQN = self.dconf['targetIQN']
        dict = {}
        IQNstring = ""
        IQNs = []
        try:
            if self.dconf.has_key('multiSession'):
                # Reuse the persisted session list verbatim.
                IQNs = self.dconf['multiSession'].split("|")
                for IQN in IQNs:
                    if IQN:
                        dict[IQN] = ""
                    else:
                        try:
                            IQNs.remove(IQN)
                        except:
                            # Exceptions are not expected but just in case
                            pass
                # Order in multiSession must be preserved. It is important for dual-controllers.
                # IQNstring cannot be built with a dictionary iteration because of this
                IQNstring = self.dconf['multiSession']
            else:
                # Fresh discovery against every configured target.
                for tgt in self.dconf['target'].split(','):
                    try:
                        tgt_ip = util._convertDNS(tgt)
                    except:
                        raise xs_errors.XenError('DNSError')
                    iscsilib.ensure_daemon_running_ok(iscsi.localIQN)
                    map = iscsilib.discovery(tgt_ip, iscsi.port,
                                             iscsi.chapuser,
                                             iscsi.chappassword,
                                             targetIQN=IQN)
                    util.SMlog("Discovery for IP %s returned %s" % (tgt, map))
                    for i in range(0, len(map)):
                        (portal, tpgt, iqn) = map[i]
                        (ipaddr, port) = iscsilib.parse_IP_port(portal)
                        # Skip portals that do not answer.
                        try:
                            util._testHost(ipaddr, long(port), 'ISCSITarget')
                        except:
                            util.SMlog("Target Not reachable: (%s:%s)"
                                       % (ipaddr, port))
                            continue
                        key = "%s,%s,%s" % (ipaddr, port, iqn)
                        dict[key] = ""
            # Again, do not mess up with IQNs order. Dual controllers will benefit from that
            if IQNstring == "":
                # Compose the IQNstring first
                for key in dict.iterkeys():
                    IQNstring += "%s|" % key
                # Reinitialize and store iterator
                key_iterator = dict.iterkeys()
            else:
                key_iterator = IQNs
            # Now load the individual iSCSI base classes
            for key in key_iterator:
                (ipaddr, port, iqn) = key.split(',')
                srcmd_copy = copy.deepcopy(self.original_srcmd)
                srcmd_copy.dconf['target'] = ipaddr
                srcmd_copy.dconf['targetIQN'] = iqn
                srcmd_copy.dconf['multiSession'] = IQNstring
                util.SMlog("Setting targetlist: %s"
                           % srcmd_copy.dconf['targetlist'])
                self.iscsiSRs.append(driver(srcmd_copy, sr_uuid))
            # Persist the session list on the PBD on first discovery.
            pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
            if pbd <> None and not self.dconf.has_key('multiSession'):
                dconf = self.session.xenapi.PBD.get_device_config(pbd)
                dconf['multiSession'] = IQNstring
                self.session.xenapi.PBD.set_device_config(pbd, dconf)
        except:
            # Best-effort: log and continue with whatever sessions exist.
            util.logException("OCFSoISCSISR.load")
    self.iscsi = self.iscsiSRs[0]
    # Be extremely careful not to throw exceptions here since this function
    # is the main one used by all operations including probing and creating
    pbd = None
    try:
        pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
    except:
        pass
    # Apart from the upgrade case, user must specify a SCSIid
    if not self.dconf.has_key('SCSIid'):
        # Dual controller issue
        self.LUNs = {}  # Dict for LUNs from all the iscsi objects
        for ii in range(0, len(self.iscsiSRs)):
            self.iscsi = self.iscsiSRs[ii]
            self._LUNprint(sr_uuid)
            for key in self.iscsi.LUNs:
                self.LUNs[key] = self.iscsi.LUNs[key]
        self.print_LUNs_XML()
        self.iscsi = self.iscsiSRs[0]  # back to original value
        raise xs_errors.XenError('ConfigSCSIid')
    self.SCSIid = self.dconf['SCSIid']
    # This block checks if the first iscsi target contains the right SCSIid.
    # If not it scans the other iscsi targets because chances are that more
    # than one controller is present
    dev_match = False
    forced_login = False
    # No need to check if only one iscsi target is present
    if len(self.iscsiSRs) == 1:
        pass
    else:
        target_success = False
        attempt_discovery = False
        for iii in range(0, len(self.iscsiSRs)):
            # Check we didn't leave any iscsi session open
            # If exceptions happened before, the cleanup function has worked on the right target.
            if forced_login == True:
                try:
                    iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
                    iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
                    forced_login = False
                except:
                    raise xs_errors.XenError('ISCSILogout')
            self.iscsi = self.iscsiSRs[iii]
            util.SMlog("path %s" % self.iscsi.path)
            util.SMlog("iscsci data: targetIQN %s, portal %s" %
                       (self.iscsi.targetIQN, self.iscsi.target))
            iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
            if not iscsilib._checkTGT(self.iscsi.targetIQN):
                attempt_discovery = True
                try:
                    # Ensure iscsi db has been populated
                    map = iscsilib.discovery(
                        self.iscsi.target,
                        self.iscsi.port,
                        self.iscsi.chapuser,
                        self.iscsi.chappassword,
                        targetIQN=self.iscsi.targetIQN)
                    if len(map) == 0:
                        util.SMlog(
                            "Discovery for iscsi data targetIQN %s,"
                            " portal %s returned empty list"
                            " Trying another path if available" %
                            (self.iscsi.targetIQN, self.iscsi.target))
                        continue
                except:
                    util.SMlog("Discovery failed for iscsi data targetIQN"
                               " %s, portal %s. Trying another path if"
                               " available" %
                               (self.iscsi.targetIQN, self.iscsi.target))
                    continue
                try:
                    iscsilib.login(self.iscsi.target,
                                   self.iscsi.targetIQN,
                                   self.iscsi.chapuser,
                                   self.iscsi.chappassword,
                                   self.iscsi.incoming_chapuser,
                                   self.iscsi.incoming_chappassword,
                                   self.mpath == "true")
                except:
                    util.SMlog("Login failed for iscsi data targetIQN %s,"
                               " portal %s. Trying another path"
                               " if available" %
                               (self.iscsi.targetIQN, self.iscsi.target))
                    continue
                target_success = True
                forced_login = True
            # A session should be active.
            if not util.wait_for_path(self.iscsi.path, ISCSISR.MAX_TIMEOUT):
                util.SMlog("%s has no associated LUNs" % self.iscsi.targetIQN)
                continue
            scsiid_path = "/dev/disk/by-id/scsi-" + self.SCSIid
            if not util.wait_for_path(scsiid_path, ISCSISR.MAX_TIMEOUT):
                util.SMlog("%s not found" % scsiid_path)
                continue
            # Look for a LUN on this path whose SCSIid matches the config.
            for file in filter(self.iscsi.match_lun,
                               util.listdir(self.iscsi.path)):
                lun_path = os.path.join(self.iscsi.path, file)
                lun_dev = scsiutil.getdev(lun_path)
                try:
                    lun_scsiid = scsiutil.getSCSIid(lun_dev)
                except:
                    util.SMlog("getSCSIid failed on %s in iscsi %s: LUN"
                               " offline or iscsi path down" %
                               (lun_dev, self.iscsi.path))
                    continue
                util.SMlog("dev from lun %s %s" % (lun_dev, lun_scsiid))
                if lun_scsiid == self.SCSIid:
                    util.SMlog("lun match in %s" % self.iscsi.path)
                    dev_match = True
                    # No more need to raise ISCSITarget exception.
                    # Resetting attempt_discovery
                    attempt_discovery = False
                    break
            if dev_match:
                if iii == 0:
                    break
                util.SMlog("IQN reordering needed")
                new_iscsiSRs = []
                IQNs = {}
                IQNstring = ""
                # iscsiSRs can be seen as a circular buffer: the head now is the matching one
                for kkk in range(iii, len(self.iscsiSRs)) + range(0, iii):
                    new_iscsiSRs.append(self.iscsiSRs[kkk])
                    ipaddr = self.iscsiSRs[kkk].target
                    port = self.iscsiSRs[kkk].port
                    iqn = self.iscsiSRs[kkk].targetIQN
                    key = "%s,%s,%s" % (ipaddr, port, iqn)
                    # The final string must preserve the order without repetition
                    if not IQNs.has_key(key):
                        IQNs[key] = ""
                        IQNstring += "%s|" % key
                util.SMlog("IQNstring is now %s" % IQNstring)
                self.iscsiSRs = new_iscsiSRs
                util.SMlog("iqn %s is leading now" % self.iscsiSRs[0].targetIQN)
                # Updating pbd entry, if any
                try:
                    pbd = util.find_my_pbd(self.session, self.host_ref,
                                           self.sr_ref)
                    if pbd <> None and self.dconf.has_key('multiSession'):
                        util.SMlog("Updating multiSession in PBD")
                        dconf = self.session.xenapi.PBD.get_device_config(
                            pbd)
                        dconf['multiSession'] = IQNstring
                        self.session.xenapi.PBD.set_device_config(
                            pbd, dconf)
                except:
                    pass
                break
        if not target_success and attempt_discovery:
            raise xs_errors.XenError('ISCSITarget')
    # Check for any unneeded open iscsi sessions
    if forced_login == True:
        try:
            iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
            iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
            forced_login = False
        except:
            raise xs_errors.XenError('ISCSILogout')
    self._pathrefresh(OCFSoISCSISR, load=False)
    OCFSSR.OCFSSR.load(self, sr_uuid)
def cmd_init(self, namespace, backend=None, nreplicas=None, encrypt_key=None):
    """Initialise a new sync repository.

    Creates the local metadata/object directories, writes the config file
    (core identity + backend services), then pushes the client-agnostic
    config and the initial head/prev pointers to the remote backends and
    joins via Paxos. Returns True on success, False when already
    initialised or when the initial load fails.
    """
    # already initialized?
    if self.check_sanity():
        dbg.err("already initialized %s (%s)" \
                % (self.path_root, self.namespace))
        return False

    os.mkdir(self.path_meta)
    os.mkdir(self.path_objs)

    # build config opts
    conf = util.new_config()

    # core: unique/permanent info about local machine (often called client)
    # NOTE. not sure if encryption_key should be in core, or unchangable
    conf.add_section('core')
    conf.set('core', 'namespace', namespace)
    conf.set('core', 'clientid', util.gen_uuid())
    conf.set('core', 'encryptkey', _get_conf_encryptkey(encrypt_key))

    # backend: info about sync service providers
    # XXX: Error handling
    conf.add_section('backend')
    try:
        services = _get_conf_services(backend)
        conf.set('backend', 'services', services)
        conf.set('backend', 'nreplicas',
                 _get_conf_nreplicas(nreplicas, len(services.split(","))))
    except:
        pass

    # flush
    with open(self.path_conf, "w") as fd:
        conf.write(fd)

    try:
        self._load()
    except NameError:
        # Roll back the partially created metadata directory.
        shutil.rmtree(self.path_meta)
        return False

    # put config into remote
    # (strip client-local secrets before sharing the config)
    conf.remove_option('core', 'clientid')
    conf.remove_option('core', 'encryptkey')
    with io.BytesIO() as out:
        conf.write(out)
        val = out.getvalue()
    # Config is content-addressed by the first 6 chars of its sha1.
    configname = util.sha1(val)
    self._put_all_content(val,
                          self.get_remote_path("configs/%s" % configname[:6]),
                          True)
    #temporary --- move this to pPaxos
    #self._put_all_content(configname[:6], self.get_remote_path("config"), True)
    # Format for master: headhash.config[:6].version
    prev_master = "." + configname[:6] + ".0"
    # do we need both? or shall we put them into a file together.
    with open(self.get_head(), "w") as f:
        f.write(prev_master)
    with open(self.get_prev(), "w") as f:
        f.write(prev_master)
    self._put_all_dir(self.get_remote_path("objects"))
    # change to put_content
    self._put_all(self.get_head(), self.get_remote_path(self.get_head_name()))
    self._put_all(self.get_prev(), self.get_remote_path(self.get_prev_name()))

    from paxos import Proposer
    self.proposer = Proposer(None, self.services,
                             self.get_pPaxos_path(prev_master))
    self._join()
    return True
def _snapshot(self, snap_type): util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type)) args = [] args.append("vdi_clone") args.append(self.sr.uuid) args.append(self.uuid) dest = None dst = None if snap_type == self.SNAPSHOT_DOUBLE: dest = util.gen_uuid() dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type)) args.append(dest) if self.hidden: raise xs_errors.XenError('VDIClone', opterr='hidden VDI') depth = vhdutil.getDepth(self.path) if depth == -1: raise xs_errors.XenError('VDIUnavailable', \ opterr='failed to get VHD depth') elif depth >= vhdutil.MAX_CHAIN_SIZE: raise xs_errors.XenError('SnapshotChainTooLong') # Test the amount of actual disk space if ENFORCE_VIRT_ALLOC: self.sr._loadvdis() reserved = self.sr.virtual_allocation sr_size = self.sr._getsize() num_vdis = 2 if snap_type == self.SNAPSHOT_SINGLE: num_vdis = 1 if (sr_size - reserved) < ((self.size + VDI.VDIMetadataSize( \ vhdutil.VDI_TYPE_VHD, self.size)) * num_vdis): raise xs_errors.XenError('SRNoSpace') newuuid = util.gen_uuid() src = self.path newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type)) newsrcname = "%s.%s" % (newuuid, self.vdi_type) if not self._checkpath(src): raise xs_errors.XenError('VDIUnavailable', \ opterr='VDI %s unavailable %s' % (self.uuid, src)) # wkcfix: multiphase util.start_log_entry(self.sr.path, self.path, args) # We assume the filehandle has been released try: try: util.ioretry(lambda: os.rename(src, newsrc)) except util.CommandException, inst: if inst.code != errno.ENOENT: # failed to rename, simply raise error util.end_log_entry(self.sr.path, self.path, ["error"]) raise try: util.ioretry(lambda: self._snap(src, newsrcname)) if snap_type == self.SNAPSHOT_DOUBLE: util.ioretry(lambda: self._snap(dst, newsrcname)) # mark the original file (in this case, its newsrc) # as hidden so that it does not show up in subsequent scans util.ioretry(lambda: self._mark_hidden(newsrc)) except util.CommandException, inst: if inst.code != 
errno.EIO: raise
def load(self, sr_uuid):
    """Initialise the LVHD-over-iSCSI SR (multi-path aware variant).

    Fans out to one iSCSI base driver per session (multi-target or
    wildcard IQN), then verifies that the configured SCSIid is visible
    through the first target; if not, probes the remaining targets and
    rotates iscsiSRs so the matching target leads. Finally delegates to
    LVHDSR.load.
    """
    if not sr_uuid:
        # This is a probe call, generate a temp sr_uuid
        sr_uuid = util.gen_uuid()
    driver = SR.driver('iscsi')
    if self.original_srcmd.dconf.has_key('target'):
        self.original_srcmd.dconf['targetlist'] = self.original_srcmd.dconf['target']
    iscsi = driver(self.original_srcmd, sr_uuid)
    self.iscsiSRs = []
    self.iscsiSRs.append(iscsi)
    # NOTE(review): find(',') == 0 is only true when 'target' begins with a
    # comma -- matches the upstream multi-target convention; confirm.
    if self.dconf['target'].find(',') == 0 or self.dconf['targetIQN'] == "*":
        # Instantiate multiple sessions
        self.iscsiSRs = []
        if self.dconf['targetIQN'] == "*":
            IQN = "any"
        else:
            IQN = self.dconf['targetIQN']
        dict = {}
        IQNstring = ""
        IQNs = []
        try:
            if self.dconf.has_key('multiSession'):
                # Reuse the persisted session list verbatim.
                IQNs = self.dconf['multiSession'].split("|")
                for IQN in IQNs:
                    if IQN:
                        dict[IQN] = ""
                    else:
                        try:
                            IQNs.remove(IQN)
                        except:
                            # Exceptions are not expected but just in case
                            pass
                # Order in multiSession must be preserved. It is important for dual-controllers.
                # IQNstring cannot be built with a dictionary iteration because of this
                IQNstring = self.dconf['multiSession']
            else:
                # Fresh discovery against every configured target.
                for tgt in self.dconf['target'].split(','):
                    try:
                        tgt_ip = util._convertDNS(tgt)
                    except:
                        raise xs_errors.XenError('DNSError')
                    iscsilib.ensure_daemon_running_ok(iscsi.localIQN)
                    map = iscsilib.discovery(tgt_ip,iscsi.port,iscsi.chapuser,iscsi.chappassword,targetIQN=IQN)
                    util.SMlog("Discovery for IP %s returned %s" % (tgt,map))
                    for i in range(0,len(map)):
                        (portal,tpgt,iqn) = map[i]
                        (ipaddr, port) = iscsilib.parse_IP_port(portal)
                        # Skip portals that do not answer.
                        try:
                            util._testHost(ipaddr, long(port), 'ISCSITarget')
                        except:
                            util.SMlog("Target Not reachable: (%s:%s)" % (ipaddr, port))
                            continue
                        key = "%s,%s,%s" % (ipaddr,port,iqn)
                        dict[key] = ""
            # Again, do not mess up with IQNs order. Dual controllers will benefit from that
            if IQNstring == "":
                # Compose the IQNstring first
                for key in dict.iterkeys():
                    IQNstring += "%s|" % key
                # Reinitialize and store iterator
                key_iterator = dict.iterkeys()
            else:
                key_iterator = IQNs
            # Now load the individual iSCSI base classes
            for key in key_iterator:
                (ipaddr,port,iqn) = key.split(',')
                srcmd_copy = copy.deepcopy(self.original_srcmd)
                srcmd_copy.dconf['target'] = ipaddr
                srcmd_copy.dconf['targetIQN'] = iqn
                srcmd_copy.dconf['multiSession'] = IQNstring
                util.SMlog("Setting targetlist: %s" % srcmd_copy.dconf['targetlist'])
                self.iscsiSRs.append(driver(srcmd_copy, sr_uuid))
            # Persist the session list on the PBD on first discovery.
            pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
            if pbd <> None and not self.dconf.has_key('multiSession'):
                dconf = self.session.xenapi.PBD.get_device_config(pbd)
                dconf['multiSession'] = IQNstring
                self.session.xenapi.PBD.set_device_config(pbd, dconf)
        except:
            # Best-effort: log and continue with whatever sessions exist.
            util.logException("LVHDoISCSISR.load")
    self.iscsi = self.iscsiSRs[0]
    # Be extremely careful not to throw exceptions here since this function
    # is the main one used by all operations including probing and creating
    pbd = None
    try:
        pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
    except:
        pass
    # Apart from the upgrade case, user must specify a SCSIid
    if not self.dconf.has_key('SCSIid'):
        # Dual controller issue
        self.LUNs = {}  # Dict for LUNs from all the iscsi objects
        for ii in range(0, len(self.iscsiSRs)):
            self.iscsi = self.iscsiSRs[ii]
            self._LUNprint(sr_uuid)
            for key in self.iscsi.LUNs:
                self.LUNs[key] = self.iscsi.LUNs[key]
        self.print_LUNs_XML()
        self.iscsi = self.iscsiSRs[0]  # back to original value
        raise xs_errors.XenError('ConfigSCSIid')
    self.SCSIid = self.dconf['SCSIid']
    # This block checks if the first iscsi target contains the right SCSIid.
    # If not it scans the other iscsi targets because chances are that more
    # than one controller is present
    dev_match = False
    forced_login = False
    # No need to check if only one iscsi target is present
    if len(self.iscsiSRs) == 1:
        pass
    else:
        target_success = False
        attempt_discovery = False
        for iii in range(0, len(self.iscsiSRs)):
            # Check we didn't leave any iscsi session open
            # If exceptions happened before, the cleanup function has worked on the right target.
            if forced_login == True:
                try:
                    iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
                    iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
                    forced_login = False
                except:
                    raise xs_errors.XenError('ISCSILogout')
            self.iscsi = self.iscsiSRs[iii]
            util.SMlog("path %s" %self.iscsi.path)
            util.SMlog("iscsci data: targetIQN %s, portal %s" %
                       (self.iscsi.targetIQN, self.iscsi.target))
            iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
            if not iscsilib._checkTGT(self.iscsi.targetIQN):
                attempt_discovery = True
                try:
                    # Ensure iscsi db has been populated
                    map = iscsilib.discovery(
                        self.iscsi.target,
                        self.iscsi.port,
                        self.iscsi.chapuser,
                        self.iscsi.chappassword,
                        targetIQN=self.iscsi.targetIQN)
                    if len(map) == 0:
                        util.SMlog("Discovery for iscsi data targetIQN %s,"
                                   " portal %s returned empty list"
                                   " Trying another path if available" %
                                   (self.iscsi.targetIQN, self.iscsi.target))
                        continue
                except:
                    util.SMlog("Discovery failed for iscsi data targetIQN"
                               " %s, portal %s. Trying another path if"
                               " available" %
                               (self.iscsi.targetIQN, self.iscsi.target))
                    continue
                try:
                    iscsilib.login(self.iscsi.target,
                                   self.iscsi.targetIQN,
                                   self.iscsi.chapuser,
                                   self.iscsi.chappassword,
                                   self.iscsi.incoming_chapuser,
                                   self.iscsi.incoming_chappassword,
                                   self.mpath == "true")
                except:
                    util.SMlog("Login failed for iscsi data targetIQN %s,"
                               " portal %s. Trying another path"
                               " if available" %
                               (self.iscsi.targetIQN, self.iscsi.target))
                    continue
                target_success = True;
                forced_login = True
            # A session should be active.
            if not util.wait_for_path(self.iscsi.path, ISCSISR.MAX_TIMEOUT):
                util.SMlog("%s has no associated LUNs" % self.iscsi.targetIQN)
                continue
            scsiid_path = "/dev/disk/by-id/scsi-" + self.SCSIid
            if not util.wait_for_path(scsiid_path, ISCSISR.MAX_TIMEOUT):
                util.SMlog("%s not found" %scsiid_path)
                continue
            # Look for a LUN on this path whose SCSIid matches the config.
            for file in filter(self.iscsi.match_lun,
                               util.listdir(self.iscsi.path)):
                lun_path = os.path.join(self.iscsi.path,file)
                lun_dev = scsiutil.getdev(lun_path)
                try:
                    lun_scsiid = scsiutil.getSCSIid(lun_dev)
                except:
                    util.SMlog("getSCSIid failed on %s in iscsi %s: LUN"
                               " offline or iscsi path down" %
                               (lun_dev, self.iscsi.path))
                    continue
                util.SMlog("dev from lun %s %s" %(lun_dev, lun_scsiid))
                if lun_scsiid == self.SCSIid:
                    util.SMlog("lun match in %s" %self.iscsi.path)
                    dev_match = True
                    # No more need to raise ISCSITarget exception.
                    # Resetting attempt_discovery
                    attempt_discovery = False
                    break
            if dev_match:
                if iii == 0:
                    break
                util.SMlog("IQN reordering needed")
                new_iscsiSRs = []
                IQNs = {}
                IQNstring = ""
                # iscsiSRs can be seen as a circular buffer: the head now is the matching one
                for kkk in range(iii, len(self.iscsiSRs)) + range(0, iii):
                    new_iscsiSRs.append(self.iscsiSRs[kkk])
                    ipaddr = self.iscsiSRs[kkk].target
                    port = self.iscsiSRs[kkk].port
                    iqn = self.iscsiSRs[kkk].targetIQN
                    key = "%s,%s,%s" % (ipaddr,port,iqn)
                    # The final string must preserve the order without repetition
                    if not IQNs.has_key(key):
                        IQNs[key] = ""
                        IQNstring += "%s|" % key
                util.SMlog("IQNstring is now %s" %IQNstring)
                self.iscsiSRs = new_iscsiSRs
                util.SMlog("iqn %s is leading now" %self.iscsiSRs[0].targetIQN)
                # Updating pbd entry, if any
                try:
                    pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
                    if pbd <> None and self.dconf.has_key('multiSession'):
                        util.SMlog("Updating multiSession in PBD")
                        dconf = self.session.xenapi.PBD.get_device_config(pbd)
                        dconf['multiSession'] = IQNstring
                        self.session.xenapi.PBD.set_device_config(pbd, dconf)
                except:
                    pass
                break
        if not target_success and attempt_discovery:
            raise xs_errors.XenError('ISCSITarget')
    # Check for any unneeded open iscsi sessions
    if forced_login == True:
        try:
            iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
            iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
            forced_login = False
        except:
            raise xs_errors.XenError('ISCSILogout')
    self._pathrefresh(LVHDoISCSISR, load = False)
    LVHDSR.LVHDSR.load(self, sr_uuid)
def _snapshot(self, snap_type):
    # Take a VHD snapshot of this file-backed VDI.
    #
    # snap_type: one of self.SNAPSHOT_SINGLE / SNAPSHOT_DOUBLE /
    #            SNAPSHOT_INTERNAL.  For SNAPSHOT_DOUBLE a second,
    #            user-visible child ("dest"/"dst") is created as well.
    # Raises xs_errors.XenError on hidden VDI, unavailable path, chain
    # too long, or insufficient SR space.
    #
    # NOTE(review): the outer "try:" below has no visible except/finally
    # in this chunk — the handler presumably follows in the original
    # file; this view is truncated.
    util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

    # Journal arguments recorded via start_log_entry for crash recovery.
    args = []
    args.append("vdi_clone")
    args.append(self.sr.uuid)
    args.append(self.uuid)
    dest = None
    dst = None
    if snap_type == self.SNAPSHOT_DOUBLE:
        # Second (writable) child only exists for double snapshots.
        dest = util.gen_uuid()
        dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
        args.append(dest)
    if self.hidden:
        raise xs_errors.XenError("VDIClone", opterr="hidden VDI")
    # Refuse to extend a VHD chain beyond the supported maximum depth.
    depth = vhdutil.getDepth(self.path)
    if depth == -1:
        raise xs_errors.XenError("VDIUnavailable", opterr="failed to get VHD depth")
    elif depth >= vhdutil.MAX_CHAIN_SIZE:
        raise xs_errors.XenError("SnapshotChainTooLong")
    # Test the amount of actual disk space
    if ENFORCE_VIRT_ALLOC:
        self.sr._loadvdis()
        reserved = self.sr.virtual_allocation
        sr_size = self.sr._getsize()
        # Single/internal snapshots create one new VHD, double creates two.
        num_vdis = 2
        if snap_type == self.SNAPSHOT_SINGLE or snap_type == self.SNAPSHOT_INTERNAL:
            num_vdis = 1
        if (sr_size - reserved) < ((self.size + VDI.VDIMetadataSize(vhdutil.VDI_TYPE_VHD, self.size)) * num_vdis):
            raise xs_errors.XenError("SRNoSpace")
    # "newsrc" becomes the hidden base copy; the original path keeps the
    # user-visible leaf.
    newuuid = util.gen_uuid()
    src = self.path
    newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
    newsrcname = "%s.%s" % (newuuid, self.vdi_type)
    if not self._checkpath(src):
        raise xs_errors.XenError("VDIUnavailable", opterr="VDI %s unavailable %s" % (self.uuid, src))
    # wkcfix: multiphase
    util.start_log_entry(self.sr.path, self.path, args)
    # We assume the filehandle has been released
    try:
        try:
            util.ioretry(lambda: os.rename(src, newsrc))
        except util.CommandException, inst:
            if inst.code != errno.ENOENT:
                # failed to rename, simply raise error
                util.end_log_entry(self.sr.path, self.path, ["error"])
                raise
            # ENOENT is tolerated — presumably the rename already took
            # place on an earlier attempt; TODO confirm against the
            # recovery path in the original file.
        try:
            # Re-create the leaf(s) as children of the renamed base.
            util.ioretry(lambda: self._snap(src, newsrcname))
            if snap_type == self.SNAPSHOT_DOUBLE:
                util.ioretry(lambda: self._snap(dst, newsrcname))
            # mark the original file (in this case, its newsrc)
            # as hidden so that it does not show up in subsequent scans
            util.ioretry(lambda: self._mark_hidden(newsrc))
        except util.CommandException, inst:
            if inst.code != errno.EIO:
                raise
def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
    # Take a VHD snapshot of this file-backed VDI and introduce the new
    # VDI record(s) into the XenAPI database.
    #
    # snap_type:        VDI.SNAPSHOT_SINGLE / SNAPSHOT_DOUBLE / SNAPSHOT_INTERNAL;
    #                   SNAPSHOT_DOUBLE additionally creates a user-visible
    #                   leaf VDI ("dest"/"dst").
    # cbtlog:           if truthy, the new leaf is flagged cbt_enabled.
    # cbt_consistency:  accepted but not read in this visible block.
    # Raises xs_errors.XenError (VDIClone, VDIUnavailable,
    # SnapshotChainTooLong, SRNoSpace) on the various failure paths.
    util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type))

    # Journal arguments recorded via start_log_entry for crash recovery.
    args = []
    args.append("vdi_clone")
    args.append(self.sr.uuid)
    args.append(self.uuid)

    dest = None
    dst = None
    if snap_type == VDI.SNAPSHOT_DOUBLE:
        # Second (writable) child only exists for double snapshots.
        dest = util.gen_uuid()
        dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type))
        args.append(dest)

    if self.hidden:
        raise xs_errors.XenError('VDIClone', opterr='hidden VDI')

    # Refuse to extend a VHD chain beyond the supported maximum depth.
    depth = vhdutil.getDepth(self.path)
    if depth == -1:
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='failed to get VHD depth')
    elif depth >= vhdutil.MAX_CHAIN_SIZE:
        raise xs_errors.XenError('SnapshotChainTooLong')

    # Test the amount of actual disk space
    if ENFORCE_VIRT_ALLOC:
        self.sr._loadvdis()
        reserved = self.sr.virtual_allocation
        sr_size = self.sr._getsize()
        # Single/internal snapshots create one new VHD, double creates two.
        num_vdis = 2
        if (snap_type == VDI.SNAPSHOT_SINGLE or snap_type == VDI.SNAPSHOT_INTERNAL):
            num_vdis = 1
        if (sr_size - reserved) < ((self.size + VDI.VDIMetadataSize( \
                vhdutil.VDI_TYPE_VHD, self.size)) * num_vdis):
            raise xs_errors.XenError('SRNoSpace')

    # "newsrc" becomes the hidden base copy; "src" keeps the leaf name.
    newuuid = util.gen_uuid()
    src = self.path
    newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type))
    newsrcname = "%s.%s" % (newuuid, self.vdi_type)

    if not self._checkpath(src):
        raise xs_errors.XenError('VDIUnavailable', \
                opterr='VDI %s unavailable %s' % (self.uuid, src))

    # wkcfix: multiphase
    util.start_log_entry(self.sr.path, self.path, args)

    # We assume the filehandle has been released
    try:
        util.ioretry(lambda: os.rename(src, newsrc))
        # Create the snapshot under a temporary name, then rename
        # it afterwards. This avoids a small window where it exists
        # but is invalid. We do not need to do this for
        # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed
        # before so nobody will try to query it.
        tmpsrc = "%s.%s" % (src, "new")
        util.ioretry(lambda: self._snap(tmpsrc, newsrcname))
        util.ioretry(lambda: os.rename(tmpsrc, src))
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            util.ioretry(lambda: self._snap(dst, newsrcname))
        # mark the original file (in this case, its newsrc)
        # as hidden so that it does not show up in subsequent scans
        util.ioretry(lambda: self._mark_hidden(newsrc))

        #Verify parent locator field of both children and delete newsrc if unused
        introduce_parent = True
        try:
            srcparent = util.ioretry(lambda: self._query_p_uuid(src))
            dstparent = None
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                dstparent = util.ioretry(lambda: self._query_p_uuid(dst))
            # If neither child actually points at the new base, it is
            # unused: unlink it and skip introducing a parent record.
            if srcparent != newuuid and \
                    (snap_type == VDI.SNAPSHOT_SINGLE or \
                    snap_type == VDI.SNAPSHOT_INTERNAL or \
                    dstparent != newuuid):
                util.ioretry(lambda: os.unlink(newsrc))
                introduce_parent = False
        except:
            # Best-effort verification: failures leave introduce_parent True.
            pass

        # Introduce the new VDI records
        leaf_vdi = None
        if snap_type == VDI.SNAPSHOT_DOUBLE:
            leaf_vdi = VDI.VDI(self.sr, dest)
            # user-visible leaf VDI
            leaf_vdi.read_only = False
            leaf_vdi.location = dest
            leaf_vdi.size = self.size
            leaf_vdi.utilisation = self.utilisation
            leaf_vdi.sm_config = {}
            leaf_vdi.sm_config['vhd-parent'] = dstparent
            # If the parent is encrypted set the key_hash
            # for the new snapshot disk
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            if "key_hash" in sm_config:
                leaf_vdi.sm_config['key_hash'] = sm_config['key_hash']
            # If we have CBT enabled on the VDI,
            # set CBT status for the new snapshot disk
            if cbtlog:
                leaf_vdi.cbt_enabled = True

        base_vdi = None
        if introduce_parent:
            base_vdi = VDI.VDI(self.sr, newuuid)
            # readonly parent
            base_vdi.label = "base copy"
            base_vdi.read_only = True
            base_vdi.location = newuuid
            base_vdi.size = self.size
            base_vdi.utilisation = self.utilisation
            base_vdi.sm_config = {}
            # Only record a vhd-parent if the base itself has a parent
            # (the "no parent" sentinel comes from _query_p_uuid output).
            grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc))
            if grandparent.find("no parent") == -1:
                base_vdi.sm_config['vhd-parent'] = grandparent

        try:
            if snap_type == VDI.SNAPSHOT_DOUBLE:
                leaf_vdi_ref = leaf_vdi._db_introduce()
                util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \
                        (leaf_vdi_ref, dest))
            if introduce_parent:
                base_vdi_ref = base_vdi._db_introduce()
                # Parent records are not user-manageable.
                self.session.xenapi.VDI.set_managed(base_vdi_ref, False)
                util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid))
            # Re-point this VDI's own record at its (possibly new) parent.
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
            sm_config['vhd-parent'] = srcparent
            self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
        except Exception, e:
            util.SMlog(
                "vdi_clone: caught error during VDI.db_introduce: %s" % (str(e)))
            # Note it's too late to actually clean stuff up here: the base disk has
            # been marked as deleted already.
            util.end_log_entry(self.sr.path, self.path, ["error"])
            raise
    except util.CommandException, inst:
        # XXX: it might be too late if the base disk has been marked as deleted!
        self._clonecleanup(src, dst, newsrc)
        util.end_log_entry(self.sr.path, self.path, ["error"])
        raise xs_errors.XenError('VDIClone',
                opterr='VDI clone failed error %d' % inst.code)
class SMBSR(FileSR.FileSR):
    """SMB file-based storage repository"""

    def handles(type):
        # Driver registration hook: this SR driver claims type 'smb'.
        # (Parameter name shadows the builtin 'type' — kept for API
        # compatibility with the other SR drivers.)
        return type == 'smb'
    handles = staticmethod(handles)

    def load(self, sr_uuid):
        # Initialise driver state from device-config; no I/O beyond
        # XenAPI reads happens here.
        # Raises xs_errors.XenError('ConfigServerMissing') when the
        # mandatory 'server' device-config key is absent.
        self.ops_exclusive = FileSR.OPS_EXCLUSIVE
        self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
        self.sr_vditype = SR.DEFAULT_TAP
        self.driver_config = DRIVER_CONFIG
        if not self.dconf.has_key('server'):
            raise xs_errors.XenError('ConfigServerMissing')
        self.remoteserver = self.dconf['server']
        # Prefer the live XenAPI sm_config; fall back to the one passed
        # in the SMAPI call (e.g. during sr_create, before the SR ref exists).
        if self.sr_ref and self.session is not None:
            self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        else:
            self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
        self.credentials = None
        self.mountpoint = os.path.join(SR.MOUNT_BASE, 'SMB', self.__extract_server(), sr_uuid)
        self.linkpath = os.path.join(self.mountpoint, sr_uuid or "")
        # Remotepath is the absolute path inside a share that is to be mounted
        # For a SMB SR, only the root can be mounted.
        self.remotepath = ''
        self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
        self._check_o_direct()

    def checkmount(self):
        # True iff the share is mounted AND the per-SR link directory exists.
        return util.ioretry(lambda: ((util.pathexists(self.mountpoint) and \
                util.ismount(self.mountpoint)) and \
                util.pathexists(self.linkpath)))

    def mount(self, mountpoint=None):
        """Mount the remote SMB export at 'mountpoint'"""
        # Raises SMBException on a bad mountpoint argument, on failure to
        # create the mountpoint directory, or when mount.cifs fails.
        if mountpoint == None:
            mountpoint = self.mountpoint
        elif not util.is_string(mountpoint) or mountpoint == "":
            raise SMBException("mountpoint not a string object")

        try:
            if not util.ioretry(lambda: util.isdir(mountpoint)):
                util.ioretry(lambda: util.makedirs(mountpoint))
        except util.CommandException, inst:
            raise SMBException("Failed to make directory: code is %d" % inst.code)

        # Temporary credentials file path (unique per mount attempt).
        self.credentials = os.path.join("/tmp", util.gen_uuid())

        options = self.getMountOptions()
        if options:
            # Collapse the option list to mount.cifs's comma-separated form.
            options = ",".join(str(x) for x in options if x)

        try:
            util.ioretry(lambda:
                util.pread(["mount.cifs", self.remoteserver, mountpoint, "-o", options]),
                errlist=[errno.EPIPE, errno.EIO],
                maxretry=2, nofail=True)
        except util.CommandException, inst:
            raise SMBException("mount failed with return code %d" % inst.code)
def cmd_init(self, namespace, backend=None, nreplicas=None, encrypt_key=None):
    # Initialise a new repository: create local metadata dirs, write the
    # config, then seed the remote side (config blob, objects dir,
    # head/prev markers) and start the Paxos proposer.
    # Returns True on success, False if already initialised or _load fails.
    # already initialized?
    if self.check_sanity():
        dbg.err("already initialized %s (%s)" \
                % (self.path_root, self.namespace))
        return False

    os.mkdir(self.path_meta)
    os.mkdir(self.path_objs)

    # build config opts
    conf = util.new_config()

    # core: unique/permanent info about local machine (often called client)
    #   NOTE. not sure if encryption_key should be in core, or unchangable
    conf.add_section('core')
    conf.set('core', 'namespace', namespace)
    conf.set('core', 'clientid', util.gen_uuid())
    conf.set('core', 'encryptkey', _get_conf_encryptkey(encrypt_key))

    # backend: info about sync service providers
    # XXX: Error handling
    conf.add_section('backend')
    try:
        services = _get_conf_services(backend)
        conf.set('backend', 'services', services)
        conf.set('backend', 'nreplicas',
                 _get_conf_nreplicas(nreplicas, len(services.split(","))))
    except:
        # Best-effort: backend section may be left empty on failure.
        pass

    # flush
    with open(self.path_conf, "w") as fd:
        conf.write(fd)

    try:
        self._load()
    except NameError:
        # Config was unusable: roll back the metadata dir we created.
        shutil.rmtree(self.path_meta)
        return False

    # put config into remote — strip client-private keys first.
    conf.remove_option('core', 'clientid')
    conf.remove_option('core', 'encryptkey')

    with io.BytesIO() as out:
        conf.write(out)
        val = out.getvalue()
    # Remote config blob is content-addressed by the first 6 sha1 chars.
    configname = util.sha1(val)
    self._put_all_content(val, self.get_remote_path("configs/%s" % configname[:6]), True)

    #temporary --- move this to pPaxos
    #self._put_all_content(configname[:6], self.get_remote_path("config"), True)

    # Format for master: headhash.config[:6].version
    # (head hash empty for a fresh repo, version starts at 0)
    prev_master = "." + configname[:6] + ".0"

    # do we need both? or shall we put them into a file together.
    with open(self.get_head(), "w") as f:
        f.write(prev_master)
    with open(self.get_prev(), "w") as f:
        f.write(prev_master)

    self._put_all_dir(self.get_remote_path("objects"))
    # change to put_content
    self._put_all(self.get_head(), self.get_remote_path(self.get_head_name()))
    self._put_all(self.get_prev(), self.get_remote_path(self.get_prev_name()))

    from paxos import Proposer
    self.proposer = Proposer(None, self.services,
                             self.get_pPaxos_path(prev_master))
    self._join()

    return True
def cmd_clone(self, namespace, backend=None, encrypt_key=None): # if wrong target if self.check_sanity(): return False # reset all the path by including the namespace self.path_root = os.path.join(self.path_root, namespace) self.path_meta = os.path.join(self.path_root, META_DIR) self.path_conf = self.get_path("config") self.path_objs = self.get_path("objects") #self.path_head_history = self.get_path("head_history") if os.path.exists(self.path_root): dbg.err("%s already exists." % self.path_root) return False if backend is None: print "input one of the storage backends, (e.g., dropbox,google,box)" print " for testing, use disk@/path (e.g., disk@/tmp)" backend = raw_input("> ") srv = services.factory(backend) self.namespace = namespace # create repo directory os.mkdir(self.path_root) os.mkdir(self.path_meta) os.mkdir(self.path_objs) curmaster = self.get_uptodate_master(False, srv) sp = curmaster.split(".") master = sp[0] seed = sp[1] seed = srv.get(self.get_remote_path("configs/%s" % seed)) conf = util.loads_config(seed) # setup client specific info conf.set('core', 'clientid' , util.gen_uuid()) conf.set('core', 'encryptkey', _get_conf_encryptkey(encrypt_key)) with open(self.path_conf, "w") as fd: conf.write(fd) self._load() beg = time.time() self.bstore_download() self._join() with open(self.get_head(), "w") as f: f.write(curmaster) with open(self.get_prev(), "w") as f: f.write(curmaster) # send my head to remote self._put_all(self.get_head(), self.get_remote_path(self.get_head_name())) self._put_all(self.get_prev(), self.get_remote_path(self.get_prev_name())) self._join() if (master): ret = self.restore_from_master() end = time.time() dbg.dbg("clone: %ss" % (end-beg)) return True
def cmd_clone(self, namespace, backend=None, encrypt_key=None):
    # Clone an existing repository namespace from a storage backend:
    # rebase local paths under `namespace`, fetch the remote seed config
    # and objects, write head/prev markers, and restore from master.
    # Returns True on success, False when the target is unusable/exists.
    # NOTE(review): a nearly identical cmd_clone appears earlier in this
    # file — likely a duplicated revision; consider deduplicating.
    # if wrong target
    if self.check_sanity():
        return False

    # reset all the path by including the namespace
    self.path_root = os.path.join(self.path_root, namespace)
    self.path_meta = os.path.join(self.path_root, META_DIR)
    self.path_conf = self.get_path("config")
    self.path_objs = self.get_path("objects")
    #self.path_head_history = self.get_path("head_history")

    if os.path.exists(self.path_root):
        dbg.err("%s already exists." % self.path_root)
        return False

    # Prompt interactively when no backend was supplied.
    if backend is None:
        print "input one of the storage backends, (e.g., dropbox,google,box)"
        print " for testing, use disk@/path (e.g., disk@/tmp)"
        backend = raw_input("> ")

    srv = services.factory(backend)
    self.namespace = namespace

    # create repo directory
    os.mkdir(self.path_root)
    os.mkdir(self.path_meta)
    os.mkdir(self.path_objs)

    # Master string format: headhash.configname.version
    curmaster = self.get_uptodate_master(False, srv)
    sp = curmaster.split(".")
    master = sp[0]
    seed = sp[1]
    # Fetch the content-addressed seed config from the remote.
    seed = srv.get(self.get_remote_path("configs/%s" % seed))
    conf = util.loads_config(seed)

    # setup client specific info
    conf.set('core', 'clientid', util.gen_uuid())
    conf.set('core', 'encryptkey', _get_conf_encryptkey(encrypt_key))
    with open(self.path_conf, "w") as fd:
        conf.write(fd)

    self._load()

    beg = time.time()
    self.bstore_download()
    self._join()

    # Record the current master in both local markers.
    with open(self.get_head(), "w") as f:
        f.write(curmaster)
    with open(self.get_prev(), "w") as f:
        f.write(curmaster)

    # send my head to remote
    self._put_all(self.get_head(), self.get_remote_path(self.get_head_name()))
    self._put_all(self.get_prev(), self.get_remote_path(self.get_prev_name()))
    self._join()

    # A non-empty head hash means there is history to restore.
    if (master):
        ret = self.restore_from_master()

    end = time.time()
    dbg.dbg("clone: %ss" % (end - beg))
    return True
def clone(self, sr_uuid, snap_uuid):
    # Clone a VDI from a snapshot on an RBD-backed SR.
    #
    # Three paths through this method:
    #  * sharable snapshot: return its params as-is;
    #  * snapshot flagged sm_config['rollback']=='true': revert to the
    #    snapshot (rollback) and introduce a fresh base VDI record;
    #  * normal case: RBD-clone the base into a new VDI and introduce it.
    # Returns the params string of the resulting VDI.
    util.SMlog("RBDVDI.clone: sr_uuid=%s, snap_uuid=%s" % (sr_uuid, snap_uuid))

    snap_vdi_ref = self.session.xenapi.VDI.get_by_uuid(snap_uuid)
    if self.session.xenapi.VDI.get_sharable(snap_vdi_ref):
        # Sharable VDIs are handed back directly, no cloning.
        return snap_vdi_ref.get_params()

    snap_sm_config = self.session.xenapi.VDI.get_sm_config(snap_vdi_ref)
    if snap_sm_config.has_key("snapshot-of"):
        base_uuid = snap_sm_config["snapshot-of"]
    else:
        # Not a snapshot yet: snapshot it first, then clone from that.
        snapVDI = self._snapshot(sr_uuid, snap_uuid)
        base_uuid = snap_uuid
        snap_uuid = snapVDI.uuid
        self.sr.scan(self.sr.uuid)

    util.SMlog("RBDVDI.clone base_uuid = %s" % (base_uuid))

    if snap_sm_config.has_key("rollback"):
        if snap_sm_config["rollback"] == 'true':
            util.SMlog("RBDVDI.clone reverting %s to %s" % (snap_uuid, base_uuid))
            # executing rollback of snapshot (reverting VM to snapshot)
            new_uuid = snap_sm_config["new_uuid"]
            self._rollback_snapshot(new_uuid, snap_uuid)
            # Introduce a record for the reverted base image.
            baseVDI = RBDVDI(self.sr, new_uuid,
                             self.session.xenapi.VDI.get_name_label(snap_vdi_ref))
            baseVDI.path = self.sr._get_path(new_uuid)
            baseVDI.location = baseVDI.uuid
            baseVDI.size = self.session.xenapi.VDI.get_virtual_size(snap_vdi_ref)
            baseVDI.sm_config["vdi_type"] = 'aio'
            baseVDI.sm_config["reverted"] = 'true'
            base_vdi_ref = baseVDI._db_introduce()
            # Re-point every sibling snapshot that references this
            # rollback (via new_uuid) at the reverted base, and clear
            # its rollback bookkeeping keys.
            vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
            for tmp_vdi in vdis:
                tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)  # currently unused
                tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
                if tmp_sm_config.has_key("rollback"):
                    if tmp_sm_config.has_key("new_uuid"):
                        if tmp_sm_config["new_uuid"] == new_uuid:
                            sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
                            del sm_config['snapshot-of']
                            sm_config['snapshot-of'] = new_uuid
                            del sm_config['rollback']
                            del sm_config['new_uuid']
                            self.session.xenapi.VDI.set_sm_config(tmp_vdi, sm_config)
            return baseVDI.get_params()
    else:
        # Normal clone path: derive a new writable image from the base.
        base_vdi_meta = self._get_vdi_meta(base_uuid)
        base_vdi_ref = self.session.xenapi.VDI.get_by_uuid(base_uuid)
        if base_vdi_meta.has_key('VDI_LABEL'):
            base_vdi_label = base_vdi_meta['VDI_LABEL']
        else:
            base_vdi_label = ''
        clone_uuid = util.gen_uuid()
        cloneVDI = RBDVDI(self.sr, clone_uuid, base_vdi_label)
        self._do_clone(base_uuid, snap_uuid, clone_uuid, base_vdi_label)
        cloneVDI.path = self.sr._get_path(clone_uuid)
        cloneVDI.location = cloneVDI.uuid
        cloneVDI.sm_config["vdi_type"] = 'aio'
        cloneVDI.sm_config["clone-of"] = snap_uuid
        clone_vdi_ref = cloneVDI._db_introduce()
        # Mirror size/utilisation from the base image's record.
        self.session.xenapi.VDI.set_physical_utilisation(
            clone_vdi_ref,
            self.session.xenapi.VDI.get_physical_utilisation(base_vdi_ref))
        self.session.xenapi.VDI.set_virtual_size(
            clone_vdi_ref,
            self.session.xenapi.VDI.get_virtual_size(base_vdi_ref))
        self.sr._updateStats(
            self.sr.uuid,
            self.session.xenapi.VDI.get_virtual_size(base_vdi_ref))
        return cloneVDI.get_params()