def detach(self, sr_uuid, vdi_uuid):
    """Detach an RBD-backed VDI on this host.

    Unmaps the device (as a snapshot if the VDI is one), clears the
    'attached' marker, and returns this VDI's NBD device-instance slot
    to the SR-wide "dev_instances" table kept in the SR's sm-config.

    :param sr_uuid: UUID of the containing SR
    :param vdi_uuid: UUID of the VDI to detach
    """
    util.SMlog("RBDVDI.detach: sr_uuid=%s, vdi_uuid=%s" % (sr_uuid, vdi_uuid))
    vdi_ref = self.sr.srcmd.params['vdi_ref']
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
    host_uuid = inventory.get_localhost_uuid()
    self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    # Drop the storage-migration mirror marker, if present.
    if sm_config.has_key("sxm_mirror"):
        self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'sxm_mirror')
    if sm_config.has_key("snapshot-of"):
        base_uuid = sm_config["snapshot-of"]
        # it's a snapshot VDI, detach it as snapshot
        self._unmap_SNAP(base_uuid, vdi_uuid, self.size)
    else:
        self._unmap_VHD(vdi_uuid, self.size)
    self.attached = False
    self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'attached')
    # Free this VDI's slot in the per-host device-instance table.
    # The table is removed and re-added because XenAPI sm-config keys
    # cannot be updated in place.
    sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
    self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref,
                                                 "dev_instances")
    if sr_dev_instances["hosts"].has_key(host_uuid):
        for i in range(cephutils.NBDS_MAX):
            if sr_dev_instances["hosts"][host_uuid][i] == vdi_uuid:
                sr_dev_instances["hosts"][host_uuid][i] = None
                break
    self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances",
                                            json.dumps(sr_dev_instances))
    self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")
def attach(self, sr_uuid):
    """Attach the RBD SR on this host.

    Verifies the backing Ceph pool exists, then (under the SR lock)
    resets this host's NBD device-instance table in the SR's sm-config
    before delegating to cephutils.SR.attach.

    :param sr_uuid: UUID of the SR being attached
    :raises xs_errors.XenError: 'SRUnavailable' when no pool matches
        self.uuid
    """
    util.SMlog("RBDSR.attach: sr_uuid=%s" % sr_uuid)
    if not self.RBDPOOLs.has_key(self.uuid):
        raise xs_errors.XenError('SRUnavailable',
                                 opterr='no pool with uuid: %s' % sr_uuid)
    host_uuid = inventory.get_localhost_uuid()
    self.lock.acquire()
    # FIX: the original released the lock only on the success path; any
    # XenAPI failure below would leave the SR lock held forever.
    try:
        sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        if sr_sm_config.has_key("dev_instances"):
            sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
            # sm-config keys cannot be updated in place; remove, then re-add.
            self.session.xenapi.SR.remove_from_sm_config(self.sr_ref,
                                                         "dev_instances")
        else:
            sr_dev_instances = {"hosts": {}}
        # Reset this host's slot table; slot 0 is never handed out.
        sr_dev_instances["hosts"][host_uuid] = [None] * cephutils.NBDS_MAX
        sr_dev_instances["hosts"][host_uuid][0] = "reserved"
        self.session.xenapi.SR.add_to_sm_config(self.sr_ref, "dev_instances",
                                                json.dumps(sr_dev_instances))
    finally:
        self.lock.release()
    cephutils.SR.attach(self, sr_uuid)
def _call_plugin(self, op, args):
    """Run *op* of the 'ceph_plugin' XAPI plugin for a VDI.

    Hosts that currently have the VDI attached are recorded as
    'host_<opaque_ref>' keys in the VDI's sm-config; the plugin is
    invoked on each of them, or on the local host when no such key
    exists.

    :param op: plugin operation name (e.g. 'map', 'unmap')
    :param args: plugin argument dict; must contain 'vdi_uuid'
    :raises util.SMException: if any plugin call reports failure
    """
    util.SMlog("Calling cephutils.VDI._call_plugin: op=%s" % op)
    vdi_uuid = args['vdi_uuid']
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    util.SMlog("Calling ceph_plugin")
    # Compute the attached-host key list once instead of twice.
    host_keys = filter(lambda x: x.startswith('host_'), sm_config.keys())
    if host_keys:
        for key in host_keys:
            host_ref = key[len('host_'):]
            util.SMlog("Calling '%s' on host %s" % (op, host_ref))
            if not self.session.xenapi.host.call_plugin(
                    host_ref, "ceph_plugin", op, args):
                # FIX: message formerly referenced the undefined name
                # 'mirror_uuid', which raised NameError instead of the
                # intended SMException.
                raise util.SMException("failed to %s VDI %s" % (op, vdi_uuid))
    else:
        host_uuid = inventory.get_localhost_uuid()
        host_ref = self.session.xenapi.host.get_by_uuid(host_uuid)
        util.SMlog("Calling '%s' on localhost %s" % (op, host_ref))
        if not self.session.xenapi.host.call_plugin(
                host_ref, "ceph_plugin", op, args):
            # FIX: same undefined-name bug as above.
            raise util.SMException("failed to %s VDI %s" % (op, vdi_uuid))
def _unmap_sxm_base(self, vdi_uuid, size):
    """Unmap the device-mapper 'base' image of a storage-migration pair.

    Builds the plugin argument set for the 'unmap' operation, invokes it
    via _call_plugin, then clears the 'dm' marker and frees this VDI's
    NBD device-instance slot in the SR's sm-config.

    :param vdi_uuid: UUID of the base VDI to unmap
    :param size: virtual size of the VDI (bytes)
    """
    # Internal (dm/rbd) names vs. the externally visible device name.
    _vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    _dev_name = "%s/%s" % (self.sr.DEV_ROOT, _vdi_name)
    _dmdev_name = "%s%s" % (self.sr.DM_ROOT, _vdi_name)
    _dm_name = "%s-%s" % (self.sr.CEPH_POOL_NAME, _vdi_name)
    vdi_name = "%s" % (vdi_uuid)
    dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
    dm = "base"
    # Plugin args are strings, so the sharable flag is serialized here.
    if self.session.xenapi.VDI.get_sharable(vdi_ref):
        sharable = "true"
    else:
        sharable = "false"
    util.SMlog(
        "Calling cephutills.VDI._unmap_sxm_base: vdi_uuid=%s, size=%s, dm=%s, sharable=%s"
        % (vdi_uuid, size, dm, sharable))
    args = {
        "mode": self.mode,
        "vdi_uuid": vdi_uuid,
        "vdi_name": vdi_name,
        "dev_name": dev_name,
        "_vdi_name": _vdi_name,
        "_dev_name": _dev_name,
        "_dmdev_name": _dmdev_name,
        "_dm_name": _dm_name,
        "CEPH_POOL_NAME": self.sr.CEPH_POOL_NAME,
        "NBDS_MAX": str(NBDS_MAX),
        "CEPH_USER": self.sr.CEPH_USER,
        "sharable": sharable,
        "dm": dm,
        "size": str(size)
    }
    self._call_plugin('unmap', args)
    self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'dm')
    # Free this VDI's slot in the per-host device-instance table.
    # Remove + re-add because sm-config keys cannot be updated in place.
    sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
    self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref,
                                                 "dev_instances")
    host_uuid = inventory.get_localhost_uuid()
    if sr_dev_instances["hosts"].has_key(host_uuid):
        for i in range(NBDS_MAX):
            if sr_dev_instances["hosts"][host_uuid][i] == vdi_uuid:
                sr_dev_instances["hosts"][host_uuid][i] = None
                break
    self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances",
                                            json.dumps(sr_dev_instances))
    self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")
def renameif(session):
    """Interactive console loop for remapping physical NIC names (ethN).

    Shows the current PIF -> device mapping for the local host and lets
    the operator reassign NIC numbers, then 'save' or 'quit'. On a pool
    slave the management interface cannot be modified.

    :param session: authenticated XenAPI session
    """
    uuid = inventory.get_localhost_uuid()
    host = session.xenapi.host.get_by_uuid(uuid)
    pool = session.xenapi.pool.get_all()[0]
    master = session.xenapi.pool.get_master(pool)
    if host != master:
        warn(
            "This host is a slave; it is not possible to rename the management interface"
        )
    pifs = session.xenapi.PIF.get_all_records()
    # Keep only this host's physical PIFs.
    # FIX: iterate over a snapshot of the keys — deleting from a dict
    # while iterating its live keys() view raises RuntimeError in Python 3.
    for ref in list(pifs.keys()):
        if pifs[ref]['host'] != host or pifs[ref]['physical'] != True:
            del pifs[ref]
    while True:
        print("Current mappings:")
        show_pifs(pifs)
        print()
        print(
            "Type 'quit' to quit; 'save' to save; or a NIC number or MAC address to edit"
        )
        print("> ", end=' ')
        x = sys.stdin.readline().strip()
        if x.lower() == 'quit':
            sys.exit(0)
        if x.lower() == 'save':
            # If a slave, filter out the management PIF
            if host != master:
                # FIX: snapshot the keys here too (same RuntimeError).
                for ref in list(pifs.keys()):
                    if pifs[ref]['management']:
                        del pifs[ref]
            save(session, host, pifs)
            sys.exit(0)
        pif = select(pifs, x)
        if pif != None:
            # Make sure this is not a slave's management PIF
            if host != master and pifs[pif]['management']:
                print(
                    "ERROR: cannot modify the management interface of a slave."
                )
            else:
                print("Selected NIC with MAC '%s'. Enter new NIC number:" %
                      pifs[pif]['MAC'])
                print("> ", end=' ')
                nic = sys.stdin.readline().strip()
                if not (nic.isdigit()):
                    print("ERROR: must enter a number (e.g. 0, 1, 2, 3, ...)")
                else:
                    pifs[pif]['device'] = "eth" + nic
        else:
            print("NIC '%s' not found" % (x))
        print()
def renameif(session): uuid = inventory.get_localhost_uuid () host = session.xenapi.host.get_by_uuid(uuid) pool = session.xenapi.pool.get_all()[0] master = session.xenapi.pool.get_master(pool) if host <> master: warn("This host is a slave; it is not possible to rename the management interface") pifs = session.xenapi.PIF.get_all_records() for ref in pifs.keys(): if pifs[ref]['host'] <> host or pifs[ref]['physical'] <> True: del pifs[ref] while True: print "Current mappings:" show_pifs(pifs) print print "Type 'quit' to quit; 'save' to save; or a NIC number or MAC address to edit" print "> ", x = sys.stdin.readline().strip() if x.lower() == 'quit': sys.exit(0) if x.lower() == 'save': # If a slave, filter out the management PIF if host <> master: for ref in pifs.keys(): if pifs[ref]['management']: del pifs[ref] save(session, host, pifs) sys.exit(0) pif = select(pifs, x) if pif <> None: # Make sure this is not a slave's management PIF if host <> master and pifs[pif]['management']: print "ERROR: cannot modify the management interface of a slave." else: print "Selected NIC with MAC '%s'. Enter new NIC number:" % pifs[pif]['MAC'] print "> ", nic = sys.stdin.readline().strip() if not(nic.isdigit()): print "ERROR: must enter a number (e.g. 0, 1, 2, 3, ...)" else: pifs[pif]['device'] = "eth" + nic else: print "NIC '%s' not found" % (x) print
def main(session, args):
    """Wake the host named in args["remote_host_uuid"] via Wake-On-LAN."""
    target_uuid = args["remote_host_uuid"]
    # Management PIF of the local host, and the bridge it sits on — the
    # management IP address lives on the dom0 bridge, not on the backend
    # device.
    local_pif = find_host_mgmt_pif(session, inventory.get_localhost_uuid())
    local_net = session.xenapi.PIF.get_network(local_pif)
    bridge = session.xenapi.network.get_bridge(local_net)
    broadcast_ip = find_interface_broadcast_ip(bridge)
    # Resolve the target host and the MAC of its management interface.
    target_host = session.xenapi.host.get_by_uuid(target_uuid)
    target_pif = find_host_mgmt_pif(session, target_uuid)
    target_mac = session.xenapi.PIF.get_MAC(target_pif)
    return wake_on_lan(session, target_host, broadcast_ip, target_mac)
def main(session, args):
    """Plugin entry point: send Wake-On-LAN to args['remote_host_uuid']."""
    remote_uuid = args['remote_host_uuid']
    # Locate our own management PIF; the management IP is configured on
    # the dom0 bridge rather than the physical backend device.
    mgmt_pif = find_host_mgmt_pif(session, inventory.get_localhost_uuid())
    net_ref = session.xenapi.PIF.get_network(mgmt_pif)
    bcast = find_interface_broadcast_ip(
        session.xenapi.network.get_bridge(net_ref))
    # Resolve the remote host record and its management MAC address.
    host_ref = session.xenapi.host.get_by_uuid(remote_uuid)
    pif_ref = find_host_mgmt_pif(session, remote_uuid)
    mac_addr = session.xenapi.PIF.get_MAC(pif_ref)
    return wake_on_lan(session, host_ref, bcast, mac_addr)
def wake_on_lan(session, host, remote_host_uuid):
    """Broadcast Wake-On-LAN magic packets for *host* until it is live.

    Resolves the local management bridge's broadcast address and the
    remote host's physical management MAC, then sends a WoL packet every
    5 seconds (up to 60 attempts, ~5 minutes) while polling
    Host_metrics.live.

    :param session: authenticated XenAPI session
    :param host: opaque ref of the host to wake
    :param remote_host_uuid: UUID of the host to wake
    :returns: str(True) if the host reported live, else str(False)
    """
    # Find this Host's management interface and its dom0 bridge; the
    # management IP address is on the bridge, not the backend device.
    this_pif = find_host_mgmt_pif(session, inventory.get_localhost_uuid())
    this_network = session.xenapi.PIF.get_network(this_pif)
    this_bridge = session.xenapi.network.get_bridge(this_network)
    broadcast_addr = find_interface_broadcast_ip(this_bridge)
    # Find the remote Host's management PIF, then the actual physical PIF
    # beneath it, and read its MAC address.
    mgmt_pif = find_host_mgmt_pif(session, remote_host_uuid)
    remote_pif = get_physical_pif(mgmt_pif)
    mac = session.xenapi.PIF.get_MAC(remote_pif)
    # A Wake-On-LAN magic packet is FF:FF:FF:FF:FF:FF followed by 16
    # repetitions of the target MAC, sent in a UDP datagram to the
    # broadcast address.
    # FIX: build the payload with join instead of quadratic '+=' string
    # concatenation (b''.join also keeps this correct under Python 3,
    # where struct.pack returns bytes); the unused 'target_mac' local
    # was removed.
    bin_payload = b"".join(
        struct.pack("B", int("0x" + b, 16))
        for b in ["FF"] * 6 + mac.split(":") * 16)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    addr = (broadcast_addr, 9)  # Port 0, 7 or 9
    s.connect(addr)
    # Send WoL packets every 5 seconds for 5 minutes, waiting to see if
    # the Host_metrics.live flag is set.
    attempts = 0
    finished = False
    metrics = None
    while not finished and (attempts < 60):
        attempts = attempts + 1
        syslog.syslog("Attempt %d sending WoL packet for MAC %s to %s" %
                      (attempts, mac, broadcast_addr))
        s.send(bin_payload)
        time.sleep(5)
        metrics = session.xenapi.host.get_metrics(host)
        try:
            finished = session.xenapi.host_metrics.get_live(metrics)
        except Exception:
            # FIX: was a bare 'except:'. Metrics may be transiently
            # unreadable while the host boots — deliberately keep retrying.
            pass
    return str(finished)
def _call_plugin(self, op, args):
    """Run *op* of the 'ceph_plugin' XAPI plugin for a VDI.

    Hosts that currently have the VDI attached are recorded as
    'host_<opaque_ref>' keys in the VDI's sm-config; the plugin is
    invoked on each of them, or on the local host when no such key
    exists.

    :param op: plugin operation name (e.g. 'map', 'unmap')
    :param args: plugin argument dict; must contain 'vdi_uuid'
    :raises util.SMException: if any plugin call reports failure
    """
    vdi_uuid = args['vdi_uuid']
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    util.SMlog("Calling ceph_plugin")
    # Compute the attached-host key list once instead of twice.
    host_keys = filter(lambda x: x.startswith('host_'), sm_config.keys())
    if host_keys:
        for key in host_keys:
            host_ref = key[len('host_'):]
            util.SMlog("Calling rbd/nbd map on host %s" % host_ref)
            if not self.session.xenapi.host.call_plugin(
                    host_ref, "ceph_plugin", op, args):
                # FIX: message formerly referenced the undefined name
                # 'mirror_uuid', which raised NameError instead of the
                # intended SMException.
                raise util.SMException("failed to %s VDI %s" % (op, vdi_uuid))
    else:
        host_uuid = inventory.get_localhost_uuid()
        host_ref = self.session.xenapi.host.get_by_uuid(host_uuid)
        util.SMlog("Calling rbd/nbd map on localhost %s" % host_ref)
        if not self.session.xenapi.host.call_plugin(
                host_ref, "ceph_plugin", op, args):
            # FIX: same undefined-name bug as above.
            raise util.SMException("failed to %s VDI %s" % (op, vdi_uuid))
def detach(self, sr_uuid):
    """Detach the RBD SR on this host.

    Under the SR lock, resets this host's NBD device-instance table in
    the SR's sm-config, then delegates to cephutils.SR.detach.

    :param sr_uuid: UUID of the SR being detached
    """
    util.SMlog("RBDSR.detach: sr_uuid=%s" % sr_uuid)
    host_uuid = inventory.get_localhost_uuid()
    self.lock.acquire()
    # FIX: the original released the lock only on the success path; any
    # XenAPI failure below would leave the SR lock held forever.
    try:
        sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
        if sr_sm_config.has_key("dev_instances"):
            sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
            # sm-config keys cannot be updated in place; remove, then re-add.
            self.session.xenapi.SR.remove_from_sm_config(self.sr_ref,
                                                         "dev_instances")
            # Reset this host's slot table; slot 0 is never handed out.
            sr_dev_instances["hosts"][host_uuid] = [None] * cephutils.NBDS_MAX
            sr_dev_instances["hosts"][host_uuid][0] = "reserved"
            self.session.xenapi.SR.add_to_sm_config(
                self.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
    finally:
        self.lock.release()
    cephutils.SR.detach(self, sr_uuid)
def wake_on_lan(session, host, remote_host_uuid):
    """Broadcast Wake-On-LAN magic packets for *host* until it is live.

    Resolves the local management bridge's broadcast address and the
    remote host's management MAC, then sends a WoL packet every 5
    seconds (up to 60 attempts, ~5 minutes) while polling
    Host_metrics.live.

    :param session: authenticated XenAPI session
    :param host: opaque ref of the host to wake
    :param remote_host_uuid: UUID of the host to wake
    :returns: str(True) if the host reported live, else str(False)
    """
    # Find this Host's management interface and its dom0 bridge; the
    # management IP address is on the bridge, not the backend device.
    this_pif = find_host_mgmt_pif(session, inventory.get_localhost_uuid())
    this_network = session.xenapi.PIF.get_network(this_pif)
    this_bridge = session.xenapi.network.get_bridge(this_network)
    broadcast_addr = find_interface_broadcast_ip(this_bridge)
    # Find the remote Host's management interface and its MAC address.
    remote_pif = find_host_mgmt_pif(session, remote_host_uuid)
    mac = session.xenapi.PIF.get_MAC(remote_pif)
    # A Wake-On-LAN magic packet is FF:FF:FF:FF:FF:FF followed by 16
    # repetitions of the target MAC, sent in a UDP datagram to the
    # broadcast address.
    # FIX: build the payload with join instead of quadratic '+=' string
    # concatenation (b''.join also keeps this correct under Python 3,
    # where struct.pack returns bytes); the unused 'target_mac' local
    # was removed.
    bin_payload = b"".join(
        struct.pack("B", int("0x" + b, 16))
        for b in ["FF"] * 6 + mac.split(":") * 16)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    addr = (broadcast_addr, 9)  # Port 0, 7 or 9
    s.connect(addr)
    # Send WoL packets every 5 seconds for 5 minutes, waiting to see if
    # the Host_metrics.live flag is set.
    attempts = 0
    finished = False
    metrics = None
    while not finished and (attempts < 60):
        attempts = attempts + 1
        syslog.syslog("Attempt %d sending WoL packet for MAC %s to %s" %
                      (attempts, mac, broadcast_addr))
        s.send(bin_payload)
        time.sleep(5)
        metrics = session.xenapi.host.get_metrics(host)
        try:
            finished = session.xenapi.host_metrics.get_live(metrics)
        except Exception:
            # FIX: was a bare 'except:'. Metrics may be transiently
            # unreadable while the host boots — deliberately keep retrying.
            pass
    return str(finished)
def _map_sxm_base(self, vdi_uuid, size):
    """Map the device-mapper 'base' image of a storage-migration pair.

    Allocates an NBD device-instance slot for this host in the SR's
    sm-config, invokes the ceph_plugin 'map' operation, and records the
    'dm' marker and chosen 'dev_instance' on the VDI.

    :param vdi_uuid: UUID of the base VDI to map
    :param size: virtual size of the VDI (bytes)
    """
    # Internal (dm/rbd) names vs. the externally visible device name.
    _vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    _dev_name = "%s/%s" % (self.sr.DEV_ROOT, _vdi_name)
    _dmdev_name = "%s%s" % (self.sr.DM_ROOT, _vdi_name)
    _dm_name = "%s-%s" % (self.sr.CEPH_POOL_NAME, _vdi_name)
    vdi_name = "%s" % (vdi_uuid)
    dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
    dm = "base"
    if sr_sm_config.has_key("dev_instances"):
        sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
    else:
        sr_dev_instances = {"hosts": {}}
    # Pick the first free slot for this host; slot 0 stays "reserved".
    first_free_instance = -1
    host_uuid = inventory.get_localhost_uuid()
    if sr_dev_instances["hosts"].has_key(host_uuid):
        for i in range(NBDS_MAX):
            if sr_dev_instances["hosts"][host_uuid][i] == None:
                first_free_instance = i
                break
        # NOTE(review): if no free slot is found, first_free_instance
        # stays -1 and this silently overwrites the last slot — confirm
        # whether a full table should instead be an error.
        sr_dev_instances["hosts"][host_uuid][
            first_free_instance] = vdi_uuid
    else:
        #sr_dev_instances["hosts"].append({host_uuid:[None]*NBDS_MAX})
        sr_dev_instances["hosts"][host_uuid] = [None] * NBDS_MAX
        sr_dev_instances["hosts"][host_uuid][0] = "reserved"
        sr_dev_instances["hosts"][host_uuid][1] = vdi_uuid
        first_free_instance = 1
    dev = str(first_free_instance)
    # Plugin args are strings, so the sharable flag is serialized here.
    if self.session.xenapi.VDI.get_sharable(vdi_ref):
        sharable = "true"
    else:
        sharable = "false"
    util.SMlog(
        "Calling cephutills.VDI._map_sxm_base: vdi_uuid=%s, size=%s, dm=%s, sharable=%s"
        % (vdi_uuid, size, dm, sharable))
    args = {
        "mode": self.mode,
        "vdi_uuid": vdi_uuid,
        "vdi_name": vdi_name,
        "dev_name": dev_name,
        "_vdi_name": _vdi_name,
        "_dev_name": _dev_name,
        "_dmdev_name": _dmdev_name,
        "_dm_name": _dm_name,
        "CEPH_POOL_NAME": self.sr.CEPH_POOL_NAME,
        "NBDS_MAX": str(NBDS_MAX),
        "CEPH_USER": self.sr.CEPH_USER,
        "sharable": sharable,
        "dm": dm,
        "dev": dev,
        "size": str(size)
    }
    self._call_plugin('map', args)
    self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'dm', dm)
    # Persist the updated slot table (remove + re-add; sm-config keys
    # cannot be updated in place).
    self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref,
                                                 "dev_instances")
    self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances",
                                            json.dumps(sr_dev_instances))
    if sm_config.has_key("dev_instance"):
        self.session.xenapi.VDI.remove_from_sm_config(
            vdi_ref, "dev_instance")
    self.session.xenapi.VDI.add_to_sm_config(vdi_ref, "dev_instance",
                                             str(first_free_instance))
def compose(self, sr_uuid, vdi1_uuid, vdi2_uuid):
    """Merge a mirror VDI (vdi2) back into its base VDI (vdi1).

    Pauses/unmaps the mirror if attached, maps both images, performs a
    device-mapper 'snapshot-merge' of the mirror into the base, then
    swaps the two RBD image names so the merged result takes the
    mirror's identity.

    :param sr_uuid: UUID of the containing SR
    :param vdi1_uuid: UUID of the base VDI
    :param vdi2_uuid: UUID of the mirror VDI
    """
    if VERBOSE:
        util.SMlog("rbdsr_rbd.RBDRBDVDI.compose: sr_uuid=%s, vdi1_uuid=%s, vdi2_uuid=%s"
                   % (sr_uuid, vdi1_uuid, vdi2_uuid))
    base_uuid = vdi1_uuid
    mirror_uuid = vdi2_uuid
    mirror_vdi_ref = self.session.xenapi.VDI.get_by_uuid(mirror_uuid)
    mirror_sm_config = self.session.xenapi.VDI.get_sm_config(mirror_vdi_ref)
    local_host_uuid = inventory.get_localhost_uuid()
    # If the mirror is in use, pause its tapdisk and unmap it first.
    if 'attached' in mirror_sm_config:
        if 'paused' not in mirror_sm_config:
            if not blktap2.VDI.tap_pause(self.session, self.sr.uuid,
                                         mirror_uuid):
                raise util.SMException("failed to pause VDI %s" % mirror_uuid)
        self._unmap_rbd(mirror_uuid, self.rbd_info[1]['size'],
                        devlinks=True, norefcount=True)
    # Map both images locally: mirror plain, base as a dm 'base' device.
    self._map_rbd(mirror_uuid, self.rbd_info[1]['size'],
                  host_uuid=local_host_uuid, dmmode='None',
                  devlinks=True, norefcount=True)
    self._map_rbd(base_uuid, self.rbd_info[1]['size'],
                  host_uuid=local_host_uuid, dmmode='base',
                  devlinks=True, norefcount=True)
    """ Execute merging dm snapshot to base """
    try:
        _mirror_vdi_name = "%s%s" % (VDI_PREFIX, mirror_uuid)
        _mirror_dev_name = "%s/%s" % (self.sr.DEV_ROOT, _mirror_vdi_name)
        _base_vdi_name = "%s%s" % (VDI_PREFIX, base_uuid)
        _base_dev_name = "%s/%s" % (self.sr.DEV_ROOT, _base_vdi_name)
        _base_dm_name = "%s-%s-base" % (self.sr.CEPH_POOL_NAME,
                                        _base_vdi_name)
        # Reload the base dm device with a snapshot-merge table, resume
        # it, then block until the merge completes.
        util.pread2(["dmsetup", "suspend", _base_dm_name])
        util.pread2(["dmsetup", "reload", _base_dm_name, "--table",
                     "0 %s snapshot-merge %s %s P 1"
                     % (str(int(self.rbd_info[1]['size']) / 512),
                        _base_dev_name, _mirror_dev_name)])
        util.pread2(["dmsetup", "resume", _base_dm_name])
        # we should wait until the merge is completed
        util.pread2(["waitdmmerging.sh", _base_dm_name])
    except Exception as e:
        # Best-effort rollback: unmap both, restore the mirror mapping
        # and unpause its tapdisk.
        # NOTE(review): the exception is logged but not re-raised, so
        # control appears to fall through to the swap below — confirm
        # whether a 'raise' is missing here.
        util.SMlog("rbdsr_rbd.RBDRBDVDI.compose Exception: %s" % str(e))
        self._unmap_rbd(mirror_uuid, self.rbd_info[1]['size'],
                        host_uuid=local_host_uuid, devlinks=True,
                        norefcount=True)
        self._unmap_rbd(base_uuid, self.rbd_info[1]['size'],
                        host_uuid=local_host_uuid, devlinks=True,
                        norefcount=True)
        if 'attached' in mirror_sm_config:
            self._map_rbd(mirror_uuid, self.rbd_info[1]['size'],
                          devlinks=True, norefcount=True)
            if 'paused' not in mirror_sm_config:
                if not blktap2.VDI.tap_unpause(self.session, self.sr.uuid,
                                               mirror_uuid, None):
                    raise util.SMException("failed to unpause VDI %s"
                                           % mirror_uuid)
    ########
    self._unmap_rbd(base_uuid, self.rbd_info[1]['size'],
                    host_uuid=local_host_uuid, devlinks=True,
                    norefcount=True)
    self._unmap_rbd(mirror_uuid, self.rbd_info[1]['size'],
                    host_uuid=local_host_uuid, devlinks=True,
                    norefcount=True)
    """ Swap snapshot and base """
    tmp_uuid = "temporary"  # util.gen_uuid()
    self._rename_rbd(mirror_uuid, tmp_uuid)
    self._rename_rbd(base_uuid, mirror_uuid)
    self._rename_rbd(tmp_uuid, base_uuid)
    ########
    if 'attached' in mirror_sm_config:
        # NOTE(review): this calls _unmap_rbd with a dmmode argument even
        # though the images were just unmapped above — looks like it may
        # have been intended to be _map_rbd (re-attach after the swap);
        # confirm against the plugin implementation.
        self._unmap_rbd(mirror_uuid, self.rbd_info[1]['size'],
                        dmmode='None', devlinks=True, norefcount=True)
        if 'paused' not in mirror_sm_config:
            if not blktap2.VDI.tap_unpause(self.session, self.sr.uuid,
                                           mirror_uuid, None):
                # NOTE(review): message formats the sm_config dict, not the
                # UUID — probably meant mirror_uuid; confirm.
                raise util.SMException("failed to unpause VDI %s"
                                       % mirror_sm_config)
    if VERBOSE:
        util.SMlog("Compose done")
def attach(self, sr_uuid, vdi_uuid):
    """Attach an RBD-backed VDI on this host.

    Allocates an NBD device-instance slot in the SR's sm-config, fills
    in xenstore data, then maps the image — as a snapshot, as a
    storage-migration (SXM) mirror, or as a plain VHD depending on the
    VDI's sm-config. On failure the allocated slot is rolled back.

    :param sr_uuid: UUID of the containing SR
    :param vdi_uuid: UUID of the VDI to attach
    :returns: result of VDI.VDI.attach
    """
    util.SMlog("RBDVDI.attach: sr_uuid=%s, vdi_uuid=%s" % (sr_uuid, vdi_uuid))
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
    host_uuid = inventory.get_localhost_uuid()
    self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    if sr_sm_config.has_key("dev_instances"):
        sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
        # sm-config keys cannot be updated in place; remove, then re-add.
        self.session.xenapi.SR.remove_from_sm_config(
            self.sr.sr_ref, "dev_instances")
    else:
        sr_dev_instances = {"hosts": {}}
    # Pick the first free slot for this host; slot 0 stays "reserved".
    first_free_instance = -1
    if sr_dev_instances["hosts"].has_key(host_uuid):
        for i in range(cephutils.NBDS_MAX):
            if sr_dev_instances["hosts"][host_uuid][i] == None:
                first_free_instance = i
                break
        # NOTE(review): if no free slot exists, first_free_instance stays
        # -1 and this overwrites the last slot — confirm intent.
        sr_dev_instances["hosts"][host_uuid][
            first_free_instance] = vdi_uuid
    else:
        #sr_dev_instances["hosts"].append({host_uuid:[None]*cephutils.NBDS_MAX})
        sr_dev_instances["hosts"][host_uuid] = [None] * cephutils.NBDS_MAX
        sr_dev_instances["hosts"][host_uuid][0] = "reserved"
        sr_dev_instances["hosts"][host_uuid][1] = vdi_uuid
        first_free_instance = 1
    self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances",
                                            json.dumps(sr_dev_instances))
    if sm_config.has_key("dev_instance"):
        self.session.xenapi.VDI.remove_from_sm_config(
            vdi_ref, "dev_instance")
    self.session.xenapi.VDI.add_to_sm_config(vdi_ref, "dev_instance",
                                             str(first_free_instance))
    self.path = self.sr._get_path(vdi_uuid)
    # Publish synthetic SCSI identification data through xenstore.
    if not hasattr(self, 'xenstore_data'):
        self.xenstore_data = {}
    self.xenstore_data.update(
        scsiutil.update_XS_SCSIdata(
            self.uuid, scsiutil.gen_synthetic_page_data(self.uuid)))
    self.xenstore_data['storage-type'] = 'rbd'
    self.xenstore_data['vdi-type'] = self.vdi_type
    try:
        ##########
        # Does any VDI in this SR snapshot the one being attached?
        vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
        has_a_snapshot = False
        for tmp_vdi in vdis:
            tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)
            tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
            if tmp_sm_config.has_key("snapshot-of"):
                if tmp_sm_config["snapshot-of"] == vdi_uuid:
                    has_a_snapshot = True
            # if tmp_sm_config.has_key("sxm_mirror"):
            #            sxm_mirror_vdi = vdi_uuid
        ########## SXM VDIs
        if sm_config.has_key("snapshot-of"):
            base_uuid = sm_config["snapshot-of"]
            # it's a snapshot VDI, attach it as snapshot
            self._map_SNAP(base_uuid, vdi_uuid, self.size, "none")
        elif sm_config.has_key("base_mirror"):
            if has_a_snapshot:
                # it's a mirror vdi of storage migrating VM
                # it's attached first
                self.session.xenapi.VDI.add_to_sm_config(
                    vdi_ref, 'sxm_mirror', 'true')
                # creating dm snapshot dev
                self._map_sxm_mirror(vdi_uuid, self.size)
            else:
                # it's a base vdi of storage migrating VM
                # it's attached after mirror VDI and mirror snapshot VDI has been created
                self._map_VHD(vdi_uuid, self.size, "none")
        ########## not SXM VDIs
        else:
            # it's not SXM VDI, just attach it
            self._map_VHD(vdi_uuid, self.size, "none")
        if not util.pathexists(self.path):
            raise xs_errors.XenError('VDIUnavailable',
                                     opterr='Could not find: %s' % self.path)
        self.attached = True
        if sm_config.has_key("attached"):
            self.session.xenapi.VDI.remove_from_sm_config(
                vdi_ref, 'attached')
        self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'attached', 'true')
    except:
        # Roll back the device-instance slot allocated above.
        # NOTE(review): the original exception is swallowed here and the
        # method still falls through to VDI.VDI.attach — confirm whether
        # a re-raise was intended.
        self.session.xenapi.SR.remove_from_sm_config(
            self.sr.sr_ref, "dev_instances")
        sr_dev_instances["hosts"][host_uuid][first_free_instance] = None
        self.session.xenapi.SR.add_to_sm_config(
            self.sr.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
        self.session.xenapi.VDI.remove_from_sm_config(
            vdi_ref, "dev_instance")
    return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
def compose(self, sr_uuid, vdi1_uuid, vdi2_uuid):
    """Make vdi2 (mirror) a VHD child of vdi1 (base).

    Attaches the base on every host where the mirror is attached (plus
    the local host if needed), rewires the mirror's VHD parent pointer
    to the base, hides the base and marks it unmanaged, then refreshes
    the mirror's tapdisk.

    :param sr_uuid: UUID of the containing SR
    :param vdi1_uuid: UUID of the base VDI
    :param vdi2_uuid: UUID of the mirror VDI
    """
    if VERBOSE:
        util.SMlog(
            "rbdsr_vhd.RBDVHDVDI.compose: sr_uuid=%s, vdi1_uuid=%s, vdi2_uuid=%s"
            % (sr_uuid, vdi1_uuid, vdi2_uuid))
    base_uuid = vdi1_uuid
    mirror_uuid = vdi2_uuid
    base_path = self.sr._get_path(base_uuid)
    mirror_path = self.sr._get_path(mirror_uuid)
    base_vdi_ref = self.session.xenapi.VDI.get_by_uuid(base_uuid)
    mirror_vdi_ref = self.session.xenapi.VDI.get_by_uuid(mirror_uuid)
    mirror_sm_config = self.session.xenapi.VDI.get_sm_config(
        mirror_vdi_ref)
    mirror_hostRefs = self._get_vdi_hostRefs(mirror_uuid)
    local_host_uuid = inventory.get_localhost_uuid()
    # Attach the base everywhere the mirror is attached, and locally.
    BaseVDI = self.sr.vdi(base_uuid)
    for host_uuid in mirror_hostRefs.iterkeys():
        RBDVHDVDI.attach(BaseVDI, sr_uuid, base_uuid, host_uuid=host_uuid)
    if local_host_uuid not in mirror_hostRefs:
        RBDVHDVDI.attach(BaseVDI, sr_uuid, base_uuid,
                         host_uuid=local_host_uuid)
        self.attach(sr_uuid, mirror_uuid, host_uuid=local_host_uuid)
    # Point the mirror's VHD at the base and hide/unmanage the base.
    vhdutil.setParent(mirror_path, base_path, False)
    vhdutil.setHidden(base_path)
    self.sr.session.xenapi.VDI.set_managed(base_vdi_ref, False)
    RBDVHDVDI.update(
        BaseVDI, sr_uuid, base_uuid
    )  # TODO: Check if xapi invoke update after set_* op, if it's true then we can remove this line
    # Record the new parent in the mirror's sm-config.
    if 'vhd-parent' in mirror_sm_config:
        self.session.xenapi.VDI.remove_from_sm_config(
            mirror_vdi_ref, 'vhd-parent')
    self.session.xenapi.VDI.add_to_sm_config(mirror_vdi_ref, 'vhd-parent',
                                             base_uuid)
    self.sm_config['vhd-parent'] = base_uuid
    self.update(sr_uuid, mirror_uuid)
    # Undo the extra local attachments made above.
    if local_host_uuid not in mirror_hostRefs:
        self.detach(sr_uuid, mirror_uuid, host_uuid=local_host_uuid)
        RBDVHDVDI.detach(BaseVDI, sr_uuid, base_uuid,
                         host_uuid=local_host_uuid)
    # Refresh the tapdisk so it picks up the new VHD chain.
    if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, mirror_uuid,
                                   True):
        raise util.SMException("failed to refresh VDI %s" % mirror_uuid)
    if VERBOSE:
        util.SMlog("Compose done")
def resize(self, sr_uuid, vdi_uuid, size, online=False):
    """ Resize the given VDI to size <size>. Size can be any valid disk
    size greater than [or smaller than] the current value.

    Grows the backing RBD image, then (with the VDI temporarily mapped
    locally if needed) resizes the VHD metadata and updates the XAPI
    size fields.

    :param sr_uuid: UUID of the containing SR
    :param vdi_uuid: UUID of the VDI to resize
    :param size: requested new virtual size (bytes)
    :param online: whether the VDI is attached while being resized
    :return: value returned by the parent class resize
    :raises xs_errors.XenError: 'VDIResize' when the VDI is attached
        (online resize is not supported in VHD mode)
    """
    if VERBOSE:
        util.SMlog(
            "rbdsr_vhd.RBDVHDVDI.resize: sr_uuid=%s, vdi_uuid=%s, size=%s"
            % (sr_uuid, vdi_uuid, size))
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    vdi_hostRefs = self._get_vdi_hostRefs(vdi_uuid)
    local_host_uuid = inventory.get_localhost_uuid()
    if 'attached' in sm_config and online is False:
        # NOTE(review): 'online = True' is dead — the raise on the next
        # line always fires; confirm whether online resize was meant to
        # be attempted instead.
        online = True
        raise xs_errors.XenError(
            'VDIResize',
            opterr='Online resize is not supported in VHD mode')
    size = vhdutil.validate_and_round_vhd_size(long(size))
    # NOTE(review): both provisioning branches compute the same value —
    # confirm whether 'thin' was meant to differ.
    if self.sr.provision == "thin":
        rbdSizeNew = lvhdutil.calcSizeVHDLV(size)
    elif self.sr.provision == "thick":
        rbdSizeNew = lvhdutil.calcSizeVHDLV(size)
    # Grow the underlying RBD image first.
    if online:
        retval = super(RBDVHDVDI, self).resize_online(sr_uuid, vdi_uuid,
                                                      rbdSizeNew)
    else:
        retval = super(RBDVHDVDI, self).resize(sr_uuid, vdi_uuid,
                                               rbdSizeNew)
    # Ensure the image is mapped locally before touching VHD metadata.
    if not online:
        # self._map_rbd(vdi_uuid, rbdSizeNew, norefcount=True)
        self.attach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
    else:
        if local_host_uuid not in vdi_hostRefs:
            self.attach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
    vhdutil.setSizePhys(self.path, size, False)
    vhdutil.setSizeVirtFast(self.path, size)
    if online:
        if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid,
                                       vdi_uuid, True):
            raise util.SMException("failed to refresh VDI %s" % vdi_uuid)
    # Undo the temporary local mapping.
    if not online:
        # self._unmap_rbd(vdi_uuid, rbdSizeNew, norefcount=True)
        self.detach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
    else:
        if local_host_uuid not in vdi_hostRefs:
            self.detach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
    # Publish the new size to XAPI.
    self.size = size
    self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(size))
    self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, str(size))
    return retval
def log(str): print str # Destroy the given domain def destroy_domain((domid, uuid)): log("destroying domid %s uuid %s" % (domid, uuid)) all = subprocess.Popen(["@OPTDIR@/debug/destroy_domain", "-domid", domid], stdout=subprocess.PIPE).communicate()[0] # Keep track of when a domain first looked like it should be here domain_first_noticed = {} # Number of seconds after which we conclude that a domain really shouldn't be here threshold = 60 if __name__ == "__main__": localhost_uuid = inventory.get_localhost_uuid () while True: time.sleep(1) paused = list_paused_domains () # GC the domain_first_noticed map for d in domain_first_noticed.keys(): if d not in paused: log("domid %s uuid %s: looks ok now, forgetting about it" % d) del domain_first_noticed[d] for d in list_paused_domains(): if should_domain_be_somewhere_else(localhost_uuid, d): if d not in domain_first_noticed: domain_first_noticed[d] = time.time() noticed_for = time.time() - domain_first_noticed[d] if noticed_for > threshold:
def clone(self, sr_uuid, vdi_uuid, mode='clone'):
    """Clone or snapshot a VHD-on-RBD VDI.

    Creates a new clone VDI and (for a first clone) a hidden base VDI,
    swaps the RBD images so the base takes over the original data, then
    snapshots the clone (and, for non-snapshots, the original) onto the
    base and rewires the 'vhd-parent' chain.

    :param sr_uuid: UUID of the containing SR
    :param vdi_uuid: UUID of the VDI to clone
    :param mode: 'clone' or 'snapshot'
    :return: the created clone VDI record (from RBDVHDVDI.create)
    :raises util.SMException: on snapshot-of-snapshot or pause failure
    """
    if VERBOSE:
        util.SMlog("rbdsr_vhd.RBDVHDVDI.clone: sr_uuid=%s, vdi_uuid=%s"
                   % (sr_uuid, vdi_uuid))
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    is_a_snapshot = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
    label = self.session.xenapi.VDI.get_name_label(vdi_ref)
    description = self.session.xenapi.VDI.get_name_description(vdi_ref)
    local_host_uuid = inventory.get_localhost_uuid()
    if mode == 'snapshot' and is_a_snapshot:
        raise util.SMException("Can not make snapshot form snapshot %s"
                               % vdi_uuid)
    self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    # A snapshot source already has a base; a fresh clone needs a new one.
    if not is_a_snapshot:
        base_uuid = util.gen_uuid()
    else:
        base_uuid = sm_config["vhd-parent"]
    clone_uuid = util.gen_uuid()
    if VERBOSE:
        util.SMlog(
            "rbdsr_vhd.RBDVHDVDI.clone: Pepare CloneVDI: sr_uuid=%s, clone_uuid=%s"
            % (sr_uuid, clone_uuid))
    # Build the clone VDI record, copying sm-config except per-host and
    # chain-specific keys.
    cloneVDI = self.sr.vdi(clone_uuid)
    cloneVDI.label = "%s (%s)" % (label, mode)
    cloneVDI.description = description
    cloneVDI.path = self.sr._get_path(clone_uuid)
    cloneVDI.location = cloneVDI.uuid
    cloneVDI.size = self.size
    cloneVDI.utilisation = self.size
    cloneVDI.sm_config = dict()
    for key, val in sm_config.iteritems():
        if key not in ["type", "vdi_type", "vhd-parent", "paused", "attached"] and \
                not key.startswith("host_"):
            cloneVDI.sm_config[key] = val
    if mode == 'snapshot':
        cloneVDI.is_a_snapshot = True
        cloneVDI.snapshot_of = vdi_ref
    retval_clone = RBDVHDVDI.create(cloneVDI, sr_uuid, clone_uuid,
                                    cloneVDI.size)
    clone_ref = self.session.xenapi.VDI.get_by_uuid(clone_uuid)
    if not is_a_snapshot:
        # First clone: create the hidden, unmanaged base VDI.
        if VERBOSE:
            util.SMlog(
                "rbdsr_vhd.RBDVHDVDI.clone: Pepare BaseVDI: sr_uuid=%s, base_uuid=%s"
                % (sr_uuid, base_uuid))
        baseVDI = self.sr.vdi(base_uuid)
        baseVDI.label = "%s (base)" % label
        baseVDI.description = description
        baseVDI.path = self.sr._get_path(base_uuid)
        baseVDI.location = baseVDI.uuid
        baseVDI.managed = False
        baseVDI.size = self.size
        baseVDI.utilisation = self.size
        baseVDI.sm_config = dict()
        retval_base = RBDVHDVDI.create(baseVDI, sr_uuid, base_uuid,
                                       baseVDI.size)
        base_ref = self.session.xenapi.VDI.get_by_uuid(base_uuid)
    else:
        # Snapshot of an existing chain: reuse the current base.
        base_ref = self.session.xenapi.VDI.get_by_uuid(base_uuid)
        baseVDI = self.sr.vdi(base_uuid)
        baseVDI.path = self.sr._get_path(base_uuid)
        baseVDI.sm_config = self.session.xenapi.VDI.get_sm_config(base_ref)
    if not is_a_snapshot:
        # Quiesce the source if it is in use, then swap the RBD images so
        # the (empty) base image takes the original's name and vice versa.
        if 'attached' in sm_config:
            if VERBOSE:
                util.SMlog(
                    "rbdsr_vhd.RBDVHDVDI.clone: Unmap VDI as it's mapped: sr_uuid=%s, vdi_uuid=%s"
                    % (sr_uuid, vdi_uuid))
            if 'paused' not in sm_config:
                if not blktap2.VDI.tap_pause(self.session, self.sr.uuid,
                                             vdi_uuid):
                    raise util.SMException("failed to pause VDI %s"
                                           % vdi_uuid)
            self._unmap_rbd(vdi_uuid, self.rbd_info[1]['size'],
                            devlinks=False, norefcount=True)
        if VERBOSE:
            util.SMlog(
                "rbdsr_vhd.RBDVHDVDI.clone: Swap Base and VDI: sr_uuid=%s, vdi_uuid=%s, base_uuid=%s"
                % (sr_uuid, vdi_uuid, base_uuid))
        tmp_uuid = "temporary"  # util.gen_uuid()
        self._rename_rbd(vdi_uuid, tmp_uuid)
        self._rename_rbd(base_uuid, vdi_uuid)
        self._rename_rbd(tmp_uuid, base_uuid)
    # Re-establish mappings; base_hostRefs tracks hosts where the base is
    # already attached so extra attaches can be undone afterwards.
    if not is_a_snapshot:
        if 'attached' in sm_config:
            self._map_rbd(vdi_uuid, self.rbd_info[1]['size'],
                          devlinks=False, norefcount=True)
            base_hostRefs = self._get_vdi_hostRefs(vdi_uuid)
            if local_host_uuid not in base_hostRefs:
                self.attach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
        else:
            self.attach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
            base_hostRefs = {}
    else:
        if 'attached' not in baseVDI.sm_config:
            RBDVHDVDI.attach(baseVDI, sr_uuid, base_uuid,
                             host_uuid=local_host_uuid)
            base_hostRefs = {}
        else:
            base_hostRefs = self._get_vdi_hostRefs(base_uuid)
            if local_host_uuid not in base_hostRefs:
                RBDVHDVDI.attach(baseVDI, sr_uuid, base_uuid,
                                 host_uuid=local_host_uuid)
    if is_a_snapshot:
        # Snapshot: only the clone needs to be chained onto the base.
        RBDVHDVDI.attach(cloneVDI, sr_uuid, clone_uuid,
                         host_uuid=local_host_uuid)
        vhdutil.snapshot(cloneVDI.path, baseVDI.path, False,
                         lvhdutil.MSIZE_MB)
        RBDVHDVDI.detach(cloneVDI, sr_uuid, clone_uuid,
                         host_uuid=local_host_uuid)
    else:
        # Clone: chain both the clone and the original onto the base,
        # then hide the base.
        for host_uuid in base_hostRefs.iterkeys():
            RBDVHDVDI.attach(baseVDI, sr_uuid, base_uuid,
                             host_uuid=host_uuid)
        if local_host_uuid not in base_hostRefs:
            RBDVHDVDI.attach(baseVDI, sr_uuid, base_uuid,
                             host_uuid=local_host_uuid)
        RBDVHDVDI.attach(cloneVDI, sr_uuid, clone_uuid,
                         host_uuid=local_host_uuid)
        vhdutil.snapshot(cloneVDI.path, baseVDI.path, False,
                         lvhdutil.MSIZE_MB)
        vhdutil.snapshot(self.path, baseVDI.path, False, lvhdutil.MSIZE_MB)
        vhdutil.setHidden(baseVDI.path)
        RBDVHDVDI.detach(cloneVDI, sr_uuid, clone_uuid,
                         host_uuid=local_host_uuid)
        if local_host_uuid not in base_hostRefs:
            RBDVHDVDI.detach(baseVDI, sr_uuid, base_uuid,
                             host_uuid=local_host_uuid)
    # The base is always read-only; a snapshot clone is too.
    baseVDI.read_only = True
    self.session.xenapi.VDI.set_read_only(base_ref, True)
    if mode == 'snapshot':
        cloneVDI.read_only = True
        self.session.xenapi.VDI.set_read_only(clone_ref, True)
    # Record the new parent relationships in sm-config.
    cloneVDI.sm_config["vhd-parent"] = base_uuid
    self.session.xenapi.VDI.add_to_sm_config(cloneVDI.ref, 'vhd-parent',
                                             base_uuid)
    RBDVHDVDI.update(cloneVDI, sr_uuid, clone_uuid)
    if not is_a_snapshot:
        if 'vhd-parent' in sm_config:
            # Original already had a parent: move that link to the base.
            baseVDI.sm_config['vhd-parent'] = sm_config['vhd-parent']
            self.session.xenapi.VDI.add_to_sm_config(
                baseVDI.ref, 'vhd-parent', sm_config['vhd-parent'])
            RBDVHDVDI.update(baseVDI, sr_uuid, base_uuid)
            self.session.xenapi.VDI.remove_from_sm_config(
                vdi_ref, 'vhd-parent')
        else:
            RBDVHDVDI.update(baseVDI, sr_uuid, base_uuid)
        self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'vhd-parent',
                                                 base_uuid)
        self.sm_config['vhd-parent'] = base_uuid
        self.update(sr_uuid, vdi_uuid)
    # Resume the source and undo temporary local attachments.
    if not is_a_snapshot:
        if 'attached' in sm_config:
            if 'paused' not in sm_config:
                if not blktap2.VDI.tap_unpause(self.session, self.sr.uuid,
                                               vdi_uuid, None):
                    raise util.SMException("failed to unpause VDI %s"
                                           % vdi_uuid)
            if local_host_uuid not in base_hostRefs:
                self.detach(sr_uuid, vdi_uuid, host_uuid=local_host_uuid)
        else:
            if local_host_uuid not in base_hostRefs:
                RBDVHDVDI.detach(baseVDI, sr_uuid, base_uuid,
                                 host_uuid=local_host_uuid)
    return retval_clone