def monitor_host():
    """Main daemon loop: watch XAPI VM events for this host and keep the
    global DOCKER_MONITOR registered against the current host object.

    Runs forever; on XAPI/socket failures it sleeps and reinitialises the
    global XAPI session before retrying.
    """
    global DOCKER_MONITOR
    session = None
    host = None
    # container monitoring can get a smaller slice of the CPU time
    os.nice(10)
    signal.signal(signal.SIGTERM, interrupt_handler)
    signal.signal(signal.SIGINT, interrupt_handler)
    while True:
        try:
            session = api_helper.get_local_api_session()
            client = api_helper.LocalXenAPIClient()
            # need to refresh the host, in case we just joined a pool
            host = api_helper.Host(client,
                                   api_helper.get_this_host_ref(session))
            if not DOCKER_MONITOR:
                DOCKER_MONITOR = DockerMonitor(host)
            else:
                DOCKER_MONITOR.set_host(host)
            log.info("Monitoring host %s" % (host.get_id()))
            try:
                # Avoid race conditions - get a current event token
                event_from = session.xenapi.event_from(["vm"], '', 0.0)
                token_from = event_from['token']
                # Now load the VMs that are enabled for monitoring
                DOCKER_MONITOR.refresh()
                while True:
                    event_from = session.xenapi.event_from(
                        ["vm"], token_from, EVENT_FROM_TIMEOUT_S)
                    token_from = event_from['token']
                    events = event_from['events']
                    for event in events:
                        if (event['operation'] == 'mod'
                                and 'snapshot' in event):
                            # At this point the monitor may need to
                            # refresh it's monitoring state of a particular
                            # vm.
                            DOCKER_MONITOR.process_vmrecord(
                                event['ref'], event['snapshot'])
                        elif event['operation'] == 'del':
                            DOCKER_MONITOR.process_vm_del(event['ref'])
            finally:
                # Always drop the per-iteration session, even on failure
                try:
                    session.xenapi.session.logout()
                except XenAPI.Failure:
                    log.exception("Failed when trying to logout")
        except (socket.error, XenAPI.Failure, xmlrpclib.ProtocolError) as e:
            # NOTE(review): the two log messages below look swapped
            # relative to the session check - confirm intended pairing.
            if session is not None:
                log.exception(e)
                log.error("Could not connect to XAPI - Is XAPI running? " +
                          "Will retry in %d" % (XAPIRETRYSLEEPINS))
            else:
                log.exception("Recovering from XAPI failure - Is XAPI " +
                              "restarting? Will retry in %d."
                              % (XAPIRETRYSLEEPINS))
            time.sleep(XAPIRETRYSLEEPINS)
            api_helper.reinit_global_xapi_session()
def monitor_host():
    """Main daemon loop: watch XAPI VM events for this host and keep the
    global DOCKER_MONITOR registered against the current host object.

    Runs forever; on XAPI/socket failures it sleeps and reinitialises the
    global XAPI session before retrying.
    """
    global DOCKER_MONITOR
    session = None
    host = None
    # container monitoring can get a smaller slice of the CPU time
    os.nice(10)
    signal.signal(signal.SIGTERM, interrupt_handler)
    signal.signal(signal.SIGINT, interrupt_handler)
    while True:
        try:
            session = api_helper.get_local_api_session()
            client = api_helper.LocalXenAPIClient()
            # need to refresh the host, in case we just joined a pool
            host = api_helper.Host(client,
                                   api_helper.get_this_host_ref(session))
            if not DOCKER_MONITOR:
                DOCKER_MONITOR = DockerMonitor(host)
            else:
                DOCKER_MONITOR.set_host(host)
            log.info("Monitoring host %s" % (host.get_id()))
            try:
                # Avoid race conditions - get a current event token
                event_from = session.xenapi.event_from(["vm"], '', 0.0)
                token_from = event_from['token']
                # Now load the VMs that are enabled for monitoring
                DOCKER_MONITOR.refresh()
                while True:
                    event_from = session.xenapi.event_from(
                        ["vm"], token_from, EVENT_FROM_TIMEOUT_S)
                    token_from = event_from['token']
                    events = event_from['events']
                    for event in events:
                        if (event['operation'] == 'mod'
                                and 'snapshot' in event):
                            # At this point the monitor may need to
                            # refresh it's monitoring state of a particular
                            # vm.
                            DOCKER_MONITOR.process_vmrecord(
                                event['ref'], event['snapshot'])
                        elif event['operation'] == 'del':
                            DOCKER_MONITOR.process_vm_del(event['ref'])
            finally:
                # Always drop the per-iteration session, even on failure
                try:
                    session.xenapi.session.logout()
                except XenAPI.Failure:
                    log.exception("Failed when trying to logout")
        except (socket.error, XenAPI.Failure, xmlrpclib.ProtocolError) as e:
            # NOTE(review): the two log messages below look swapped
            # relative to the session check - confirm intended pairing.
            if session is not None:
                log.exception(e)
                log.error("Could not connect to XAPI - Is XAPI running? " +
                          "Will retry in %d" % (XAPIRETRYSLEEPINS))
            else:
                log.exception("Recovering from XAPI failure - Is XAPI " +
                              "restarting? Will retry in %d."
                              % (XAPIRETRYSLEEPINS))
            time.sleep(XAPIRETRYSLEEPINS)
            api_helper.reinit_global_xapi_session()
def execute_docker_data_listen(session, vm_uuid, request,
                               stop_monitoring_request):
    """Generator: send a request to the VM's Docker daemon over TLS and
    yield response chunks until EOF or until stop_monitoring_request
    becomes truthy.

    Wraps ssl.SSLError in TlsException; EAGAIN/EINTR on the non-blocking
    socket are retried.
    """
    host = api_helper.get_suitable_vm_ip(session, vm_uuid, DOCKER_TLS_PORT)
    log.info("tls.execute_docker_listen_charbychar for VM %s, via %s"
             % (vm_uuid, host))
    asocket = _get_socket(session, vm_uuid)
    try:
        asocket.connect((host, DOCKER_TLS_PORT))
        if hasattr(asocket, 'version'):
            # Newer python versions provide the TLS version
            log.info("Connected VM %s using %s"
                     % (vm_uuid, asocket.version()))
        asocket.send(request)
        asocket.setblocking(0)
        while not stop_monitoring_request:
            rlist, _, _ = select.select(
                [asocket.fileno()], [], [],
                constants.MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                read_data = asocket.recv(1024)
                if read_data == "":
                    # EOF - the daemon closed the stream
                    break
                yield read_data
            except IOError, exception:
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    raise
                sys.exc_clear()
                continue
    except ssl.SSLError, exception:
        raise TlsException("Failed to communicate with Docker via TLS: %s"
                           % exception, (sys.exc_info()[2]))
def prepare_ssh_client(session, vmuuid):
    """Create and connect a paramiko SSHClient for the given VM.

    Authenticates with the pool's private key; raises
    AuthenticationException if the key is rejected and SshException for
    other SSH/socket failures. Returns the connected client.
    """
    username = api_helper.get_vm_xscontainer_username(session, vmuuid)
    host = api_helper.get_suitable_vm_ip(session, vmuuid, SSH_PORT)
    log.info("prepare_ssh_client for vm %s, via %s@%s"
             % (vmuuid, username, host))
    client = paramiko.SSHClient()
    pkey = paramiko.rsakey.RSAKey.from_private_key(
        StringIO.StringIO(api_helper.get_idrsa_secret_private(session)))
    # Never trust cached host keys; verification is done by our own policy
    client.get_host_keys().clear()
    client.set_missing_host_key_policy(MyHostKeyPolicy(session, vmuuid))
    try:
        client.connect(host, port=SSH_PORT, username=username, pkey=pkey,
                       look_for_keys=False)
    except SshException:
        # This exception is already improved - leave it as it is
        raise
    except paramiko.AuthenticationException as exception:
        message = ("prepare_ssh_client failed to authenticate with private key"
                   " on VM %s" % (vmuuid))
        log.info(message)
        raise AuthenticationException(message)
    except (paramiko.SSHException, socket.error) as exception:
        # reraise as SshException
        raise SshException("prepare_ssh_client: %s" % exception,
                           (sys.exc_info()[2]))
    return client
def load_cloud_config_template(template_path=None):
    """Return the cloud-config template text, annotated with a trailing
    comment naming the file it was loaded from.

    An explicit template_path wins; otherwise the override file is used
    when present, else the inbuilt default template shipped with the
    package.
    """
    if not template_path:
        if os.path.exists(CLOUD_CONFIG_OVERRIDE_PATH):
            # An administrator-provided override takes priority
            template_path = CLOUD_CONFIG_OVERRIDE_PATH
        else:
            # Fall back to the default template bundled next to this module
            this_dir, _ = os.path.split(__file__)
            template_path = os.path.join(this_dir, "data",
                                         "cloud-config.template")
    log.info("load_cloud_config_template from %s" % (template_path))
    with open(template_path) as filehandle:
        template_data = filehandle.read()
    # Append template location to make it clear where it was loaded from.
    return ("%s\n\n# Template loaded from %s"
            % (template_data, template_path))
def install_vm(session, urlvhdbz2, sruuid, vmname=None,
               templatename='CoreOS'):
    """Download a bz2-compressed VHD, import it as a disk and build a new
    VM from the named template. Returns the new VM's uuid.

    devmode only.
    """
    if vmname is None:
        # Choose the random name per call; the previous default-argument
        # expression was evaluated only once at import time, so every
        # default invocation reused the same "random" suffix.
        vmname = 'CoreOs-%d' % (random.randint(0, 1000))
    log.info("install_vm from url %s to sr %s" % (urlvhdbz2, sruuid))
    atempfile = tempfile.mkstemp(suffix='.vhd.bz2')[1]
    atempfileunpacked = atempfile.replace('.bz2', '')
    # @todo: pipe instead, so the file never actually touches Dom0
    cmd = ['curl', '-o', atempfile, urlvhdbz2]
    util.runlocal(cmd)
    try:
        cmd = ['bzip2', '-d', atempfile]
        util.runlocal(cmd)
        vdiref = api_helper.import_disk(session, sruuid, atempfileunpacked,
                                        'vhd', 'Disk')
    finally:
        # Tidy both the packed and unpacked temporary files
        if os.path.exists(atempfile):
            os.remove(atempfile)
        if os.path.exists(atempfileunpacked):
            os.remove(atempfileunpacked)
    templateref = session.xenapi.VM.get_by_name_label(templatename)[0]
    vmref = session.xenapi.VM.clone(templateref, vmname)
    vmuuid = session.xenapi.VM.get_record(vmref)['uuid']
    log.info("install_vm created vm %s" % (vmuuid))
    remove_disks_in_vm_provisioning(session, vmref)
    session.xenapi.VM.provision(vmref)
    api_helper.create_vbd(session, vmref, vdiref, 'rw', True)
    setup_network_on_lowest_pif(session, vmref)
    return vmuuid
def load_cloud_config_template(template_path=None):
    """Return the cloud-config template text, annotated with a trailing
    comment naming the file it was loaded from.

    An explicit template_path wins; otherwise the override file is used
    when present, else the inbuilt default template shipped with the
    package.
    """
    if not template_path:
        if os.path.exists(CLOUD_CONFIG_OVERRIDE_PATH):
            # An administrator-provided override takes priority
            template_path = CLOUD_CONFIG_OVERRIDE_PATH
        else:
            # Fall back to the default template bundled next to this module
            this_dir, _ = os.path.split(__file__)
            template_path = os.path.join(this_dir, "data",
                                         "cloud-config.template")
    log.info("load_cloud_config_template from %s" % (template_path))
    with open(template_path) as filehandle:
        template_data = filehandle.read()
    # Append template location to make it clear where it was loaded from.
    return ("%s\n\n# Template loaded from %s"
            % (template_data, template_path))
def stop_monitoring(self, vm_ref):
    """Deregister the VM's monitor and stop its monitoring thread, if one
    is currently registered for this ref."""
    log.info("Removing monitor for VM ref: %s" % vm_ref)
    target = self.get_vm_by_ref(vm_ref)
    if not target:
        return
    self.deregister(target)
    target.stop_monitoring()
def install_vm(session, urlvhdbz2, sruuid, vmname=None,
               templatename='CoreOS'):
    """Download a bz2-compressed VHD, import it as a disk and build a new
    VM from the named template. Returns the new VM's uuid.

    devmode only.
    """
    if vmname is None:
        # Choose the random name per call; the previous default-argument
        # expression was evaluated only once at import time, so every
        # default invocation reused the same "random" suffix.
        vmname = 'CoreOs-%d' % (random.randint(0, 1000))
    log.info("install_vm from url %s to sr %s" % (urlvhdbz2, sruuid))
    atempfile = tempfile.mkstemp(suffix='.vhd.bz2')[1]
    atempfileunpacked = atempfile.replace('.bz2', '')
    # @todo: pipe instead, so the file never actually touches Dom0
    cmd = ['curl', '-o', atempfile, urlvhdbz2]
    util.runlocal(cmd)
    try:
        cmd = ['bzip2', '-d', atempfile]
        util.runlocal(cmd)
        vdiref = api_helper.import_disk(session, sruuid, atempfileunpacked,
                                        'vhd', 'Disk')
    finally:
        # Tidy both the packed and unpacked temporary files
        if os.path.exists(atempfile):
            os.remove(atempfile)
        if os.path.exists(atempfileunpacked):
            os.remove(atempfileunpacked)
    templateref = session.xenapi.VM.get_by_name_label(templatename)[0]
    vmref = session.xenapi.VM.clone(templateref, vmname)
    vmuuid = session.xenapi.VM.get_record(vmref)['uuid']
    log.info("install_vm created vm %s" % (vmuuid))
    remove_disks_in_vm_provisioning(session, vmref)
    session.xenapi.VM.provision(vmref)
    api_helper.create_vbd(session, vmref, vdiref, 'rw', True)
    setup_network_on_lowest_pif(session, vmref)
    return vmuuid
def stop_monitoring(self, vm_ref):
    """Deregister the VM's monitor and stop its monitoring thread, if one
    is currently registered for this ref."""
    log.info("Removing monitor for VM ref: %s" % vm_ref)
    target = self.get_vm_by_ref(vm_ref)
    if not target:
        return
    self.deregister(target)
    target.stop_monitoring()
def create_config_drive_iso(session, userdata_template, vmuuid):
    """Build an OpenStack-style ('config-2') config-drive ISO for the VM,
    bundling the customised user_data, its template, and the Linux guest
    agent from the tools ISO. Returns the path of the generated ISO file.

    All intermediate files/directories are removed in the finally block;
    the caller is responsible for deleting the returned ISO.
    """
    log.info("create_config_drive_iso for vm %s" % (vmuuid))
    umountrequired = False
    temptoolsisodir = None
    userdatafile = None
    latestfolder = None
    openstackfolder = None
    agentfilepaths = []
    agentpath = None
    tempisodir = None
    try:
        tempisodir = tempfile.mkdtemp()
        tempisofile = tempfile.mkstemp()[1]
        # add the userdata-file
        openstackfolder = os.path.join(tempisodir, 'openstack')
        latestfolder = os.path.join(openstackfolder, 'latest')
        os.makedirs(latestfolder)
        userdatafile = os.path.join(latestfolder, 'user_data')
        userdatatemplatefile = "%s.template" % userdatafile
        template_data = get_template_data(session, vmuuid)
        userdata = customize_userdata(userdata_template, template_data)
        util.write_file(userdatafile, userdata)
        util.write_file(userdatatemplatefile, userdata_template)
        log.debug("Userdata: %s" % (userdata))
        # Also add the Linux guest agent
        temptoolsisodir = tempfile.mkdtemp()
        tools_iso_path = find_latest_tools_iso_path()
        cmd = ['mount', '-o', 'loop', tools_iso_path, temptoolsisodir]
        util.runlocal(cmd)
        umountrequired = True
        agentpath = os.path.join(tempisodir, 'agent')
        os.makedirs(agentpath)
        agentfiles = ['xe-daemon', 'xe-linux-distribution',
                      'xe-linux-distribution.service',
                      'xen-vcpu-hotplug.rules', 'install.sh',
                      'versions.deb', 'versions.rpm', "versions.tgz"]
        for filename in agentfiles:
            path = os.path.join(temptoolsisodir, 'Linux', filename)
            shutil.copy(path, agentpath)
            agentfilepaths.append(os.path.join(agentpath, filename))
        # Finally wrap up the iso
        util.make_iso('config-2', tempisodir, tempisofile)
    finally:
        # And tidy
        # NOTE(review): userdatatemplatefile is unbound here if mkdtemp/
        # mkstemp failed before its assignment - confirm acceptable.
        if umountrequired:
            cmd = ['umount', temptoolsisodir]
            util.runlocal(cmd)
        for path in [temptoolsisodir, userdatafile, userdatatemplatefile,
                     latestfolder, openstackfolder] + agentfilepaths + \
                [agentpath, tempisodir]:
            if path is not None:
                if os.path.isdir(path):
                    os.rmdir(path)
                elif os.path.isfile(path):
                    os.remove(path)
                else:
                    log.debug("create_config_drive_iso: Not tidying %s because"
                              " it could not be found" % (path))
    return tempisofile
def _interact_with_api(session, vmuuid, request_type, request,
                       message_error=False):
    """Send an HTTP request to the in-guest Docker API via SSH and return
    the response body.

    Raises util.XSContainerException on non-2xx status codes; when
    message_error is True a XAPI message is also sent to surface the
    failure to the user.
    """
    provided_stdin = prepare_request_stdin(request_type, request)
    stdout = ssh_helper.execute_ssh(session, vmuuid, prepare_request_cmd(),
                                    stdin_input=provided_stdin)
    # Split the raw HTTP response into header and body
    headerend = stdout.index('\r\n\r\n')
    header = stdout[:headerend]
    body = stdout[headerend + 4:]
    # ToDo: Should use re
    headersplits = header.split('\r\n', 2)[0].split(' ')
    # protocol = headersplits[0]
    statuscode = headersplits[1]
    if statuscode[0] != '2':
        # this did not work
        status = ' '.join(headersplits[2:])
        failure_title = "Container Management Error"
        failure_body = body.strip()
        if failure_body == "":
            if statuscode == "304":
                # 304 does not have a body and is quite common.
                failure_body = ("The requested operation is currently not "
                                "possible. Please try again later.")
            else:
                failure_body = ("The requested operation failed.")
        failure_body = failure_body + " (" + statuscode + ")"
        if ":" in failure_body:
            # Use the text before the colon as the message title
            (failure_title, failure_body) = failure_body.split(":", 1)
        if message_error:
            api_helper.send_message(session, vmuuid, failure_title,
                                    failure_body)
        message = ("Request '%s' led to status %s - %s: %s"
                   % (request, status, failure_title, failure_body))
        log.info(message)
        raise util.XSContainerException(message)
    return body
def register_vm(vm_uuid, session):
    """Enable monitoring for the VM by setting the registration key in its
    other_config."""
    log.info("register_vm %s" % (vm_uuid))
    api_client = XenAPIClient(session)
    target_vm = VM(api_client, uuid=vm_uuid)
    target_vm.update_other_config(docker_monitor.REGISTRATION_KEY,
                                  docker_monitor.REGISTRATION_KEY_ON)
def __monitor_vm_events(self):
    """Stream Docker events from the VM over SSH and dispatch each
    complete JSON object to handle_docker_event.

    Parses the byte stream one character at a time: first skips the HTTP
    header (up to the blank line), then tracks curly-brace depth to find
    the end of each JSON event. Exits when asked to stop, on EOF, or when
    the 2KB buffer limit is exceeded.
    """
    session = self.get_session()
    vmuuid = self.get_uuid()
    ssh_client = ssh_helper.prepare_ssh_client(session, vmuuid)
    try:
        cmd = docker.prepare_request_cmd()
        log.info("__monitor_vm_events is running '%s' on VM '%s'"
                 % (cmd, vmuuid))
        stdin, stdout, _ = ssh_client.exec_command(cmd)
        stdin.write(docker.prepare_request_stdin('GET', '/events'))
        self._ssh_client = ssh_client
        # Now that we are listening for events, get the latest state
        docker.update_docker_ps(self)
        # set non-blocking io for select.select
        stdout_fd = stdout.channel.fileno()
        fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
                    os.O_NONBLOCK | fcntl.fcntl(stdout_fd, fcntl.F_GETFL))
        # @todo: should make this more sane
        skippedheader = False
        openbrackets = 0
        data = ""
        while not self._stop_monitoring_request:
            rlist, _, _ = select.select([stdout_fd], [], [],
                                        MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                # @todo: should read more than one char at once
                lastread = stdout.read(1)
            except IOError, exception:
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    raise
                sys.exc_clear()
                continue
            if lastread == '':
                # EOF - remote side closed the stream
                break
            data = data + lastread
            if (not skippedheader and lastread == "\n"
                    and len(data) >= 4 and data[-4:] == "\r\n\r\n"):
                # End of the HTTP response header
                data = ""
                skippedheader = True
            elif lastread == '{':
                openbrackets = openbrackets + 1
            elif lastread == '}':
                openbrackets = openbrackets - 1
                if openbrackets == 0:
                    # A complete top-level JSON object has been read
                    event = simplejson.loads(data)
                    self.handle_docker_event(event)
                    data = ""
            if len(data) >= 2048:
                raise util.XSContainerException('__monitor_vm_events' +
                                                'is full')
    finally:
        try:
            ssh_client.close()
        except Exception:
            util.log.exception("Error when closing ssh_client for %r"
                               % ssh_client)
    log.info('__monitor_vm_events (%s) exited' % cmd)
def execute_docker_data_listen(session, vm_uuid, request,
                               stop_monitoring_request):
    """Generator: send a request to the VM's Docker daemon over TLS and
    yield response chunks until EOF or until stop_monitoring_request
    becomes truthy.

    Wraps ssl.SSLError in TlsException; EAGAIN/EINTR on the non-blocking
    socket are retried.
    """
    host = api_helper.get_suitable_vm_ip(session, vm_uuid, DOCKER_TLS_PORT)
    log.info("tls.execute_docker_listen_charbychar for VM %s, via %s"
             % (vm_uuid, host))
    asocket = _get_socket(session, vm_uuid)
    try:
        asocket.connect((host, DOCKER_TLS_PORT))
        if hasattr(asocket, 'version'):
            # Newer python versions provide the TLS version
            log.info("Connected VM %s using %s"
                     % (vm_uuid, asocket.version()))
        asocket.send(request)
        asocket.setblocking(0)
        while not stop_monitoring_request:
            rlist, _, _ = select.select([asocket.fileno()], [], [],
                                        constants.MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                read_data = asocket.recv(1024)
                if read_data == "":
                    # EOF - the daemon closed the stream
                    break
                yield read_data
            except IOError, exception:
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    raise
                sys.exc_clear()
                continue
    except ssl.SSLError, exception:
        raise TlsException(
            "Failed to communicate with Docker via TLS: %s" % exception,
            (sys.exc_info()[2]))
def create_config_drive(session, vmuuid, sruuid, userdata):
    """Create a config-drive ISO VDI on the given SR, attach it read-only
    to the VM (replacing any previous auto-generated config drive) and
    return the new VDI's uuid.
    """
    log.info("create_config_drive for vm %s on sr %s" % (vmuuid, sruuid))
    vmref = session.xenapi.VM.get_by_uuid(vmuuid)
    vmrecord = session.xenapi.VM.get_record(vmref)
    prepare_vm_for_config_drive(session, vmref, vmuuid)
    isofile = create_config_drive_iso(session, userdata, vmuuid)
    # Tag VDI and VBD so they can be recognised as auto-generated later
    other_config_keys = {OTHER_CONFIG_CONFIG_DRIVE_KEY: 'True'}
    try:
        configdisk_namelabel = 'Automatic Config Drive'
        vdiref = api_helper.import_disk(session, sruuid, isofile, 'raw',
                                        configdisk_namelabel,
                                        other_config_keys=other_config_keys)
    finally:
        # The local ISO file is no longer needed once imported (or failed)
        os.remove(isofile)
    remove_config_drive(session, vmrecord, configdisk_namelabel)
    vbdref = api_helper.create_vbd(session, vmref, vdiref, 'ro', False,
                                   other_config_keys=other_config_keys)
    if vmrecord['power_state'] == 'Running':
        session.xenapi.VBD.plug(vbdref)
    if re.search("\n\s*- ssh-rsa %XSCONTAINERRSAPUB%", userdata):
        # if %XSRSAPUB% isn't commented out, automatically mark the VM
        # as monitorable.
        docker_monitor_api.mark_monitorable_vm(vmuuid, session)
    vdirecord = session.xenapi.VDI.get_record(vdiref)
    return vdirecord['uuid']
def create_config_drive_iso(session, userdata_template, vmuuid):
    """Build an OpenStack-style ('config-2') config-drive ISO for the VM,
    bundling the customised user_data, its template, and the Linux guest
    agent from the tools ISO. Returns the path of the generated ISO file.

    All intermediate files/directories are removed in the finally block;
    the caller is responsible for deleting the returned ISO.
    """
    log.info("create_config_drive_iso for vm %s" % (vmuuid))
    umountrequired = False
    temptoolsisodir = None
    userdatafile = None
    latestfolder = None
    openstackfolder = None
    agentfilepaths = []
    agentpath = None
    tempisodir = None
    try:
        tempisodir = tempfile.mkdtemp()
        tempisofile = tempfile.mkstemp()[1]
        # add the userdata-file
        openstackfolder = os.path.join(tempisodir, 'openstack')
        latestfolder = os.path.join(openstackfolder, 'latest')
        os.makedirs(latestfolder)
        userdatafile = os.path.join(latestfolder, 'user_data')
        userdatatemplatefile = "%s.template" % userdatafile
        template_data = get_template_data(session, vmuuid)
        userdata = customize_userdata(userdata_template, template_data)
        util.write_file(userdatafile, userdata)
        util.write_file(userdatatemplatefile, userdata_template)
        log.debug("Userdata: %s" % (userdata))
        # Also add the Linux guest agent
        temptoolsisodir = tempfile.mkdtemp()
        tools_iso_path = find_latest_tools_iso_path()
        cmd = ['mount', '-o', 'loop', tools_iso_path, temptoolsisodir]
        util.runlocal(cmd)
        umountrequired = True
        agentpath = os.path.join(tempisodir, 'agent')
        os.makedirs(agentpath)
        agentfiles = [
            'xe-daemon', 'xe-linux-distribution',
            'xe-linux-distribution.service', 'xen-vcpu-hotplug.rules',
            'install.sh', 'versions.deb', 'versions.rpm', "versions.tgz"
        ]
        for filename in agentfiles:
            path = os.path.join(temptoolsisodir, 'Linux', filename)
            shutil.copy(path, agentpath)
            agentfilepaths.append(os.path.join(agentpath, filename))
        # Finally wrap up the iso
        util.make_iso('config-2', tempisodir, tempisofile)
    finally:
        # And tidy
        # NOTE(review): userdatatemplatefile is unbound here if mkdtemp/
        # mkstemp failed before its assignment - confirm acceptable.
        if umountrequired:
            cmd = ['umount', temptoolsisodir]
            util.runlocal(cmd)
        for path in [temptoolsisodir, userdatafile, userdatatemplatefile,
                     latestfolder, openstackfolder] + agentfilepaths + \
                [agentpath, tempisodir]:
            if path is not None:
                if os.path.isdir(path):
                    os.rmdir(path)
                elif os.path.isfile(path):
                    os.remove(path)
                else:
                    log.debug("create_config_drive_iso: Not tidying %s because"
                              " it could not be found" % (path))
    return tempisofile
def create_config_drive(session, vmuuid, sruuid, userdata):
    """Create a config-drive ISO VDI on the given SR, attach it read-only
    to the VM (replacing any previous auto-generated config drive) and
    return the new VDI's uuid.
    """
    log.info("create_config_drive for vm %s on sr %s" % (vmuuid, sruuid))
    vmref = session.xenapi.VM.get_by_uuid(vmuuid)
    vmrecord = session.xenapi.VM.get_record(vmref)
    prepare_vm_for_config_drive(session, vmref, vmuuid)
    isofile = create_config_drive_iso(session, userdata, vmuuid)
    # Tag VDI and VBD so they can be recognised as auto-generated later
    other_config_keys = {OTHER_CONFIG_CONFIG_DRIVE_KEY: 'True'}
    try:
        configdisk_namelabel = 'Automatic Config Drive'
        vdiref = api_helper.import_disk(session, sruuid, isofile, 'raw',
                                        configdisk_namelabel,
                                        other_config_keys=other_config_keys)
    finally:
        # The local ISO file is no longer needed once imported (or failed)
        os.remove(isofile)
    remove_config_drive(session, vmrecord, configdisk_namelabel)
    vbdref = api_helper.create_vbd(session, vmref, vdiref, 'ro', False,
                                   other_config_keys=other_config_keys)
    if vmrecord['power_state'] == 'Running':
        session.xenapi.VBD.plug(vbdref)
    if re.search("\n\s*- ssh-rsa %XSCONTAINERRSAPUB%", userdata):
        # if %XSRSAPUB% isn't commented out, automatically mark the VM
        # as monitorable.
        docker_monitor_api.mark_monitorable_vm(vmuuid, session)
    vdirecord = session.xenapi.VDI.get_record(vdiref)
    return vdirecord['uuid']
def export_disk(session, vdiuuid):
    """Export the given VDI to a fresh temporary .raw file via XAPI's
    export_raw_vdi HTTP handler and return the file's path.

    The caller is responsible for deleting the returned file.
    """
    log.info("export_disk vdi %s" % (vdiuuid))
    filename = tempfile.mkstemp(suffix='.raw')[1]
    # -L follows redirects, matching the sibling implementation: XAPI may
    # redirect the export request; -k accepts the local self-signed cert
    cmd = ['curl', '-L', '-k', '-o', filename,
           'https://localhost/export_raw_vdi?session_id=%s&vdi=%s&format=raw'
           % (session.handle, vdiuuid)]
    util.runlocal(cmd)
    return filename
def export_disk(session, vdiuuid):
    """Dump the given VDI into a fresh temporary .raw file via XAPI's
    export_raw_vdi HTTP handler and return the file's path.

    The caller is responsible for deleting the returned file.
    """
    log.info("export_disk vdi %s" % (vdiuuid))
    target_path = tempfile.mkstemp(suffix='.raw')[1]
    # -L follows redirects; -k accepts the local self-signed certificate
    export_url = (
        'https://localhost/export_raw_vdi?session_id=%s&vdi=%s&format=raw'
        % (session.handle, vdiuuid))
    util.runlocal(['curl', '-L', '-k', '-o', target_path, export_url])
    return target_path
def set_idrsa_secret(session):
    """Generate a fresh SSH keypair and record both halves as XAPI secrets
    referenced from the pool's other_config."""
    log.info("set_idrsa_secret is generating a new secret")
    (privateidrsa, publicidrsa) = util.create_idrsa()
    # Create the secrets first, then publish both uuids in one update
    private_uuid = create_secret_return_uuid(session, privateidrsa)
    public_uuid = create_secret_return_uuid(session, publicidrsa)
    set_pool_other_config_values(session, {
        XSCONTAINER_PRIVATE_SECRET_UUID: private_uuid,
        XSCONTAINER_PUBLIC_SECRET_UUID: public_uuid,
    })
def mark_monitorable_vm(vm_uuid, session):
    """ Ensure the VM has a REGISTRATION_KEY in vm:other_config.
    This key is used by XC to know whether monitoring is an option for
    this VM """
    log.info("mark_monitorable_vm %s" % (vm_uuid))
    client = XenAPIClient(session)
    thevm = VM(client, uuid=vm_uuid)
    other_config = thevm.get_other_config()
    if (docker_monitor.REGISTRATION_KEY not in other_config):
        # deregister_vm presumably writes the key with its "off" value,
        # which creates the key without enabling monitoring - that matches
        # the docstring's intent. TODO confirm against deregister_vm.
        deregister_vm(vm_uuid, session)
def execute_docker(session, vm_uuid, request):
    """Send one request to the VM's Docker daemon over TLS and collect the
    reply into 'result'.

    Wraps ssl.SSLError in TlsException.
    """
    host = api_helper.get_suitable_vm_ip(session, vm_uuid, DOCKER_TLS_PORT)
    log.info("tls.execute_docker for VM %s, via %s" % (vm_uuid, host))
    asocket = _get_socket(session, vm_uuid)
    try:
        asocket.connect((host, DOCKER_TLS_PORT))
        asocket.send(request)
        # A single recv() may return only part of the response (TCP gives
        # no message boundaries); loop until EOF or the buffer limit,
        # matching the sibling implementation of this function.
        result = ""
        while len(result) < constants.MAX_BUFFER_SIZE:
            result_iteration = asocket.recv(constants.MAX_BUFFER_SIZE
                                            - len(result))
            if result_iteration == "":
                break
            result += result_iteration
    except ssl.SSLError as exception:
        raise TlsException("Failed to communicate with Docker via TLS: %s"
                           % exception, (sys.exc_info()[2]))
    # NOTE(review): 'result' is not returned within this span - confirm
    # the trailing return/close lives outside the visible block.
def reinit_global_xapi_session():
    """Replace the shared global XAPI session (e.g. after XAPI restarted)
    and return the new session."""
    global GLOBAL_XAPI_SESSION
    # Make threadsafe; 'with' also guarantees the lock is released if
    # init_local_api_session() raises (plain acquire/release leaked it)
    with GLOBAL_XAPI_SESSION_LOCK:
        GLOBAL_XAPI_SESSION = init_local_api_session()
    log.info("The Global XAPI session has been updated.")
    return GLOBAL_XAPI_SESSION
def _wipe_monitor_error_message_if_needed(self): if self._error_message: try: log.info("_wipe_monitor_error_message needed for VM %s: %s" % (self.get_uuid(), self._error_message)) api_helper.destroy_message(self.get_session(), self._error_message) except XenAPI.Failure: # this can happen if the user deleted the message in the # meantime manually, or if XAPI is down pass self._error_message = None
def _wipe_monitor_error_message_if_needed(self): if self._error_message: try: log.info("_wipe_monitor_error_message needed for VM %s: %s" % (self.get_uuid(), self._error_message)) api_helper.destroy_message(self.get_session(), self._error_message) except XenAPI.Failure: # this can happen if the user deleted the message in the # meantime manually, or if XAPI is down pass self._error_message = None
def reinit_global_xapi_session():
    """Replace the shared global XAPI session (e.g. after XAPI restarted)
    and return the new session."""
    global GLOBAL_XAPI_SESSION
    # Make threadsafe; 'with' also guarantees the lock is released if
    # init_local_api_session() raises (plain acquire/release leaked it)
    with GLOBAL_XAPI_SESSION_LOCK:
        GLOBAL_XAPI_SESSION = init_local_api_session()
    log.info("The Global XAPI session has been updated.")
    return GLOBAL_XAPI_SESSION
def remove_config_drive(session, vmrecord, configdisk_namelabel):
    """Destroy any auto-generated config-drive VBD/VDI attached to the VM
    described by vmrecord.

    NOTE(review): configdisk_namelabel is unused in the visible code;
    detection relies on the VDI other_config marker key - confirm.
    """
    for vbd in vmrecord['VBDs']:
        vbdrecord = session.xenapi.VBD.get_record(vbd)
        vdirecord = None
        if vbdrecord['VDI'] != api_helper.NULLREF:
            vdirecord = session.xenapi.VDI.get_record(vbdrecord['VDI'])
            if OTHER_CONFIG_CONFIG_DRIVE_KEY in vdirecord['other_config']:
                log.info("remove_config_drive will destroy vdi %s"
                         % (vdirecord['uuid']))
                # Detach before destroying if the drive is plugged in
                if vbdrecord['currently_attached']:
                    session.xenapi.VBD.unplug(vbd)
                session.xenapi.VBD.destroy(vbd)
                session.xenapi.VDI.destroy(vbdrecord['VDI'])
def _send_monitor_error_message(self):
    """Send a XAPI message explaining why monitoring this VM fails and
    remember its reference so it can be wiped later.

    Any previously sent message is destroyed first; XAPI failures are
    tolerated (XAPI may not be running).
    """
    self._wipe_monitor_error_message_if_needed()
    try:
        session = self.get_session()
        vmuuid = self.get_uuid()
        cause = remote_helper.determine_error_cause(session, vmuuid)
        log.info("_send_monitor_error_message for VM %s: %s"
                 % (vmuuid, cause))
        self._error_message = api_helper.send_message(
            session, vmuuid, "Container Management cannot monitor VM",
            cause)
    except (XenAPI.Failure):
        # this can happen when XAPI is not running.
        pass
def set_for_vm(session, vm_uuid, client_cert_content, client_key_content,
               ca_cert_content):
    """Store the TLS client certificate/key and CA certificate as XAPI
    secrets and reference their uuids from the VM's other_config.

    Any previously stored TLS secrets for this VM are destroyed first.
    """
    _destroy_for_vm(session, vm_uuid)
    log.info("set_vm_tls_secrets is updating certs and keys for %s"
             % (vm_uuid))
    # Create the three secrets, then publish all uuids in one update
    cert_uuid = api_helper.create_secret_return_uuid(session,
                                                     client_cert_content)
    key_uuid = api_helper.create_secret_return_uuid(session,
                                                    client_key_content)
    ca_uuid = api_helper.create_secret_return_uuid(session, ca_cert_content)
    api_helper.update_vm_other_config(session, vm_uuid, {
        XSCONTAINER_TLS_CLIENT_CERT: cert_uuid,
        XSCONTAINER_TLS_CLIENT_KEY: key_uuid,
        XSCONTAINER_TLS_CA_CERT: ca_uuid,
    })
def remove_config_drive(session, vmrecord, configdisk_namelabel):
    """Destroy any auto-generated config-drive VBD/VDI attached to the VM
    described by vmrecord.

    NOTE(review): configdisk_namelabel is unused in the visible code;
    detection relies on the other_config marker on either the VDI or the
    VBD - confirm.
    """
    for vbd in vmrecord['VBDs']:
        vbdrecord = session.xenapi.VBD.get_record(vbd)
        vdirecord = None
        if vbdrecord['VDI'] != api_helper.NULLREF:
            vdirecord = session.xenapi.VDI.get_record(vbdrecord['VDI'])
            if ((OTHER_CONFIG_CONFIG_DRIVE_KEY in vdirecord['other_config']
                 or OTHER_CONFIG_CONFIG_DRIVE_KEY
                    in vbdrecord['other_config'])):
                log.info("remove_config_drive will destroy vdi %s"
                         % (vdirecord['uuid']))
                # Detach before destroying if the drive is plugged in
                if vbdrecord['currently_attached']:
                    session.xenapi.VBD.unplug(vbd)
                session.xenapi.VBD.destroy(vbd)
                session.xenapi.VDI.destroy(vbdrecord['VDI'])
def _destroy_for_vm(session, vm_uuid):
    """Remove the VM's TLS secrets (unless shared with another VM) and
    delete any temporary certificate files on disk."""
    log.info("destroy_tls_secrets is wiping certs and keys for %s"
             % (vm_uuid))
    other_config = api_helper.get_vm_other_config(session, vm_uuid)
    for key in XSCONTAINER_TLS_KEYS:
        if key not in other_config:
            continue
        # Remove only if no VM other than this one still uses the secret
        remove_if_refcount_less_or_equal(session, other_config[key], 1)
    for tls_path in _get_temptlspaths(vm_uuid):
        if not os.path.exists(tls_path):
            continue
        if os.path.isdir(tls_path):
            os.rmdir(tls_path)
        else:
            os.remove(tls_path)
def _destroy_for_vm(session, vm_uuid):
    """Remove the VM's TLS secrets (unless shared with another VM) and
    delete any temporary certificate files on disk."""
    log.info("destroy_tls_secrets is wiping certs and keys for %s"
             % (vm_uuid))
    other_config = api_helper.get_vm_other_config(session, vm_uuid)
    for key in XSCONTAINER_TLS_KEYS:
        if key not in other_config:
            continue
        # Remove only if no VM other than this one still uses the secret
        remove_if_refcount_less_or_equal(session, other_config[key], 1)
    for tls_path in _get_temptlspaths(vm_uuid):
        if not os.path.exists(tls_path):
            continue
        if os.path.isdir(tls_path):
            os.rmdir(tls_path)
        else:
            os.remove(tls_path)
def set_idrsa_secret(session):
    """Generate a fresh SSH keypair, store both halves as XAPI secrets and
    record the secret uuids in the pool's other_config."""
    log.info("set_idrsa_secret is generating a new secret")
    (privateidrsa, publicidrsa) = util.create_idrsa()
    private_secret_ref = session.xenapi.secret.create(
        {'value': '%s' % (privateidrsa)})
    public_secret_ref = session.xenapi.secret.create(
        {'value': '%s' % (publicidrsa)})
    private_secret_record = session.xenapi.secret.get_record(
        private_secret_ref)
    public_secret_record = session.xenapi.secret.get_record(
        public_secret_ref)
    # Publish the secret uuids on the (single) pool object
    pool_ref = session.xenapi.pool.get_all()[0]
    other_config = session.xenapi.pool.get_other_config(pool_ref)
    other_config[XSCONTAINER_PRIVATE_SECRET_UUID] = private_secret_record[
        'uuid']
    other_config[XSCONTAINER_PUBLIC_SECRET_UUID] = \
        public_secret_record['uuid']
    session.xenapi.pool.set_other_config(pool_ref, other_config)
def _send_monitor_error_message(self):
    """Send a XAPI message explaining why monitoring this VM fails and
    remember its reference so it can be wiped later.

    Any previously sent message is destroyed first; XAPI failures are
    tolerated (XAPI may not be running).
    """
    self._wipe_monitor_error_message_if_needed()
    try:
        session = self.get_session()
        vmuuid = self.get_uuid()
        cause = docker.determine_error_cause(session, vmuuid)
        log.info("_send_monitor_error_message for VM %s: %s"
                 % (vmuuid, cause))
        self._error_message = api_helper.send_message(
            session, vmuuid, "Container Management cannot monitor VM",
            cause)
    except (XenAPI.Failure):
        # this can happen when XAPI is not running.
        pass
def set_idrsa_secret(session):
    """Generate a fresh SSH keypair, store both halves as XAPI secrets and
    record the secret uuids in the pool's other_config."""
    log.info("set_idrsa_secret is generating a new secret")
    (privateidrsa, publicidrsa) = util.create_idrsa()
    private_secret_ref = session.xenapi.secret.create(
        {'value': '%s' % (privateidrsa)})
    public_secret_ref = session.xenapi.secret.create(
        {'value': '%s' % (publicidrsa)})
    private_secret_record = session.xenapi.secret.get_record(
        private_secret_ref)
    public_secret_record = session.xenapi.secret.get_record(
        public_secret_ref)
    # Publish the secret uuids on the (single) pool object
    pool_ref = session.xenapi.pool.get_all()[0]
    other_config = session.xenapi.pool.get_other_config(pool_ref)
    other_config[XSCONTAINER_PRIVATE_SECRET_UUID] = private_secret_record[
        'uuid']
    other_config[XSCONTAINER_PUBLIC_SECRET_UUID] = \
        public_secret_record['uuid']
    session.xenapi.pool.set_other_config(pool_ref, other_config)
def set_for_vm(session, vm_uuid, client_cert_content, client_key_content,
               ca_cert_content):
    """Store the TLS client certificate/key and CA certificate as XAPI
    secrets and reference their uuids from the VM's other_config.

    Any previously stored TLS secrets for this VM are destroyed first.
    """
    _destroy_for_vm(session, vm_uuid)
    log.info("set_vm_tls_secrets is updating certs and keys for %s"
             % (vm_uuid))
    # Create the three secrets, then publish all uuids in one update
    cert_uuid = api_helper.create_secret_return_uuid(session,
                                                     client_cert_content)
    key_uuid = api_helper.create_secret_return_uuid(session,
                                                    client_key_content)
    ca_uuid = api_helper.create_secret_return_uuid(session, ca_cert_content)
    api_helper.update_vm_other_config(session, vm_uuid, {
        XSCONTAINER_TLS_CLIENT_CERT: cert_uuid,
        XSCONTAINER_TLS_CLIENT_KEY: key_uuid,
        XSCONTAINER_TLS_CA_CERT: ca_uuid,
    })
def execute_docker(session, vm_uuid, request):
    """Send one request to the VM's Docker daemon over TLS and collect the
    reply into 'result'.

    Wraps ssl.SSLError in TlsException.
    """
    host = api_helper.get_suitable_vm_ip(session, vm_uuid, DOCKER_TLS_PORT)
    log.info("tls.execute_docker for VM %s, via %s" % (vm_uuid, host))
    asocket = _get_socket(session, vm_uuid)
    try:
        asocket.connect((host, DOCKER_TLS_PORT))
        asocket.send(request)
        # Loop because a single recv() may return only part of the reply;
        # stop on EOF or once MAX_BUFFER_SIZE bytes have been collected.
        result = ""
        while len(result) < constants.MAX_BUFFER_SIZE:
            result_iteration = asocket.recv(constants.MAX_BUFFER_SIZE
                                            - len(result))
            if result_iteration == "":
                break
            result += result_iteration
    except ssl.SSLError, exception:
        raise TlsException(
            "Failed to communicate with Docker via TLS: %s" % exception,
            (sys.exc_info()[2]))
    # NOTE(review): 'result' is not returned within this span - confirm
    # the trailing return/close lives outside the visible block.
def remove_if_refcount_less_or_equal(session, tls_secret_uuid,
                                     refcount_threshold):
    """Destroy the TLS secret unless more than refcount_threshold VMs
    still reference it; deletion failures are logged and swallowed."""
    refcount = _get_refcount(session, tls_secret_uuid)
    if refcount > refcount_threshold:
        # Still referenced by more VMs than the threshold allows - keep it
        log.info("refcount for secret uuid %s is larger than threshold "
                 "with %d" % (tls_secret_uuid, refcount))
        return
    try:
        secret_ref = session.xenapi.secret.get_by_uuid(tls_secret_uuid)
        session.xenapi.secret.destroy(secret_ref)
        log.info("Deleted secret uuid %s with refcount %d"
                 % (tls_secret_uuid, refcount))
    except XenAPI.Failure:
        log.exception("Failed to delete secret uuid %s, moving on..."
                      % (tls_secret_uuid))
def remove_if_refcount_less_or_equal(session, tls_secret_uuid,
                                     refcount_threshold):
    """Destroy the TLS secret unless more than refcount_threshold VMs
    still reference it; deletion failures are logged and swallowed."""
    refcount = _get_refcount(session, tls_secret_uuid)
    if refcount > refcount_threshold:
        # Still referenced by more VMs than the threshold allows - keep it
        log.info("refcount for secret uuid %s is larger than threshold "
                 "with %d" % (tls_secret_uuid, refcount))
        return
    try:
        secret_ref = session.xenapi.secret.get_by_uuid(tls_secret_uuid)
        session.xenapi.secret.destroy(secret_ref)
        log.info("Deleted secret uuid %s with refcount %d"
                 % (tls_secret_uuid, refcount))
    except XenAPI.Failure:
        log.exception("Failed to delete secret uuid %s, moving on..."
                      % (tls_secret_uuid))
def get_config_drive_configuration(session, vdiuuid):
    """Return the user_data template stored on an existing config-drive
    VDI by exporting the disk and loop-mounting the contained ISO."""
    log.info("get_config_drive_configuration from vdi %s" % (vdiuuid))
    tempdir = None
    umountrequired = False
    filename = api_helper.export_disk(session, vdiuuid)
    try:
        tempdir = tempfile.mkdtemp()
        cmd = ['mount', '-o', 'loop', '-t', 'iso9660', filename, tempdir]
        util.runlocal(cmd)
        umountrequired = True
        userdatapath_template = os.path.join(
            tempdir, 'openstack', 'latest', 'user_data.template')
        content = util.read_file(userdatapath_template)
    finally:
        # Always remove the exported image and unmount/remove the mount dir
        os.remove(filename)
        if umountrequired:
            cmd = ['umount', tempdir]
            util.runlocal(cmd)
        if tempdir:
            os.rmdir(tempdir)
    return content
def prepare_ssh_client(session, vmuuid):
    """Build and connect a paramiko SSHClient for the given VM.

    Looks up the configured username and a reachable IP for the VM,
    authenticates with the pool's private key and verifies the host key
    via MyHostKeyPolicy.  Returns the connected client.

    Raises AuthenticationException when the key is rejected; SshException
    raised by the host-key policy is passed through unchanged.
    """
    username = api_helper.get_vm_xscontainer_username(session, vmuuid)
    host = api_helper.get_suitable_vm_ip(session, vmuuid, SSH_PORT)
    log.info("prepare_ssh_client for vm %s, via %s@%s"
             % (vmuuid, username, host))
    client = paramiko.SSHClient()
    pkey = paramiko.rsakey.RSAKey.from_private_key(
        StringIO.StringIO(api_helper.get_idrsa_secret_private(session)))
    # Start from an empty host-key store and delegate verification
    client.get_host_keys().clear()
    client.set_missing_host_key_policy(MyHostKeyPolicy(session, vmuuid))
    try:
        client.connect(host, port=SSH_PORT, username=username, pkey=pkey,
                       look_for_keys=False)
    except SshException:
        # This exception is already improved - leave it as it is
        raise
    except paramiko.AuthenticationException:
        # Fix: "except ... as" replaces the Python-2-only comma form
        # (the bound exception was unused anyway).
        message = ("prepare_ssh_client failed to authenticate with private key"
                   " on VM %s" % (vmuuid))
        log.info(message)
        raise AuthenticationException(message)
    # Fix: callers (execute_ssh and friends) use the return value as the
    # connected client - the original fell off the end returning None.
    return client
def get_config_drive_configuration(session, vdiuuid):
    """Return the user_data template stored on a config-drive VDI.

    The VDI is exported to a local file, loop-mounted as iso9660, and
    openstack/latest/user_data.template is read from it.  The export,
    the mount and the temporary directory are cleaned up in the finally
    block regardless of errors.
    """
    log.info("get_config_drive_configuration from vdi %s" % (vdiuuid))
    tempdir = None
    umountrequired = False
    filename = api_helper.export_disk(session, vdiuuid)
    try:
        tempdir = tempfile.mkdtemp()
        cmd = ['mount', '-o', 'loop', '-t', 'iso9660', filename, tempdir]
        util.runlocal(cmd)
        # Only attempt umount during cleanup once mount has succeeded
        umountrequired = True
        userdatapath_template = os.path.join(tempdir, 'openstack', 'latest',
                                             'user_data.template')
        content = util.read_file(userdatapath_template)
    finally:
        # Removing the backing file first is fine: the loop mount keeps it
        # open until the umount below.
        os.remove(filename)
        if umountrequired:
            cmd = ['umount', tempdir]
            util.runlocal(cmd)
        if tempdir:
            os.rmdir(tempdir)
    return content
def execute_ssh(session, vmuuid, cmd, stdin_input=None):
    """Run cmd on the VM via ssh and return its stdout (bounded).

    cmd may be a string or a list of arguments (joined with spaces);
    stdin_input, when given, is written to the remote stdin.  Raises
    SshException when more than the read bound is returned, when the
    remote command exits non-zero, or - wrapped - on any other error.
    """
    # The heavy weight is docker ps with plenty of containers.
    # Assume 283 bytes per container.
    # 300KB should be enough for 1085 containers.
    max_read_size = 300 * 1024
    client = None
    try:
        try:
            client = prepare_ssh_client(session, vmuuid)
            if isinstance(cmd, list):
                cmd = ' '.join(cmd)
            stripped_stdin_input = stdin_input
            if stripped_stdin_input:
                stripped_stdin_input = stripped_stdin_input.strip()
            log.info("execute_ssh will run '%s' with stdin '%s' on vm %s"
                     % (cmd, stripped_stdin_input, vmuuid))
            stdin, stdout, _ = client.exec_command(cmd)
            if stdin_input:
                stdin.write(stdin_input)
                stdin.channel.shutdown_write()
            output = stdout.read(max_read_size)
            # A further readable byte means the bound was exceeded
            if stdout.read(1) != "":
                raise SshException("too much data was returned when executing"
                                   "'%s'" % (cmd))
            returncode = stdout.channel.recv_exit_status()
            if returncode != 0:
                log.info("execute_ssh '%s' on vm %s exited with rc %d: Stdout:"
                         " %s" % (cmd, vmuuid, returncode, stdout))
                raise SshException("Returncode for '%s' is not 0" % cmd)
            return output
        except SshException:
            # This exception is already improved - leave it as it is
            raise
        except Exception as exception:
            # Fix: "except ... as" replaces the Python-2-only comma form;
            # reraise as SshException, keeping the original traceback.
            raise SshException("execute_ssh: %s" % exception,
                               (sys.exc_info()[2]))
    finally:
        if client:
            client.close()
def _monitoring_loop(self):
    """Retry loop that keeps this VM monitored until asked to stop.

    Each pass refreshes the docker info/version records, then blocks in
    __monitor_vm_events listening for docker events.  XenAPI and
    XSContainer failures cause a sleep-and-retry; once failures have
    persisted past MONITOR_TIMEOUT_WARNING_S a (single) error message is
    sent to the user.
    """
    vmuuid = self.get_uuid()
    log.info("monitor_loop handles VM %s" % (vmuuid))
    # start_time anchors the "how long have we been failing" warning below
    start_time = time.time()
    docker.wipe_docker_other_config(self)
    # keep track of when to wipe other_config to save CPU-time
    while not self._stop_monitoring_request:
        try:
            docker.update_docker_info(self)
            docker.update_docker_version(self)
            # if we got past the above, it's about time to delete the
            # error message, as all appears to be working again
            self._wipe_monitor_error_message_if_needed()
            try:
                try:
                    self.__monitor_vm_events()
                finally:
                    # Always clear stale docker state when event
                    # listening ends, however it ends
                    docker.wipe_docker_other_config(self)
            except (XenAPI.Failure, util.XSContainerException):
                log.exception("__monitor_vm_events threw an exception, "
                              "will retry")
                # Re-raise so the outer handler applies the shared
                # warning/sleep/retry policy
                raise
        except (XenAPI.Failure, util.XSContainerException):
            passed_time = time.time() - start_time
            if (not self._error_message
                    and passed_time >= MONITOR_TIMEOUT_WARNING_S):
                # Only warn once (guarded by _error_message) and only
                # after failures have persisted long enough
                self._send_monitor_error_message()
            log.info("Could not connect to VM %s, will retry" % (vmuuid))
        if not self._stop_monitoring_request:
            time.sleep(MONITORRETRYSLEEPINS)
    # Make sure that we don't leave back error messages for VMs that are
    # not monitored anymore
    self._wipe_monitor_error_message_if_needed()
    log.info("monitor_loop returns from handling vm %s" % (vmuuid))
def execute_ssh(session, vmuuid, cmd, stdin_input=None):
    """Run cmd on the VM via ssh and return its stdout (bounded).

    cmd may be a string or a list of arguments (joined with spaces);
    stdin_input, when given, is written to the remote stdin.  Raises
    SshException when more than the read bound is returned, when the
    remote command exits non-zero, or - wrapped - on any other error.
    """
    # The heavy weight is docker ps with plenty of containers.
    # Assume 283 bytes per container.
    # 300KB should be enough for 1085 containers.
    max_read_size = 300 * 1024
    client = None
    try:
        try:
            client = prepare_ssh_client(session, vmuuid)
            if isinstance(cmd, list):
                cmd = ' '.join(cmd)
            stripped_stdin_input = stdin_input
            if stripped_stdin_input:
                stripped_stdin_input = stripped_stdin_input.strip()
            log.info("execute_ssh will run '%s' with stdin '%s' on vm %s"
                     % (cmd, stripped_stdin_input, vmuuid))
            stdin, stdout, _ = client.exec_command(cmd)
            if stdin_input:
                stdin.write(stdin_input)
                stdin.channel.shutdown_write()
            output = stdout.read(max_read_size)
            # A further readable byte means the bound was exceeded
            if stdout.read(1) != "":
                raise SshException("too much data was returned when executing"
                                   "'%s'" % (cmd))
            returncode = stdout.channel.recv_exit_status()
            if returncode != 0:
                log.info("execute_ssh '%s' on vm %s exited with rc %d: Stdout:"
                         " %s" % (cmd, vmuuid, returncode, stdout))
                raise SshException("Returncode for '%s' is not 0" % cmd)
            return output
        except SshException:
            # This exception is already improved - leave it as it is
            raise
        except Exception as exception:
            # Fix: "except ... as" replaces the Python-2-only comma form;
            # reraise as SshException, keeping the original traceback.
            raise SshException("execute_ssh: %s" % exception,
                               (sys.exc_info()[2]))
    finally:
        if client:
            client.close()
def _monitoring_loop(self):
    """Retry loop that keeps this VM monitored until asked to stop.

    Each pass refreshes the docker info/version records, then blocks in
    __monitor_vm_events listening for docker events.  XenAPI and
    XSContainer failures cause a sleep-and-retry; once failures have
    persisted past MONITOR_TIMEOUT_WARNING_S a (single) error message is
    sent to the user.
    """
    vmuuid = self.get_uuid()
    log.info("monitor_loop handles VM %s" % (vmuuid))
    # start_time anchors the "how long have we been failing" warning below
    start_time = time.time()
    docker.wipe_docker_other_config(self)
    # keep track of when to wipe other_config to save CPU-time
    while not self._stop_monitoring_request:
        try:
            docker.update_docker_info(self)
            docker.update_docker_version(self)
            # if we got past the above, it's about time to delete the
            # error message, as all appears to be working again
            self._wipe_monitor_error_message_if_needed()
            try:
                try:
                    self.__monitor_vm_events()
                finally:
                    # Always clear stale docker state when event
                    # listening ends, however it ends
                    docker.wipe_docker_other_config(self)
            except (XenAPI.Failure, util.XSContainerException):
                log.exception("__monitor_vm_events threw an exception, "
                              "will retry")
                # Re-raise so the outer handler applies the shared
                # warning/sleep/retry policy
                raise
        except (XenAPI.Failure, util.XSContainerException):
            passed_time = time.time() - start_time
            if (not self._error_message
                    and passed_time >= MONITOR_TIMEOUT_WARNING_S):
                # Only warn once (guarded by _error_message) and only
                # after failures have persisted long enough
                self._send_monitor_error_message()
            log.info("Could not connect to VM %s, will retry" % (vmuuid))
        if not self._stop_monitoring_request:
            time.sleep(MONITORRETRYSLEEPINS)
    # Make sure that we don't leave back error messages for VMs that are
    # not monitored anymore
    self._wipe_monitor_error_message_if_needed()
    log.info("monitor_loop returns from handling vm %s" % (vmuuid))
def _interact_with_api(session, vmuuid, request_type, request,
                       message_error=False):
    """Send a docker API request to the VM over ssh and parse the reply.

    Returns the HTTP response body on a 2xx status.  On any other status
    an XSContainerException is raised; when message_error is True a
    XenAPI message is additionally sent to surface the failure to the
    user.
    """
    provided_stdin = prepare_request_stdin(request_type, request)
    stdout = ssh_helper.execute_ssh(session, vmuuid, prepare_request_cmd(),
                                    stdin_input=provided_stdin)
    # Split the raw HTTP response at the header/body boundary
    headerend = stdout.index('\r\n\r\n')
    header = stdout[:headerend]
    body = stdout[headerend + 4:]
    # ToDo: Should use re
    # Status line is e.g. "HTTP/1.1 200 OK"
    headersplits = header.split('\r\n', 2)[0].split(' ')
    # protocol = headersplits[0]
    statuscode = headersplits[1]
    if statuscode[0] != '2':
        # this did not work
        status = ' '.join(headersplits[2:])
        failure_title = "Container Management Error"
        failure_body = body.strip()
        if failure_body == "":
            if statuscode == "304":
                # 304 does not have a body and is quite common.
                failure_body = ("The requested operation is currently not "
                                "possible. Please try again later.")
            else:
                failure_body = ("The requested operation failed.")
            # Append the status code to the substituted default message
            failure_body = failure_body + " (" + statuscode + ")"
        # A "Title: detail" shaped body supplies its own message title
        if ":" in failure_body:
            (failure_title, failure_body) = failure_body.split(":", 1)
        if message_error:
            api_helper.send_message(session, vmuuid, failure_title,
                                    failure_body)
        message = ("Request '%s' led to status %s - %s: %s"
                   % (request, status, failure_title, failure_body))
        log.info(message)
        raise util.XSContainerException(message)
    return body
def execute_ssh(session, vmuuid, cmd, stdin_input=None):
    """Run cmd on the VM via ssh and return its stdout.

    cmd may be a string or a list of arguments (joined with spaces);
    stdin_input, when given, is fed to the remote process.  Raises
    SshException when the output exceeds constants.MAX_BUFFER_SIZE,
    when the command exits non-zero, or - wrapped - on any other error.
    """
    ssh_client = None
    try:
        try:
            ssh_client = prepare_ssh_client(session, vmuuid)
            if isinstance(cmd, list):
                cmd = ' '.join(cmd)
            logged_stdin = stdin_input.strip() if stdin_input else stdin_input
            log.info("execute_ssh will run '%s' with stdin '%s' on vm %s"
                     % (cmd, logged_stdin, vmuuid))
            stdin, stdout, _ = ssh_client.exec_command(cmd)
            if stdin_input:
                stdin.write(stdin_input)
                stdin.channel.shutdown_write()
            output = stdout.read(constants.MAX_BUFFER_SIZE)
            # One more readable byte means the buffer bound was exceeded
            if stdout.read(1) != "":
                raise SshException("too much data was returned when executing"
                                   "'%s'" % (cmd))
            returncode = stdout.channel.recv_exit_status()
            if returncode != 0:
                log.info("execute_ssh '%s' on vm %s exited with rc %d: Stdout:"
                         " %s" % (cmd, vmuuid, returncode, stdout))
                raise SshException("Returncode for '%s' is not 0" % cmd)
            return output
        except SshException:
            # Already a descriptive exception - pass through unchanged
            raise
        except Exception as exception:
            # Wrap anything else as SshException, keeping the traceback
            raise SshException("execute_ssh: %s" % exception,
                               (sys.exc_info()[2]))
    finally:
        if ssh_client:
            ssh_client.close()
def execute_ssh(session, vmuuid, cmd, stdin_input=None):
    """Run cmd on the VM via ssh and return its stdout (bounded).

    cmd may be a string or a list of arguments (joined with spaces);
    stdin_input, when given, is written to the remote stdin.  Raises
    SshException when more than constants.MAX_BUFFER_SIZE bytes are
    returned, when the command exits non-zero, or - wrapped - on any
    other error.
    """
    client = None
    try:
        try:
            client = prepare_ssh_client(session, vmuuid)
            if isinstance(cmd, list):
                cmd = ' '.join(cmd)
            stripped_stdin_input = stdin_input
            if stripped_stdin_input:
                stripped_stdin_input = stripped_stdin_input.strip()
            log.info("execute_ssh will run '%s' with stdin '%s' on vm %s"
                     % (cmd, stripped_stdin_input, vmuuid))
            stdin, stdout, _ = client.exec_command(cmd)
            if stdin_input:
                stdin.write(stdin_input)
                stdin.channel.shutdown_write()
            output = stdout.read(constants.MAX_BUFFER_SIZE)
            # A further readable byte means the bound was exceeded
            if stdout.read(1) != "":
                raise SshException("too much data was returned when executing"
                                   "'%s'" % (cmd))
            returncode = stdout.channel.recv_exit_status()
            if returncode != 0:
                log.info("execute_ssh '%s' on vm %s exited with rc %d: Stdout:"
                         " %s" % (cmd, vmuuid, returncode, stdout))
                raise SshException("Returncode for '%s' is not 0" % cmd)
            return output
        except SshException:
            # This exception is already improved - leave it as it is
            raise
        except Exception as exception:
            # Fix: "except ... as" replaces the Python-2-only comma form;
            # reraise as SshException, keeping the original traceback.
            raise SshException("execute_ssh: %s" % exception,
                               (sys.exc_info()[2]))
    finally:
        if client:
            client.close()
def import_disk(session, sruuid, filename, fileformat, namelabel,
                other_config_keys={}):
    """Import a local disk image into a new VDI on the given SR.

    fileformat is either "vhd" or "raw"; raw images are padded up to the
    next 2MB boundary first.  Entries of other_config_keys are copied
    into the VDI's other_config.  Returns the new VDI's XenAPI ref.
    Note: the default {} is never mutated here - it is only read.
    """
    log.info("import_disk file %s on sr %s" % (filename, sruuid))
    sr_ref = session.xenapi.SR.get_by_uuid(sruuid)
    if fileformat == "vhd":
        sizeinmb = util.runlocal(['vhd-util', 'query', '-n', filename,
                                  '-v'])[1]
        sizeinb = int(sizeinmb) * 1024 * 1024
    elif fileformat == "raw":
        sizeinb = os.path.getsize(filename)
        # Workaround: can't otherwise import disks that aren't aligned to 2MB
        alignment = 2 * 1024 * 1024
        newsizeinb = sizeinb + (alignment - sizeinb % alignment)
        if sizeinb < newsizeinb:
            log.info('Resizing raw disk from size %d to %d'
                     % (sizeinb, newsizeinb))
            with open(filename, "r+b") as filehandle:
                # Seek-and-write one byte to extend the sparse file
                filehandle.seek(newsizeinb - 1)
                filehandle.write("\0")
            sizeinb = os.path.getsize(filename)
    else:
        raise Exception('Invalid fileformat: %s ' % fileformat)
    log.info("Preparing vdi of size %d" % (sizeinb))
    vdiconf = {'SR': sr_ref,
               'virtual_size': str(sizeinb),
               'type': 'system',
               'sharable': False,
               'read_only': False,
               'other_config': {},
               'name_label': namelabel}
    vdiref = session.xenapi.VDI.create(vdiconf)
    other_config = session.xenapi.VDI.get_other_config(vdiref)
    other_config.update(other_config_keys)
    session.xenapi.VDI.set_other_config(vdiref, other_config)
    vdiuuid = session.xenapi.VDI.get_record(vdiref)['uuid']
    upload_cmd = ['curl', '-k', '--upload', filename,
                  'https://localhost/import_raw_vdi?session_id=%s&vdi=%s&format=%s'
                  % (session.handle, vdiuuid, fileformat)]
    util.runlocal(upload_cmd)
    return vdiref
def execute_docker_data_listen(session, vmuuid, request,
                               stop_monitoring_request):
    """Generator that streams a docker API response from the VM char by char.

    Sends request over an ssh session and yields the response one
    character at a time until EOF, or until stop_monitoring_request
    becomes truthy (presumably flipped by another thread - TODO confirm).
    The ssh client is always closed on exit.
    """
    ssh_client = prepare_ssh_client(session, vmuuid)
    try:
        cmd = prepare_request_cmd()
        log.info(
            "execute_docker_listen_charbychar is running '%s' on VM '%s'"
            % (cmd, vmuuid))
        stdin, stdout, _ = ssh_client.exec_command(cmd)
        stdin.write(request)
        # set unblocking io for select.select
        stdout_fd = stdout.channel.fileno()
        fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
                    os.O_NONBLOCK | fcntl.fcntl(stdout_fd, fcntl.F_GETFL))
        while not stop_monitoring_request:
            # Poll so the stop flag is re-checked at a bounded interval
            rlist, _, _ = select.select([stdout_fd], [], [],
                                        constants.MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                read_data = stdout.read(1)
                if read_data == "":
                    # EOF - the remote process closed its stdout
                    break
                yield read_data
            except IOError as exception:
                log.info("IOError")
                # EAGAIN/EINTR are expected on a non-blocking fd; anything
                # else is a real error
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    log.info("Cleared")
                    raise
                sys.exc_clear()
    finally:
        try:
            ssh_client.close()
        except Exception:
            util.log.exception("Error when closing ssh_client for %r"
                               % ssh_client)
    log.info('execute_docker_listen_charbychar (%s) exited' % cmd)
def execute_docker_data_listen(session, vmuuid, request,
                               stop_monitoring_request):
    """Generator that streams a docker API response from the VM char by char.

    Sends request over an ssh session and yields the response one
    character at a time until EOF, or until stop_monitoring_request
    becomes truthy.  The ssh client is always closed on exit.
    """
    ssh_client = prepare_ssh_client(session, vmuuid)
    try:
        cmd = prepare_request_cmd()
        log.info("execute_docker_listen_charbychar is running '%s' on VM '%s'"
                 % (cmd, vmuuid))
        stdin, stdout, _ = ssh_client.exec_command(cmd)
        stdin.write(request)
        # set unblocking io for select.select
        stdout_fd = stdout.channel.fileno()
        fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
                    os.O_NONBLOCK | fcntl.fcntl(stdout_fd, fcntl.F_GETFL))
        while not stop_monitoring_request:
            # Poll so the stop flag is re-checked at a bounded interval
            rlist, _, _ = select.select([stdout_fd], [], [],
                                        constants.MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                read_data = stdout.read(1)
                if read_data == "":
                    # EOF - the remote process closed its stdout
                    break
                yield read_data
            except IOError as exception:
                # Fix: "except ... as" replaces the Python-2-only comma form.
                log.info("IOError")
                # EAGAIN/EINTR are expected on a non-blocking fd; anything
                # else is a real error
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    log.info("Cleared")
                    raise
                sys.exc_clear()
    finally:
        try:
            ssh_client.close()
        except Exception:
            util.log.exception("Error when closing ssh_client for %r"
                               % ssh_client)
    log.info('execute_docker_listen_charbychar (%s) exited' % cmd)
def import_disk(session, sruuid, filename, fileformat, namelabel,
                other_config_keys={}):
    """Import a local disk image into a new VDI on the given SR.

    fileformat is either "vhd" or "raw"; raw images are padded up to the
    next 2MB boundary first.  Entries of other_config_keys are copied
    into the VDI's other_config.  Returns the new VDI's XenAPI ref.
    Note the mutable default {} is only iterated here, never mutated.
    """
    log.info("import_disk file %s on sr %s" % (filename, sruuid))
    targetsr = session.xenapi.SR.get_by_uuid(sruuid)
    sizeinb = None
    if fileformat == "vhd":
        # vhd-util reports the virtual size in MB on its second line
        cmd = ['vhd-util', 'query', '-n', filename, '-v']
        sizeinmb = util.runlocal(cmd)[1]
        sizeinb = int(sizeinmb) * 1024 * 1024
    elif fileformat == "raw":
        sizeinb = os.path.getsize(filename)
        # Workaround: can't otherwise import disks that aren't aligned to 2MB
        newsizeinb = sizeinb + \
            ((2 * 1024 * 1024) - sizeinb % (2 * 1024 * 1024))
        if sizeinb < newsizeinb:
            log.info('Resizing raw disk from size %d to %d' % (sizeinb,
                                                               newsizeinb))
            # Seek-and-write one byte to extend the sparse file
            filehandle = open(filename, "r+b")
            filehandle.seek(newsizeinb - 1)
            filehandle.write("\0")
            filehandle.close()
            sizeinb = os.path.getsize(filename)
    else:
        raise Exception('Invalid fileformat: %s ' % fileformat)
    log.info("Preparing vdi of size %d" % (sizeinb))
    vdiconf = {'SR': targetsr,
               'virtual_size': str(sizeinb),
               'type': 'system',
               'sharable': False,
               'read_only': False,
               'other_config': {},
               'name_label': namelabel}
    vdiref = session.xenapi.VDI.create(vdiconf)
    other_config = session.xenapi.VDI.get_other_config(vdiref)
    for key, value in other_config_keys.iteritems():
        other_config[key] = value
    session.xenapi.VDI.set_other_config(vdiref, other_config)
    vdiuuid = session.xenapi.VDI.get_record(vdiref)['uuid']
    # Upload the image content via XAPI's import_raw_vdi HTTP handler
    cmd = ['curl', '-k', '--upload', filename,
           'https://localhost/import_raw_vdi?session_id=%s&vdi=%s&format=%s'
           % (session.handle, vdiuuid, fileformat)]
    util.runlocal(cmd)
    return vdiref
def start_monitoring(self, vm_ref):
    """Create a MonitoredVM for vm_ref, register it and start its monitor."""
    log.info("Starting to monitor VM: %s" % vm_ref)
    monitored_vm = MonitoredVM(self.host.client, ref=vm_ref)
    self.register(monitored_vm)
    monitored_vm.start_monitoring()
def __monitor_vm_events(self):
    """Listen for docker events on the VM and dispatch them as they arrive.

    Opens an ssh session running the docker API command, requests
    GET /events, then reads the chunked response one character at a
    time, skipping the HTTP header and brace-counting to find complete
    JSON event objects, which are passed to handle_docker_event.  Runs
    until EOF or until _stop_monitoring_request is set.
    """
    session = self.get_session()
    vmuuid = self.get_uuid()
    ssh_client = ssh_helper.prepare_ssh_client(session, vmuuid)
    try:
        cmd = docker.prepare_request_cmd()
        log.info("__monitor_vm_events is running '%s' on VM '%s'"
                 % (cmd, vmuuid))
        stdin, stdout, _ = ssh_client.exec_command(cmd)
        stdin.write(docker.prepare_request_stdin('GET', '/events'))
        stdin.channel.shutdown_write()
        # Expose the client so a stop request can tear the session down
        self._ssh_client = ssh_client
        # Now that we are listening for events, get the latest state
        docker.update_docker_ps(self)
        # set unblocking io for select.select
        stdout_fd = stdout.channel.fileno()
        fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
                    os.O_NONBLOCK | fcntl.fcntl(stdout_fd, fcntl.F_GETFL))
        # @todo: should make this more sane
        skippedheader = False   # becomes True once "\r\n\r\n" was consumed
        openbrackets = 0        # depth of unmatched '{' in the JSON stream
        data = ""               # accumulates the current JSON object
        while not self._stop_monitoring_request:
            # Poll so the stop flag is re-checked at a bounded interval
            rlist, _, _ = select.select([stdout_fd], [], [],
                                        MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                # @todo: should read more than one char at once
                lastread = stdout.read(1)
            except IOError, exception:
                # EAGAIN/EINTR are expected on a non-blocking fd
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    raise
                sys.exc_clear()
                continue
            if lastread == '':
                # EOF - the remote process closed its stdout
                break
            data = data + lastread
            if (not skippedheader and lastread == "\n"
                    and len(data) >= 4 and data[-4:] == "\r\n\r\n"):
                # End of the HTTP header - discard it and start on the body
                data = ""
                skippedheader = True
            elif lastread == '{':
                openbrackets = openbrackets + 1
            elif lastread == '}':
                openbrackets = openbrackets - 1
                if openbrackets == 0:
                    # Balanced braces - data holds one complete JSON event
                    event = simplejson.loads(data)
                    self.handle_docker_event(event)
                    data = ""
            if len(data) >= 2048:
                # NOTE(review): the concatenation below yields
                # "__monitor_vm_eventsis full" - missing space.
                raise util.XSContainerException('__monitor_vm_events' +
                                                'is full')
    finally:
        try:
            ssh_client.close()
        except Exception:
            util.log.exception("Error when closing ssh_client for %r"
                               % ssh_client)
    log.info('__monitor_vm_events (%s) exited' % cmd)
def start_monitoring(self, vm_ref):
    """Create a MonitoredVM for vm_ref, register it with this monitor and
    start its monitoring thread."""
    log.info("Starting to monitor VM: %s" % vm_ref)
    thevm = MonitoredVM(self.host.client, ref=vm_ref)
    self.register(thevm)
    thevm.start_monitoring()
    return