def update_service_capabilities(self, service_name, host, capabilities):
    """Update the per-service capabilities based on this notification."""
    logging.debug(_("Received %(service_name)s service update from "
                    "%(host)s.") % locals())
    service_caps = self.service_states.get(host, {})
    capabilities["timestamp"] = utils.utcnow()  # Reported time
    service_caps[service_name] = capabilities
    self.service_states[host] = service_caps

def snapshot(self, instance, image_id):
    """Create snapshot from a running VM instance.

    :param instance: instance to be snapshotted
    :param image_id: id of image to upload to

    Steps involved in a XenServer snapshot:

    1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
       creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
       Snapshot VHD

    2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
       a 'base-copy' VDI. The base_copy is immutable and may be chained
       with other base_copies. If chained, the base_copies coalesce
       together, so, we must wait for this coalescing to occur to get a
       stable representation of the data on disk.

    3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
       that will bundle the VHDs together and then push the bundle into
       Glance.
    """
    template_vm_ref = None
    try:
        template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
        # call plugin to ship snapshot off to glance
        VMHelper.upload_image(
                self._session, instance, template_vdi_uuids, image_id)
    finally:
        if template_vm_ref:
            self._destroy(instance, template_vm_ref, shutdown=False,
                          destroy_kernel_ramdisk=False)

    logging.debug(_("Finished snapshot and upload for VM %s"), instance)

def report_state(self):
    """Update the state of this service in the datastore."""
    ctxt = context.get_admin_context()
    zone = FLAGS.node_availability_zone
    state_catalog = {}
    try:
        try:
            service_ref = db.service_get(ctxt, self.service_id)
        except exception.NotFound:
            logging.debug(_('The service database object disappeared, '
                            'Recreating it.'))
            self._create_service_ref(ctxt)
            service_ref = db.service_get(ctxt, self.service_id)

        state_catalog['report_count'] = service_ref['report_count'] + 1
        if zone != service_ref['availability_zone']:
            state_catalog['availability_zone'] = zone

        db.service_update(ctxt, self.service_id, state_catalog)

        # TODO(termie): make this pattern be more elegant.
        if getattr(self, 'model_disconnected', False):
            self.model_disconnected = False
            logging.error(_('Recovered model server connection!'))

    # TODO(vish): this should probably only catch connection errors
    except Exception:  # pylint: disable=W0702
        if not getattr(self, 'model_disconnected', False):
            self.model_disconnected = True
            logging.exception(_('model server went away'))

def upload_image(cls, context, session, instance, vdi_uuids, image_id,
                 options=None):
    """Requests that the Glance plugin bundle the specified VDIs and
    push them into Glance using the specified human-friendly name.
    """
    # NOTE(sirp): Currently we only support uploading images as VHD, there
    # is no RAW equivalent (yet)
    logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                    " ID %(image_id)s") % locals())

    os_type = instance.os_type or FLAGS.default_os_type

    glance_host, glance_port = glance.pick_glance_api_server()
    params = {'vdi_uuids': vdi_uuids,
              'image_id': image_id,
              'glance_host': glance_host,
              'glance_port': glance_port,
              'sr_path': cls.get_sr_path(session),
              'os_type': os_type,
              'auth_token': getattr(context, 'auth_token', None),
              'options': options}

    kwargs = {'params': pickle.dumps(params)}
    task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
    session.wait_for_task(task, instance.id)

def update_service_capabilities(self, service_name, host, capabilities):
    """Update the per-service capabilities based on this notification."""
    logging.debug(_("Received %(service_name)s service update from "
                    "%(host)s: %(capabilities)s") % locals())
    service_caps = self.service_states.get(host, {})
    service_caps[service_name] = capabilities
    self.service_states[host] = service_caps

def add_console(self, context, instance_id, password=None,
                port=None, **kwargs):
    instance = self.db.instance_get(context, instance_id)
    host = instance['host']
    name = instance['name']
    pool = self.get_pool_for_instance_host(context, host)
    try:
        console = self.db.console_get_by_pool_instance(
                context, pool['id'], instance_id)
    except exception.NotFound:
        logging.debug(_('Adding console'))
        if not password:
            password = utils.generate_password(8)
        if not port:
            port = self.driver.get_port(context)
        console_data = {'instance_name': name,
                        'instance_id': instance_id,
                        'password': password,
                        'pool_id': pool['id']}
        if port:
            console_data['port'] = port
        console = self.db.console_create(context, console_data)
        self.driver.setup_console(context, console)
    return console['id']

def upload_image(cls, context, session, instance, vdi_uuids, image_id):
    """Requests that the Glance plugin bundle the specified VDIs and
    push them into Glance using the specified human-friendly name.
    """
    # NOTE(sirp): Currently we only support uploading images as VHD, there
    # is no RAW equivalent (yet)
    logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                    " ID %(image_id)s") % locals())

    os_type = instance.os_type or FLAGS.default_os_type

    glance_host, glance_port = \
            glance_image_service.pick_glance_api_server()
    params = {'vdi_uuids': vdi_uuids,
              'image_id': image_id,
              'glance_host': glance_host,
              'glance_port': glance_port,
              'sr_path': cls.get_sr_path(session),
              'os_type': os_type,
              'auth_token': getattr(context, 'auth_token', None)}

    kwargs = {'params': pickle.dumps(params)}
    task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
    session.wait_for_task(task, instance.id)

def create_vifs(self, instance, networks=None):
    """Creates vifs for an instance."""
    vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
    logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref)
    if networks is None:
        # an admin context is needed for the db lookup; the original
        # snippet referenced an undefined admin_context name here
        admin_context = context.get_admin_context()
        networks = db.network_get_all_by_instance(admin_context,
                                                  instance['id'])
    # TODO(tr3buchet) - remove comment in multi-nic
    # this bit here about creating the vifs will be updated
    # in multi-nic to handle multiple IPs on the same network
    # and multiple networks
    # for now it works as there is only one of each
    for network in networks:
        bridge = network['bridge']
        network_ref = \
            NetworkHelper.find_network_with_bridge(self._session, bridge)

        if network_ref:
            try:
                device = "1" if instance._rescue else "0"
            except AttributeError:
                device = "0"
            VMHelper.create_vif(self._session, vm_opaque_ref, network_ref,
                                instance.mac_address, device)

def report_state(self):
    """Update the state of this service in the datastore."""
    ctxt = context.get_admin_context()
    try:
        try:
            service_ref = db.service_get(ctxt, self.service_id)
        except exception.NotFound:
            logging.debug(_("The service database object disappeared, "
                            "Recreating it."))
            self._create_service_ref(ctxt)
            service_ref = db.service_get(ctxt, self.service_id)

        db.service_update(ctxt,
                          self.service_id,
                          {'report_count': service_ref['report_count'] + 1})

        # TODO(termie): make this pattern be more elegant.
        if getattr(self, "model_disconnected", False):
            self.model_disconnected = False
            logging.error(_("Recovered model server connection!"))

    # TODO(vish): this should probably only catch connection errors
    except Exception:  # pylint: disable=W0702
        if not getattr(self, "model_disconnected", False):
            self.model_disconnected = True
            logging.exception(_("model server went away"))

def _inject_network_info(self, instance, network_info, vm_ref=None):
    """Generate the network info and make calls to place it into the
    xenstore and the xenstore param list.

    vm_ref can be passed in because it will sometimes be different than
    what VMHelper.lookup(session, instance.name) will find (ex: rescue)
    """
    logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
    if vm_ref:
        # this function raises if vm_ref is not a vm_opaque_ref
        self._session.get_xenapi().VM.get_record(vm_ref)
    else:
        vm_ref = VMHelper.lookup(self._session, instance.name)

    for (network, info) in network_info:
        location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
        self.write_to_param_xenstore(vm_ref, {location: info})
        try:
            # TODO(tr3buchet): fix function call after refactor
            #self.write_to_xenstore(vm_ref, location, info)
            self._make_plugin_call('xenstore.py', 'write_record', instance,
                                   location, {'value': json.dumps(info)},
                                   vm_ref)
        except KeyError:
            # catch KeyError for domid if instance isn't running
            pass

def __call__(self, req):
    try:
        user_id = req.headers['X_USER']
    except KeyError:
        logging.debug("X_USER not found in request")
        return webob.exc.HTTPUnauthorized()

    # get the roles
    roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]

    if 'X_TENANT_ID' in req.headers:
        # This is the new header since Keystone went to ID/Name
        project_id = req.headers['X_TENANT_ID']
    else:
        # This is for legacy compatibility
        project_id = req.headers['X_TENANT']

    # Get the auth token
    auth_token = req.headers.get('X_AUTH_TOKEN',
                                 req.headers.get('X_STORAGE_TOKEN'))

    # Build a context, including the auth_token...
    remote_address = getattr(req, 'remote_address', '127.0.0.1')
    remote_address = req.remote_addr
    if FLAGS.use_forwarded_for:
        remote_address = req.headers.get('X-Forwarded-For', remote_address)
    ctx = context.RequestContext(user_id,
                                 project_id,
                                 roles=roles,
                                 auth_token=auth_token,
                                 strategy='keystone',
                                 remote_address=remote_address)

    req.environ['nova.context'] = ctx
    return self.application

def snapshot(self, instance, image_id):
    """Create snapshot from a running VM instance.

    :param instance: instance to be snapshotted
    :param image_id: id of image to upload to

    Steps involved in a XenServer snapshot:

    1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
       creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
       Snapshot VHD

    2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
       a 'base-copy' VDI. The base_copy is immutable and may be chained
       with other base_copies. If chained, the base_copies coalesce
       together, so, we must wait for this coalescing to occur to get a
       stable representation of the data on disk.

    3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
       that will bundle the VHDs together and then push the bundle into
       Glance.
    """
    template_vm_ref = None
    try:
        template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
        # call plugin to ship snapshot off to glance
        VMHelper.upload_image(
                self._session, instance, template_vdi_uuids, image_id)
    finally:
        if template_vm_ref:
            self._destroy(instance, template_vm_ref, shutdown=False,
                          destroy_kernel_ramdisk=False)

    logging.debug(_("Finished snapshot and upload for VM %s"), instance)

def add_console(self, context, instance_id, password=None,
                port=None, **kwargs):
    instance = self.db.instance_get(context, instance_id)
    host = instance['host']
    name = instance['name']
    pool = self.get_pool_for_instance_host(context, host)
    try:
        console = self.db.console_get_by_pool_instance(context,
                                                       pool['id'],
                                                       instance_id)
    except exception.NotFound:
        logging.debug(_('Adding console'))
        if not password:
            password = utils.generate_password(8)
        if not port:
            port = self.driver.get_port(context)
        console_data = {'instance_name': name,
                        'instance_id': instance_id,
                        'password': password,
                        'pool_id': pool['id']}
        if port:
            console_data['port'] = port
        console = self.db.console_create(context, console_data)
        self.driver.setup_console(context, console)
    return console['id']

def _poll_zone(zone):
    """Eventlet worker to poll a zone."""
    logging.debug(_("Polling zone: %s") % zone.api_url)
    try:
        zone.update_metadata(_call_novaclient(zone))
    except Exception, e:
        zone.log_error(traceback.format_exc())

def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # grab tables and (column for dropping later)
    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    c = instances.columns['mac_address']

    # add interface column to networks table
    # values will have to be set manually before running nova
    try:
        networks.create_column(interface)
    except Exception:
        logging.error(_("interface column not added to networks table"))
        raise

    # create virtual_interfaces table
    try:
        virtual_interfaces.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
        raise

    # add virtual_interface_id column to fixed_ips table
    try:
        fixed_ips.create_column(virtual_interface_id)
    except Exception:
        logging.error(_("VIF column not added to fixed_ips table"))
        raise

    # populate the virtual_interfaces table
    # extract data from existing instance and fixed_ip tables
    s = select([instances.c.id, instances.c.mac_address,
                fixed_ips.c.network_id],
               fixed_ips.c.instance_id == instances.c.id)
    keys = ('instance_id', 'address', 'network_id')
    join_list = [dict(zip(keys, row)) for row in s.execute()]
    logging.debug(_("join list for moving mac_addresses |%s|"), join_list)

    # insert data into the table
    if join_list:
        i = virtual_interfaces.insert()
        i.execute(join_list)

    # populate the fixed_ips virtual_interface_id column
    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
               fixed_ips.c.instance_id != None)

    for row in s.execute():
        m = select([virtual_interfaces.c.id]).\
            where(virtual_interfaces.c.instance_id == row['instance_id']).\
            as_scalar()
        u = fixed_ips.update().values(virtual_interface_id=m).\
            where(fixed_ips.c.id == row['id'])
        u.execute()

    # drop the mac_address column from instances
    c.drop()

def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # grab tables and (column for dropping later)
    instances = Table('instances', meta, autoload=True)
    networks = Table('networks', meta, autoload=True)
    fixed_ips = Table('fixed_ips', meta, autoload=True)
    c = instances.columns['mac_address']

    # add interface column to networks table
    # values will have to be set manually before running nova
    try:
        networks.create_column(interface)
    except Exception:
        logging.error(_("interface column not added to networks table"))
        raise

    # create virtual_interfaces table
    try:
        virtual_interfaces.create()
    except Exception:
        logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
        raise

    # add virtual_interface_id column to fixed_ips table
    try:
        fixed_ips.create_column(virtual_interface_id)
    except Exception:
        logging.error(_("VIF column not added to fixed_ips table"))
        raise

    # populate the virtual_interfaces table
    # extract data from existing instance and fixed_ip tables
    s = select([instances.c.id, instances.c.mac_address,
                fixed_ips.c.network_id],
               fixed_ips.c.instance_id == instances.c.id)
    keys = ('instance_id', 'address', 'network_id')
    join_list = [dict(zip(keys, row)) for row in s.execute()]
    logging.debug(_("join list for moving mac_addresses |%s|"), join_list)

    # insert data into the table
    if join_list:
        i = virtual_interfaces.insert()
        i.execute(join_list)

    # populate the fixed_ips virtual_interface_id column
    s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
               fixed_ips.c.instance_id != None)

    for row in s.execute():
        m = select([virtual_interfaces.c.id]).\
            where(virtual_interfaces.c.instance_id == row['instance_id']).\
            as_scalar()
        u = fixed_ips.update().values(virtual_interface_id=m).\
            where(fixed_ips.c.id == row['id'])
        u.execute()

    # drop the mac_address column from instances
    c.drop()

def update_service_capabilities(self, service_name, host, capabilities):
    """Update the per-service capabilities based on this notification."""
    logging.debug(_("Received %(service_name)s service update from "
                    "%(host)s: %(capabilities)s") % locals())
    service_caps = self.service_states.get(host, {})
    service_caps[service_name] = capabilities
    self.service_states[host] = service_caps

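# Usage sketch (illustrative only; the capability values below are
# assumptions, not taken from the snippets above): a compute host
# periodically publishes its capabilities, and the scheduler keeps the
# latest report per (host, service) in self.service_states.
#
#     scheduler_manager.update_service_capabilities(
#         'compute', 'host1',
#         {'vcpus': 8, 'memory_mb': 16384, 'disk_gb': 250})
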
def _xvp_restart(self):
    logging.debug(_('Restarting xvp'))
    if not self._xvp_check_running():
        logging.debug(_('xvp not running...'))
        self._xvp_start()
    else:
        pid = self._xvp_pid()
        os.kill(pid, signal.SIGUSR1)

def ping(self, context):
    """Ping should be called periodically to update zone status."""
    diff = utils.utcnow() - self.last_zone_db_check
    if diff.seconds >= FLAGS.zone_db_check_interval:
        logging.debug(_("Updating zone cache from db."))
        self.last_zone_db_check = utils.utcnow()
        self._refresh_from_db(context)
    self._poll_zones(context)

def ping(self, context=None):
    """Ping should be called periodically to update zone status."""
    diff = utils.utcnow() - self.last_zone_db_check
    if diff.seconds >= FLAGS.zone_db_check_interval:
        logging.debug(_("Updating zone cache from db."))
        self.last_zone_db_check = utils.utcnow()
        self._refresh_from_db(context)
    self._poll_zones(context)

def start(self):
    vcs_string = version.version_string_with_vcs()
    logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                  {'topic': self.topic, 'vcs_string': vcs_string})
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt,
                                             self.host,
                                             self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        self._create_service_ref(ctxt)

    if 'nova-compute' == self.binary:
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.Connection.instance(new=True)
    logging.debug("Creating Consumer connection for Service %s" %
                  self.topic)

    # Share this same connection for these Consumers
    consumer_all = rpc.TopicAdapterConsumer(
            connection=self.conn,
            topic=self.topic,
            proxy=self)
    consumer_node = rpc.TopicAdapterConsumer(
            connection=self.conn,
            topic='%s.%s' % (self.topic, self.host),
            proxy=self)
    fanout = rpc.FanoutAdapterConsumer(
            connection=self.conn,
            topic=self.topic,
            proxy=self)
    consumer_set = rpc.ConsumerSet(
            connection=self.conn,
            consumer_list=[consumer_all, consumer_node, fanout])

    # Wait forever, processing these consumers
    def _wait():
        try:
            consumer_set.wait()
        finally:
            consumer_set.close()

    self.consumer_set_thread = greenthread.spawn(_wait)

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval, now=False)
        self.timers.append(pulse)

    if self.periodic_interval:
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval, now=False)
        self.timers.append(periodic)

def start(self):
    vcs_string = version.version_string_with_vcs()
    logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                  {'topic': self.topic, 'vcs_string': vcs_string})
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt,
                                             self.host,
                                             self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        self._create_service_ref(ctxt)

    if 'nova-compute' == self.binary:
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    logging.debug("Creating Consumer connection for Service %s" %
                  self.topic)

    # Share this same connection for these Consumers
    consumer_all = rpc.create_consumer(self.conn, self.topic, self,
                                       fanout=False)

    node_topic = '%s.%s' % (self.topic, self.host)
    consumer_node = rpc.create_consumer(self.conn, node_topic, self,
                                        fanout=False)

    fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True)

    consumers = [consumer_all, consumer_node, fanout]
    consumer_set = rpc.create_consumer_set(self.conn, consumers)

    # Wait forever, processing these consumers
    def _wait():
        try:
            consumer_set.wait()
        finally:
            consumer_set.close()

    self.consumer_set_thread = greenthread.spawn(_wait)

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval, now=False)
        self.timers.append(pulse)

    if self.periodic_interval:
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval, now=False)
        self.timers.append(periodic)

def import_class(import_str):
    """Returns a class from a string including module and class."""
    mod_str, _sep, class_str = import_str.rpartition('.')
    try:
        __import__(mod_str)
        return getattr(sys.modules[mod_str], class_str)
    except (ImportError, ValueError, AttributeError), exc:
        logging.debug(_('Inner Exception: %s'), exc)
        raise exception.NotFound(_('Class %s cannot be found') % class_str)

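# Usage sketch (the dotted path below is an example, chosen for
# illustration and not referenced elsewhere in these snippets):
# import_class resolves a "module.ClassName" string to the class object,
# which is how drivers are typically selected from configuration.
#
#     driver_class = import_class('nova.console.fake.FakeConsoleProxy')
#     driver = driver_class()
#
# A missing module or class raises exception.NotFound after the inner
# exception is logged at debug level.
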
def _poll_zone(zone):
    """Eventlet worker to poll a zone."""
    name = zone.name
    url = zone.api_url
    logging.debug(_("Polling zone: %(name)s @ %(url)s") % locals())
    try:
        zone.update_metadata(_call_novaclient(zone))
    except Exception, e:
        zone.log_error(traceback.format_exc())

def remove_console(self, context, console_id, **_kwargs):
    try:
        console = self.db.console_get(context, console_id)
    except exception.NotFound:
        logging.debug(_('Tried to remove non-existant console '
                        '%(console_id)s.') %
                      {'console_id': console_id})
        return
    self.db.console_delete(context, console_id)
    self.driver.teardown_console(context, console)

def remove_console(self, context, console_id, **_kwargs):
    try:
        console = self.db.console_get(context, console_id)
    except exception.NotFound:
        logging.debug(_('Tried to remove non-existant console '
                        '%(console_id)s.') %
                      {'console_id': console_id})
        return
    self.db.console_delete(context, console_id)
    self.driver.teardown_console(context, console)

def _xvp_stop(self):
    logging.debug(_('Stopping xvp'))
    pid = self._xvp_pid()
    if not pid:
        return
    try:
        os.kill(pid, signal.SIGTERM)
    except OSError:
        # if it's already not running, no problem.
        pass

def _xvp_start(self):
    if self._xvp_check_running():
        return
    logging.debug(_('Starting xvp'))
    try:
        utils.execute('xvp',
                      '-p', FLAGS.console_xvp_pid,
                      '-c', FLAGS.console_xvp_conf,
                      '-l', FLAGS.console_xvp_log)
    except exception.ProcessExecutionError, err:
        logging.error(_('Error starting xvp: %s') % err)

def _xvp_start(self):
    if self._xvp_check_running():
        return
    logging.debug(_("Starting xvp"))
    try:
        utils.execute('xvp -p %s -c %s -l %s' %
                      (FLAGS.console_xvp_pid,
                       FLAGS.console_xvp_conf,
                       FLAGS.console_xvp_log))
    except exception.ProcessExecutionError, err:
        logging.error(_("Error starting xvp: %s") % err)

def inject_network_info(self, instance):
    """Generate the network info and make calls to place it into the
    xenstore and the xenstore param list.
    """
    # TODO(tr3buchet) - remove comment in multi-nic
    # I've decided to go ahead and consider multiple IPs and networks
    # at this stage even though they aren't implemented because these will
    # be needed for multi-nic and there was no sense writing it for single
    # network/single IP and then having to turn around and re-write it
    vm_opaque_ref = self._get_vm_opaque_ref(instance.id)
    logging.debug(_("injecting network info to xenstore for vm: |%s|"),
                  vm_opaque_ref)
    admin_context = context.get_admin_context()
    IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
    networks = db.network_get_all_by_instance(admin_context,
                                              instance['id'])
    for network in networks:
        network_IPs = [ip for ip in IPs if ip.network_id == network.id]

        def ip_dict(ip):
            return {
                "ip": ip.address,
                "netmask": network["netmask"],
                "enabled": "1"}

        def ip6_dict(ip6):
            return {
                "ip": ip6.addressV6,
                "netmask": ip6.netmaskV6,
                "gateway": ip6.gatewayV6,
                "enabled": "1"}

        mac_id = instance.mac_address.replace(':', '')
        location = 'vm-data/networking/%s' % mac_id
        mapping = {
            'label': network['label'],
            'gateway': network['gateway'],
            'mac': instance.mac_address,
            'dns': [network['dns']],
            'ips': [ip_dict(ip) for ip in network_IPs],
            'ip6s': [ip6_dict(ip) for ip in network_IPs]}

        self.write_to_param_xenstore(vm_opaque_ref, {location: mapping})

        try:
            self.write_to_xenstore(vm_opaque_ref, location,
                                   mapping['location'])
        except KeyError:
            # catch KeyError for domid if instance isn't running
            pass

    return networks

def wait():
    # After we've loaded up all our dynamic bits, check
    # whether we should print help
    flags.DEFINE_flag(flags.HelpFlag())
    flags.DEFINE_flag(flags.HelpshortFlag())
    flags.DEFINE_flag(flags.HelpXMLFlag())
    FLAGS.ParseNewFlags()
    logging.debug(_('Full set of FLAGS:'))
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        logging.debug('%(flag)s : %(flag_get)s' % locals())
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()

def _get_snapshot(self, instance):
    # TODO(sirp): Add quiesce and VSS locking support when Windows support
    # is added
    logging.debug(_("Starting snapshot for VM %s"), instance)
    vm_ref = VMHelper.lookup(self._session, instance.name)

    label = "%s-snapshot" % instance.name
    try:
        template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
            self._session, instance.id, vm_ref, label)
        return template_vm_ref, template_vdi_uuids
    except self.XenAPI.Failure, exc:
        logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
                      % locals())
        return

def _rebuild_xvp_conf(self, context):
    logging.debug(_("Rebuilding xvp conf"))
    pools = [pool for pool in
             db.console_pool_get_all_by_host_type(context, self.host,
                                                  self.console_type)
             if pool['consoles']]
    if not pools:
        logging.debug("No console pools!")
        self._xvp_stop()
        return

    conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
                 'pools': pools,
                 'pass_encode': self.fix_console_password}
    config = str(Template(self.xvpconf_template, searchList=[conf_data]))
    self._write_conf(config)
    self._xvp_restart()

def create_vifs(self, vm_ref, network_info):
    """Creates vifs for an instance."""
    logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)

    # this function raises if vm_ref is not a vm_opaque_ref
    self._session.get_xenapi().VM.get_record(vm_ref)

    for device, (network, info) in enumerate(network_info):
        mac_address = info['mac']
        bridge = network['bridge']
        rxtx_cap = info.pop('rxtx_cap')
        network_ref = \
            NetworkHelper.find_network_with_bridge(self._session, bridge)

        VMHelper.create_vif(self._session, vm_ref, network_ref,
                            mac_address, device, rxtx_cap)

def _get_snapshot(self, instance):
    # TODO(sirp): Add quiesce and VSS locking support when Windows support
    # is added
    logging.debug(_("Starting snapshot for VM %s"), instance)
    vm_ref = VMHelper.lookup(self._session, instance.name)

    label = "%s-snapshot" % instance.name
    try:
        template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
            self._session, instance.id, vm_ref, label)
        return template_vm_ref, template_vdi_uuids
    except self.XenAPI.Failure, exc:
        logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
                      % locals())
        return

def upload_image(cls, session, instance_id, vdi_uuids, image_id):
    """Requests that the Glance plugin bundle the specified VDIs and
    push them into Glance using the specified human-friendly name.
    """
    logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                    " ID %(image_id)s") % locals())

    params = {"vdi_uuids": vdi_uuids,
              "image_id": image_id,
              "glance_host": FLAGS.glance_host,
              "glance_port": FLAGS.glance_port}

    kwargs = {"params": pickle.dumps(params)}
    task = session.async_call_plugin("glance", "put_vdis", kwargs)
    session.wait_for_task(instance_id, task)

def _rebuild_xvp_conf(self, context):
    logging.debug(_("Rebuilding xvp conf"))
    pools = [pool for pool in
             db.console_pool_get_all_by_host_type(context, self.host,
                                                  self.console_type)
             if pool['consoles']]
    if not pools:
        logging.debug("No console pools!")
        self._xvp_stop()
        return

    conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port,
                 'pools': pools,
                 'pass_encode': self.fix_console_password}
    config = str(Template(self.xvpconf_template, searchList=[conf_data]))
    self._write_conf(config)
    self._xvp_restart()

def upload_image(cls, session, instance_id, vdi_uuids, image_id):
    """Requests that the Glance plugin bundle the specified VDIs and
    push them into Glance using the specified human-friendly name.
    """
    # NOTE(sirp): Currently we only support uploading images as VHD, there
    # is no RAW equivalent (yet)
    logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                    " ID %(image_id)s") % locals())

    params = {'vdi_uuids': vdi_uuids,
              'image_id': image_id,
              'glance_host': FLAGS.glance_host,
              'glance_port': FLAGS.glance_port,
              'sr_path': cls.get_sr_path(session)}

    kwargs = {'params': pickle.dumps(params)}
    task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
    session.wait_for_task(task, instance_id)

def start(self):
    vcs_string = version.version_string_with_vcs()
    logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                  {'topic': self.topic, 'vcs_string': vcs_string})
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        service_ref = db.service_get_by_args(ctxt,
                                             self.host,
                                             self.binary)
        self.service_id = service_ref['id']
    except exception.NotFound:
        self._create_service_ref(ctxt)

    if 'nova-compute' == self.binary:
        self.manager.update_available_resource(ctxt)

    self.conn = rpc.create_connection(new=True)
    logging.debug("Creating Consumer connection for Service %s" %
                  self.topic)

    # Share this same connection for these Consumers
    self.conn.create_consumer(self.topic, self, fanout=False)

    node_topic = '%s.%s' % (self.topic, self.host)
    self.conn.create_consumer(node_topic, self, fanout=False)

    self.conn.create_consumer(self.topic, self, fanout=True)

    # Consume from all consumers in a thread
    self.conn.consume_in_thread()

    if self.report_interval:
        pulse = utils.LoopingCall(self.report_state)
        pulse.start(interval=self.report_interval, now=False)
        self.timers.append(pulse)

    if self.periodic_interval:
        periodic = utils.LoopingCall(self.periodic_tasks)
        periodic.start(interval=self.periodic_interval, now=False)
        self.timers.append(periodic)

def inject_network_info(self, instance, vm_ref, network_info):
    """Generate the network info and make calls to place it into the
    xenstore and the xenstore param list.
    """
    logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)

    # this function raises if vm_ref is not a vm_opaque_ref
    self._session.get_xenapi().VM.get_record(vm_ref)

    for (network, info) in network_info:
        location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
        self.write_to_param_xenstore(vm_ref, {location: info})
        try:
            # TODO(tr3buchet): fix function call after refactor
            #self.write_to_xenstore(vm_ref, location, info)
            self._make_plugin_call('xenstore.py', 'write_record', instance,
                                   location, {'value': json.dumps(info)},
                                   vm_ref)
        except KeyError:
            # catch KeyError for domid if instance isn't running
            pass

def wait():
    # After we've loaded up all our dynamic bits, check
    # whether we should print help
    flags.DEFINE_flag(flags.HelpFlag())
    flags.DEFINE_flag(flags.HelpshortFlag())
    flags.DEFINE_flag(flags.HelpXMLFlag())
    FLAGS.ParseNewFlags()
    logging.debug(_('Full set of FLAGS:'))
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        if ("_password" in flag or "_key" in flag or
                (flag == "sql_connection" and "mysql:" in flag_get)):
            logging.debug('%(flag)s : FLAG SET ' % locals())
        else:
            logging.debug('%(flag)s : %(flag_get)s' % locals())
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()

def _run_wsgi(paste_config_file, apis):
    logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
    apps = []
    for api in apis:
        config = wsgi.load_paste_configuration(paste_config_file, api)
        if config is None:
            logging.debug(_("No paste configuration for app: %s"), api)
            continue
        logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
        logging.info(_("Running %s API"), api)
        app = wsgi.load_paste_app(paste_config_file, api)
        apps.append((app,
                     getattr(FLAGS, "%s_listen_port" % api),
                     getattr(FLAGS, "%s_listen" % api)))
    if len(apps) == 0:
        logging.error(_("No known API applications configured in %s."),
                      paste_config_file)
        return

    server = wsgi.Server()
    for app in apps:
        server.start(*app)
    return server

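# Usage sketch (assumption: the flag name and API names below are shown
# only to illustrate the "%s_listen"/"%s_listen_port" flag pattern used
# above; they are not defined in these snippets): a caller builds a server
# for the configured APIs and then waits on it.
#
#     server = _run_wsgi(FLAGS.api_paste_config, ['ec2', 'osapi'])
#     if server:
#         server.wait()
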
def create(self, context, instance_type,
           image_id, kernel_id=None, ramdisk_id=None,
           min_count=1, max_count=1,
           display_name='', display_description='',
           key_name=None, key_data=None, security_group='default',
           availability_zone=None, user_data=None, metadata={},
           injected_files=None):
    """Create the number and type of instances requested.

    Verifies that quota and other arguments are valid.
    """
    if not instance_type:
        instance_type = instance_types.get_default_instance_type()

    num_instances = quota.allowed_instances(context, max_count,
                                            instance_type)
    if num_instances < min_count:
        pid = context.project_id
        LOG.warn(_("Quota exceeeded for %(pid)s,"
                   " tried to run %(min_count)s instances") % locals())
        raise quota.QuotaError(_("Instance quota exceeded. You can only "
                                 "run %s more instances of this type.") %
                               num_instances, "InstanceLimitExceeded")

    self._check_metadata_properties_quota(context, metadata)
    self._check_injected_file_quota(context, injected_files)

    image = self.image_service.show(context, image_id)

    os_type = None
    if 'properties' in image and 'os_type' in image['properties']:
        os_type = image['properties']['os_type']

    if kernel_id is None:
        kernel_id = image['properties'].get('kernel_id', None)
    if ramdisk_id is None:
        ramdisk_id = image['properties'].get('ramdisk_id', None)
    # FIXME(sirp): is there a way we can remove null_kernel?
    # No kernel and ramdisk for raw images
    if kernel_id == str(FLAGS.null_kernel):
        kernel_id = None
        ramdisk_id = None
        LOG.debug(_("Creating a raw instance"))
    # Make sure we have access to kernel and ramdisk (if not raw)
    logging.debug("Using Kernel=%s, Ramdisk=%s" %
                  (kernel_id, ramdisk_id))
    if kernel_id:
        self.image_service.show(context, kernel_id)
    if ramdisk_id:
        self.image_service.show(context, ramdisk_id)

    if security_group is None:
        security_group = ['default']
    if not type(security_group) is list:
        security_group = [security_group]

    security_groups = []
    self.ensure_default_security_group(context)
    for security_group_name in security_group:
        group = db.security_group_get_by_name(context,
                                              context.project_id,
                                              security_group_name)
        security_groups.append(group['id'])

    if key_data is None and key_name:
        key_pair = db.key_pair_get(context, context.user_id, key_name)
        key_data = key_pair['public_key']

    base_options = {
        'reservation_id': utils.generate_uid('r'),
        'image_id': image_id,
        'kernel_id': kernel_id or '',
        'ramdisk_id': ramdisk_id or '',
        'state': 0,
        'state_description': 'scheduling',
        'user_id': context.user_id,
        'project_id': context.project_id,
        'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'instance_type_id': instance_type['id'],
        'memory_mb': instance_type['memory_mb'],
        'vcpus': instance_type['vcpus'],
        'local_gb': instance_type['local_gb'],
        'display_name': display_name,
        'display_description': display_description,
        'user_data': user_data or '',
        'key_name': key_name,
        'key_data': key_data,
        'locked': False,
        'metadata': metadata,
        'availability_zone': availability_zone,
        'os_type': os_type}
    elevated = context.elevated()
    instances = []
    LOG.debug(_("Going to run %s instances..."), num_instances)
    for num in range(num_instances):
        instance = dict(mac_address=utils.generate_mac(),
                        launch_index=num,
                        **base_options)
        instance = self.db.instance_create(context, instance)
        instance_id = instance['id']

        elevated = context.elevated()
        if not security_groups:
            security_groups = []
        for security_group_id in security_groups:
            self.db.instance_add_security_group(elevated,
                                                instance_id,
                                                security_group_id)

        # Set sane defaults if not specified
        updates = dict(hostname=self.hostname_factory(instance_id))
        if (not hasattr(instance, 'display_name') or
                instance.display_name is None):
            updates['display_name'] = "Server %s" % instance_id

        instance = self.update(context, instance_id, **updates)
        instances.append(instance)

        pid = context.project_id
        uid = context.user_id
        LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
                    " instance %(instance_id)s") % locals())
        rpc.cast(context,
                 FLAGS.scheduler_topic,
                 {"method": "run_instance",
                  "args": {"topic": FLAGS.compute_topic,
                           "instance_id": instance_id,
                           "availability_zone": availability_zone,
                           "injected_files": injected_files}})

    for group_id in security_groups:
        self.trigger_security_group_members_refresh(elevated, group_id)

    return [dict(x.iteritems()) for x in instances]