def add_to_aggregate(self, context, aggregate, host, slave_info=None):
    """Add a compute host to an aggregate."""
    # Only hypervisor-pool aggregates are managed here; anything else
    # is none of our business.
    if not pool_states.is_hv_pool(aggregate.metadata):
        return

    if CONF.xenserver.independent_compute:
        raise exception.NotSupportedWithOption(
            operation='adding to a XenServer pool',
            option='CONF.xenserver.independent_compute')

    # Pool states in which a new host cannot be accepted.
    blocked = {
        pool_states.CHANGING: _('setup in progress'),
        pool_states.DISMISSED: _('aggregate deleted'),
        pool_states.ERROR: _('aggregate in error'),
    }
    current_state = aggregate.metadata[pool_states.KEY]
    if current_state in blocked:
        raise exception.InvalidAggregateActionAdd(
            aggregate_id=aggregate.id,
            reason=blocked[current_state])

    if current_state == pool_states.CREATED:
        aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})

    if len(aggregate.hosts) == 1:
        # this is the first host of the pool -> make it master
        self._init_pool(aggregate.id, aggregate.name)
        # save metadata so that we can find the master again
        aggregate.update_metadata({
            'master_compute': host,
            host: self._host_uuid,
            pool_states.KEY: pool_states.ACTIVE,
        })
    else:
        # The pool is already up and running; work out whether this
        # node can serve the request itself or must delegate it.
        master_compute = aggregate.metadata['master_compute']
        if master_compute == CONF.host and master_compute != host:
            # We are the master -> perform a pool-join.
            # To this aim, nova compute on the slave has to go down.
            # NOTE: it is assumed that ONLY nova compute is running now
            self._join_slave(aggregate.id, host,
                             slave_info.get('compute_uuid'),
                             slave_info.get('url'),
                             slave_info.get('user'),
                             slave_info.get('passwd'))
            aggregate.update_metadata(
                {host: slave_info.get('xenhost_uuid')})
        elif master_compute and master_compute != host:
            # Delegate: RPC-cast to the master, asking it to add this
            # host with the credentials gathered below.
            slave_info = self._create_slave_info()
            self.compute_rpcapi.add_aggregate_host(
                context, host, aggregate, master_compute, slave_info)
def invalid_option(option_name, recommended_value):
    """Log and raise for a xenserver option that is incompatible with
    CONF.xenserver.independent_compute=True.

    :param option_name: name of the offending CONF.xenserver option
    :param recommended_value: value the operator should use instead
    :raises: exception.NotSupportedWithOption always
    """
    # Use LOG.error, not LOG.exception: no exception is being handled at
    # this point (the raise below has not happened yet), so LOG.exception
    # would append a misleading "NoneType: None" traceback to the record.
    LOG.error(_('Current value of '
                'CONF.xenserver.%(option)s option incompatible with '
                'CONF.xenserver.independent_compute=True. '
                'Consider using "%(recommended)s"'),
              {'option': option_name,
               'recommended': recommended_value})
    raise exception.NotSupportedWithOption(
        operation=option_name,
        option='CONF.xenserver.independent_compute')
def snapshot(self, context, instance, image_id, update_task_state):
    """Snapshots the specified instance.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param image_id: Reference to a pre-created image that will hold the
                     snapshot.
    :param update_task_state: Callback function to update the task_state
        on the instance while the snapshot operation progresses. The
        function takes a task_state argument and an optional
        expected_task_state kwarg which defaults to
        nova.compute.task_states.IMAGE_SNAPSHOT. See
        nova.objects.instance.Instance.save for expected_task_state usage.
    """
    # Bail out early if the configured disk driver cannot snapshot.
    if not self.disk_dvr.capabilities.get('snapshot'):
        raise exc.NotSupportedWithOption(
            message=_("The snapshot operation is not supported in "
                      "conjunction with a [powervm]/disk_driver setting "
                      "of %s.") % CONF.powervm.disk_driver)

    self._log_operation('snapshot', instance)

    # Assemble the taskflow pipeline that performs the snapshot.
    snap_flow = tf_lf.Flow("snapshot")

    # First, mark the snapshot as pending upload.
    snap_flow.add(tf_img.UpdateTaskState(
        update_task_state, task_states.IMAGE_PENDING_UPLOAD))

    # Attach the instance's boot disk to the management partition and
    # scan the SCSI bus so the device shows up there.
    snap_flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))

    # Flip the task state to "uploading".
    snap_flow.add(tf_img.UpdateTaskState(
        update_task_state, task_states.IMAGE_UPLOADING,
        expected_state=task_states.IMAGE_PENDING_UPLOAD))

    # Stream the disk contents up to glance.
    snap_flow.add(tf_img.StreamToGlance(
        context, self.image_api, image_id, instance))

    # Detach the boot disk from the management partition and remove the
    # temporary device.
    snap_flow.add(tf_stg.RemoveInstanceDiskFromMgmt(
        self.disk_dvr, instance))

    # Execute the assembled flow.
    tf_base.run(snap_flow, instance=instance)