def __init__(self):
    """Set up the service clients and notifier used by the task manager."""
    super(ComputeTaskManager, self).__init__()
    # Client handles for the services this manager coordinates.
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.image_api = image.API()
    self.servicegroup_api = servicegroup.API()
    self.scheduler_client = scheduler_client.SchedulerClient()
    # Notifier scoped to the 'compute' service on this host.
    self.notifier = rpc.get_notifier('compute', CONF.host)
def __init__(self):
    """Set up the Nova and image service handles used by the cloudlet API."""
    # NOTE(review): the topic-based RPC initialization below was disabled
    # in favor of the plain super() call; confirm whether it can be removed.
    # super(CloudletAPI, self).__init__(
    #    topic=CONF.compute_topic,
    #    default_version=CloudletAPI.BASE_RPC_API_VERSION)
    super(CloudletAPI, self).__init__()
    self.nova_api = nova_api.API()
    self.image_api = image.API()
def init_host(self, host): """Initialize anything that is necessary for the driver to function. Includes catching up with currently running VMs on the given host. """ # Build the adapter. May need to attempt the connection multiple times # in case the PowerVM management API service is starting. # TODO(efried): Implement async compute service enable/disable like # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61 self.adapter = pvm_apt.Adapter( pvm_apt.Session(conn_tries=60), helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper]) # Make sure the Virtual I/O Server(s) are available. pvm_par.validate_vios_ready(self.adapter) self.host_wrapper = pvm_ms.System.get(self.adapter)[0] # Do a scrub of the I/O plane to make sure the system is in good shape LOG.info("Clearing stale I/O connections on driver init.") pvm_stor.ComprehensiveScrub(self.adapter).execute() # Initialize the disk adapter # TODO(efried): Other disk adapters (localdisk), by conf selection. self.disk_dvr = ssp.SSPDiskAdapter(self.adapter, self.host_wrapper.uuid) self.image_api = image.API() LOG.info("The PowerVM compute driver has been initialized.")
def __init__(self, connection):
    """Initialize the DiskAdapter.

    :param connection: connection information for the underlying driver;
        must provide 'adapter', 'host_uuid' and 'mp_uuid' entries
    """
    self._connection = connection
    # Unpack the pieces of the connection info this adapter relies on.
    self.adapter, self.host_uuid, self.mp_uuid = (
        connection['adapter'],
        connection['host_uuid'],
        connection['mp_uuid'],
    )
    self.image_api = image.API()
def __init__(self, context, instance, destination,
             block_migration, disk_over_commit):
    """Capture the live-migration request and build service clients.

    :param context: request auth context
    :param instance: instance to migrate; its current host is recorded
        as the migration source
    :param destination: target host for the migration
    :param block_migration: whether a block migration was requested
    :param disk_over_commit: whether disk over-commit is allowed
    """
    # Request parameters, captured as-is.
    self.context = context
    self.instance = instance
    # Derived source host plus the requested destination.
    self.source = instance.host
    self.destination = destination
    self.block_migration = block_migration
    self.disk_over_commit = disk_over_commit
    # Initially unset; holds driver-specific migration data.
    self.migrate_data = None
    # Service API clients used while driving the migration.
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.servicegroup_api = servicegroup.API()
    self.scheduler_client = scheduler_client.SchedulerClient()
    self.image_api = image.API()
def select_all_related_img(self, context, value):
    """Filter images as to the property 'image_template' of images.

    :param context: request context passed through to the image API
    :param value: iterable of image UUIDs to look up
    :returns: list of the image records that resolved to a truthy value
    """
    glance = image.API()
    # Look each UUID up in order; drop any falsy (missing) results.
    lookups = (glance.get(context, img_uuid) for img_uuid in value)
    return [img for img in lookups if img]
def __init__(self, virtapi):
    """Verify the Windows version, then build the Hyper-V ops helpers.

    :param virtapi: VirtAPI instance forwarded to the base driver and
        to the VM operations helper
    """
    # check if the current version of Windows is supported before any
    # further driver initialisation.
    self._check_minimum_windows_version()
    super(HyperVDriver, self).__init__(virtapi)
    # One helper object per functional area of the driver.
    self._hostops = hostops.HostOps()
    self._volumeops = volumeops.VolumeOps()
    self._vmops = vmops.VMOps(virtapi)
    self._snapshotops = snapshotops.SnapshotOps()
    self._livemigrationops = livemigrationops.LiveMigrationOps()
    self._migrationops = migrationops.MigrationOps()
    self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
    self._serialconsoleops = serialconsoleops.SerialConsoleOps()
    self._imagecache = imagecache.ImageCache()
    self._image_api = image.API()
def setUp(self):
    """Stub/mock everything below the API layer for server-action tests."""
    super(ServerActionsControllerTestV21, self).setUp()
    # Give the (faked) glance client a concrete endpoint.
    self.flags(group='glance', api_servers=['http://localhost:9292'])
    # Canned compute lookups: an ACTIVE instance on a fake host, and a
    # no-op Instance.save.
    self.stub_out('nova.compute.api.API.get',
                  fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
                                         host='fake_host'))
    self.stub_out('nova.objects.Instance.save', lambda *a, **kw: None)
    fakes.stub_out_compute_api_snapshot(self)
    fake.stub_out_image_service(self)
    self.flags(enable_instance_password=True, group='api')
    # Image href reused by the individual test cases.
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'

    self.controller = self._get_controller()
    self.compute_api = self.controller.compute_api
    # We don't care about anything getting as far as hitting the compute
    # RPC API so we just mock it out here.
    mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
    mock_rpcapi.start()
    self.addCleanup(mock_rpcapi.stop)
    # The project_id here matches what is used by default in
    # fake_compute_get which need to match for policy checks.
    self.req = fakes.HTTPRequest.blank('', project_id='fake_project')
    self.context = self.req.environ['nova.context']
    self.image_api = image.API()
    # Assume that anything that hits the compute API and looks for a
    # RequestSpec doesn't care about it, since testing logic that deep
    # should be done in nova.tests.unit.compute.test_compute_api.
    mock_reqspec = mock.patch('nova.objects.RequestSpec')
    mock_reqspec.start()
    self.addCleanup(mock_reqspec.stop)
    # Similarly we shouldn't care about anything hitting conductor from
    # these tests.
    mock_conductor = mock.patch.object(
        self.controller.compute_api, 'compute_task_api')
    mock_conductor.start()
    self.addCleanup(mock_conductor.stop)
    # Assume that none of the tests are using ports with resource requests.
    self.mock_list_port = self.useFixture(
        fixtures.MockPatch(
            'nova.network.neutronv2.api.API.list_ports')).mock
    self.mock_list_port.return_value = {'ports': []}
def setUp(self):
    """Stub out DB, network, compute and image services for these tests."""
    super(ServerActionsControllerTestV21, self).setUp()
    # Give the (faked) glance client a concrete endpoint.
    self.flags(group='glance', api_servers=['http://localhost:9292'])
    # Canned DB responses: an ACTIVE instance on a fake host, plus the
    # module's instance_update_and_get_original stand-in.
    self.stub_out('nova.db.api.instance_get_by_uuid',
                  fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                          host='fake_host'))
    self.stub_out('nova.db.api.instance_update_and_get_original',
                  instance_update_and_get_original)
    fakes.stub_out_nw_api(self)
    fakes.stub_out_compute_api_snapshot(self)
    fake.stub_out_image_service(self)
    self.flags(enable_instance_password=True, group='api')
    # Image href reused by the individual test cases.
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'

    self.controller = self._get_controller()
    self.compute_api = self.controller.compute_api
    self.req = fakes.HTTPRequest.blank('')
    self.context = self.req.environ['nova.context']
    self.image_api = image.API()
def __init__(self, context, instance, flavor,
             request_spec, source_migration, compute_rpcapi,
             host_selection, alternate_hosts):
    """Construct the CrossCellMigrationTask instance

    :param context: The user request auth context. This should be targeted
        to the source cell in which the instance is currently running.
    :param instance: The instance being migrated (from the source cell)
    :param flavor: The new flavor if performing resize and not just a
        cold migration
    :param request_spec: nova.objects.RequestSpec with scheduling details
    :param source_migration: nova.objects.Migration record for this
        operation (from the source cell)
    :param compute_rpcapi: instance of nova.compute.rpcapi.ComputeAPI
    :param host_selection: nova.objects.Selection of the initial
        selected target host from the scheduler where the selected host
        is in another cell which is different from the cell in which
        the instance is currently running
    :param alternate_hosts: list of 0 or more nova.objects.Selection
        objects representing alternate hosts within the same target cell
        as ``host_selection``.
    """
    super(CrossCellMigrationTask, self).__init__(context, instance)
    self.request_spec = request_spec
    self.flavor = flavor
    self.source_migration = source_migration
    self.compute_rpcapi = compute_rpcapi
    self.host_selection = host_selection
    self.alternate_hosts = alternate_hosts

    # Target-cell state; initialized empty here and populated elsewhere.
    self._target_cell_instance = None
    self._target_cell_context = None

    # Service API clients used while executing the task.
    self.network_api = network.API()
    self.volume_api = cinder.API()
    self.image_api = nova_image.API()

    # Keep an ordered dict of the sub-tasks completed so we can call their
    # rollback routines if something fails.
    self._completed_tasks = collections.OrderedDict()
def __init__(self, virtapi):
    """Initialize the Azure driver and its service clients.

    :param virtapi: VirtAPI instance forwarded to the base driver
    :raises nova_ex.NovaException: if the Azure adapter cannot be built
    """
    super(AzureDriver, self).__init__(virtapi)
    try:
        self.azure = Azure()
        self.disks = self.azure.compute.disks
        self.images = self.azure.compute.images
    except Exception as e:
        # BUG FIX: the original built a (message, exception) tuple with a
        # bare '%' placeholder, so the logged/raised message was a tuple
        # repr. Interpolate the exception text into the message instead.
        msg = (_LI("Initialize Azure Adapter failed. reason: %s")
               % six.text_type(e))
        LOG.error(msg)
        raise nova_ex.NovaException(message=msg)
    # Shorthand handles to the Azure service groups used by the driver.
    self.compute = self.azure.compute
    self.network = self.azure.network
    self.resource = self.azure.resource
    # Cinder/Glance wrappers for volume and image operations.
    self._volume_api = cinder.API()
    self._image_api = image.API()
    # Timestamp of the last cleanup pass and NICs pending removal.
    self.cleanup_time = time.time()
    self.residual_nics = []
CONF = cfg.CONF CONF.register_opts(__imagebackend_opts, 'libvirt') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('preallocate_images', 'nova.virt.driver') CONF.import_opt('enabled', 'nova.compute.api', group='ephemeral_storage_encryption') CONF.import_opt('cipher', 'nova.compute.api', group='ephemeral_storage_encryption') CONF.import_opt('key_size', 'nova.compute.api', group='ephemeral_storage_encryption') CONF.import_opt('rbd_user', 'nova.virt.libvirt.volume', group='libvirt') CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume', group='libvirt') LOG = logging.getLogger(__name__) IMAGE_API = image.API() @six.add_metaclass(abc.ABCMeta) class Image(object): SUPPORTS_CLONE = False def __init__(self, source_type, driver_format, is_block_dev=False): """Image initialization. :source_type: block or file :driver_format: raw or qcow2 :is_block_dev: """ if (CONF.ephemeral_storage_encryption.enabled and
def __init__(self):
    """Set up the RPC, image and scheduler clients used by the manager."""
    super(ComputeTaskManager, self).__init__()
    self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    self.image_api = image.API()
    self.scheduler_client = scheduler_client.SchedulerClient()
def __init__(self):
    """Set up handles to the conductor task RPC API and the image API."""
    self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
    self.image_api = image.API()
def info_from_instance(context, instance, network_info, system_metadata,
                       **kw):
    """Get detailed instance information for an instance which is common
    to all notifications.

    :param:context: request context used to build the image URL
    :param:instance: nova.objects.Instance
    :param:network_info: network_info provided if not None
    :param:system_metadata: system_metadata DB entries for the instance,
    if not None
    :returns: dict of notification payload fields, updated with ``kw``

    .. note::

        Currently unused here in trunk, but needed for potential custom
        modifications.
    """
    try:
        # TODO(mriedem): We can eventually drop this when we no longer support
        # legacy notifications since versioned notifications don't use this.
        image_ref_url = image_api.API().generate_image_url(
            instance.image_ref, context)
    except ks_exc.EndpointNotFound:
        # We might be running from a periodic task with no auth token and
        # CONF.glance.api_servers isn't set, so we can't get the image API
        # endpoint URL from the service catalog, therefore just use the image
        # id for the URL (yes it's a lie, but it's best effort at this point).
        with excutils.save_and_reraise_exception() as exc_ctx:
            if context.auth_token is None:
                image_ref_url = instance.image_ref
                exc_ctx.reraise = False

    # Flavor fields are read defensively with .get() defaults.
    instance_type = instance.get_flavor()
    instance_type_name = instance_type.get('name', '')
    instance_flavorid = instance_type.get('flavorid', '')

    instance_info = dict(
        # Owner properties
        tenant_id=instance.project_id,
        user_id=instance.user_id,

        # Identity properties
        instance_id=instance.uuid,
        display_name=instance.display_name,
        reservation_id=instance.reservation_id,
        hostname=instance.hostname,

        # Type properties
        instance_type=instance_type_name,
        instance_type_id=instance.instance_type_id,
        instance_flavor_id=instance_flavorid,
        architecture=instance.architecture,

        # Capacity properties
        memory_mb=instance.flavor.memory_mb,
        disk_gb=instance.flavor.root_gb + instance.flavor.ephemeral_gb,
        vcpus=instance.flavor.vcpus,
        # Note(dhellmann): This makes the disk_gb value redundant, but
        # we are keeping it for backwards-compatibility with existing
        # users of notifications.
        root_gb=instance.flavor.root_gb,
        ephemeral_gb=instance.flavor.ephemeral_gb,

        # Location properties
        host=instance.host,
        node=instance.node,
        availability_zone=instance.availability_zone,
        cell_name=null_safe_str(instance.cell_name),

        # Date properties
        created_at=str(instance.created_at),
        # Terminated and Deleted are slightly different (although being
        # terminated and not deleted is a transient state), so include
        # both and let the recipient decide which they want to use.
        terminated_at=null_safe_isotime(instance.get('terminated_at', None)),
        deleted_at=null_safe_isotime(instance.get('deleted_at', None)),
        launched_at=null_safe_isotime(instance.get('launched_at', None)),

        # Image properties
        image_ref_url=image_ref_url,
        os_type=instance.os_type,
        kernel_id=instance.kernel_id,
        ramdisk_id=instance.ramdisk_id,

        # Status properties
        state=instance.vm_state,
        state_description=null_safe_str(instance.task_state),
        # NOTE(gibi): It might seems wrong to default the progress to an empty
        # string but this is how legacy work and this code only used by the
        # legacy notification so try to keep the compatibility here but also
        # keep it contained.
        progress=int(instance.progress) if instance.progress else '',

        # accessIPs
        access_ip_v4=instance.access_ip_v4,
        access_ip_v6=instance.access_ip_v6,
        )

    if network_info is not None:
        # Flatten each VIF's fixed IPs, tagging them with the network
        # label and VIF MAC so consumers can correlate them.
        fixed_ips = []
        for vif in network_info:
            for ip in vif.fixed_ips():
                ip["label"] = vif["network"]["label"]
                ip["vif_mac"] = vif["address"]
                fixed_ips.append(ip)
        instance_info['fixed_ips'] = fixed_ips

    # add image metadata
    image_meta_props = image_meta(instance.system_metadata)
    instance_info["image_meta"] = image_meta_props

    # add instance metadata
    instance_info['metadata'] = instance.metadata

    # Caller-supplied extras override/extend the payload.
    instance_info.update(kw)
    return instance_info
cfg.StrOpt('auth_uri', default='http://controller:5000/v2.0', help='Public Identity API endpoint'), ] CONF = cfg.CONF CONF.register_opts(auth_options, 'keystone_authtoken') LOG = logging.getLogger(__name__) nova_client = client.Client( username=CONF.keystone_authtoken.admin_user, password=CONF.keystone_authtoken.admin_password, tenant_name=CONF.keystone_authtoken.admin_tenant_name, auth_url=CONF.keystone_authtoken.auth_uri) image_api = image.API() def get_context(): creds = nova_client s_catalog = creds.service_catalog.catalog['serviceCatalog'] ctx = nova_context.RequestContext(user_id=creds.user_id, is_admin=True, project_id=creds.project_id, user_name=creds.username, project_name=creds.project_name, roles=['admin'], auth_token=creds.auth_token, remote_address=None, service_catalog=s_catalog, request_id=None)