Example #1
    def setUp(self):
        super(FloatingIpsBulkTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface

        self.ip_pool = [
            {
                'address': "10.10.10.1",
                'pool': pool,
                'interface': interface,
                'host': None
            },
            {
                'address': "10.10.10.2",
                'pool': pool,
                'interface': interface,
                'host': None
            },
            {
                'address': "10.10.10.3",
                'pool': pool,
                'interface': interface,
                'host': "testHost"
            },
        ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)

        self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
                        context.get_admin_context(), self.ip_pool)
Example #2
 def test_admin_no_overwrite(self):
     # If there is already a context in the cache, creating an admin
     # context will not overwrite it.
     ctx1 = context.RequestContext('111',
                                   '222',
                                   overwrite=True)
     context.get_admin_context()
     self.assertIs(o_context.get_current(), ctx1)
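The behaviour exercised above comes from oslo.context's thread-local cache: a RequestContext created with overwrite=True replaces the cached context, while get_admin_context() leaves the cache alone (consistent with the admin context being created with overwrite=False). A minimal sketch of those semantics, assuming plain oslo_context and purely illustrative contexts:

from oslo_context import context as o_context

ctx1 = o_context.RequestContext(overwrite=True)   # becomes the cached context
o_context.RequestContext(overwrite=False)         # created, but cache untouched
assert o_context.get_current() is ctx1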
Example #3
    def _metadata_as_json(self, version, path):
        metadata = {'uuid': self.uuid}
        if self.launch_metadata:
            metadata['meta'] = self.launch_metadata
        if self.files:
            metadata['files'] = self.files
        if self.extra_md:
            metadata.update(self.extra_md)
        if self.network_config:
            metadata['network_config'] = self.network_config
        if self.instance.key_name:
            metadata['public_keys'] = {
                self.instance.key_name: self.instance.key_data
            }

            if cells_opts.get_cell_type() == 'compute':
                cells_api = cells_rpcapi.CellsAPI()
                keypair = cells_api.get_keypair_at_top(
                    context.get_admin_context(), self.instance.user_id,
                    self.instance.key_name)
            else:
                try:
                    keypair = keypair_obj.KeyPair.get_by_name(
                        context.get_admin_context(), self.instance.user_id,
                        self.instance.key_name)
                except exception.KeypairNotFound:
                    # NOTE(mriedem): If the keypair was deleted from under us
                    # don't totally fail the request, just treat it as if the
                    # instance.key_name wasn't set.
                    keypair = None

            if keypair:
                metadata['keys'] = [{
                    'name': keypair.name,
                    'type': keypair.type,
                    'data': keypair.public_key
                }]
            else:
                LOG.debug(
                    "Unable to find keypair for instance with "
                    "key name '%s'.",
                    self.instance.key_name,
                    instance=self.instance)

        metadata['hostname'] = self._get_hostname()
        metadata['name'] = self.instance.display_name
        metadata['launch_index'] = self.instance.launch_index
        metadata['availability_zone'] = self.availability_zone

        if self._check_os_version(GRIZZLY, version):
            metadata['random_seed'] = base64.b64encode(os.urandom(512))

        if self._check_os_version(LIBERTY, version):
            metadata['project_id'] = self.instance.project_id

        self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
        return jsonutils.dump_as_bytes(metadata)
Example #4
    def test_volume_type_get_with_extra_specs(self):
        volume_type = storage.volume_type_get(
            context.get_admin_context(),
            self.volume_type1_id)
        self.assertEqual(self.vol_type1_specs, volume_type['extra_specs'])

        volume_type = storage.volume_type_get(
            context.get_admin_context(),
            self.vol_type2_id)
        self.assertEqual({}, volume_type['extra_specs'])
Example #5
    def test_volume_type_get_by_name_with_extra_specs(self):
        volume_type = storage.volume_type_get_by_name(
            context.get_admin_context(),
            self.vol_type1['name'])
        self.assertEqual(self.vol_type1_specs, volume_type['extra_specs'])

        volume_type = storage.volume_type_get_by_name(
            context.get_admin_context(),
            self.vol_type2_noextra['name'])
        self.assertEqual({}, volume_type['extra_specs'])
Example #6
 def test_volume_type_extra_specs_delete(self):
     expected_specs = self.vol_type1_specs.copy()
     del expected_specs['vol_extra2']
     storage.volume_type_extra_specs_delete(context.get_admin_context(),
                                            self.volume_type1_id,
                                            'vol_extra2')
     actual_specs = storage.volume_type_extra_specs_get(
         context.get_admin_context(),
         self.volume_type1_id)
     self.assertEqual(expected_specs, actual_specs)
Example #7
 def test_service_enabled_on_create_based_on_flag(self):
     self.flags(enable_new_services=True)
     host = 'foo'
     binary = 'storage-fake'
     app = service.Service.create(host=host, binary=binary)
     app.start()
     app.stop()
     ref = storage.service_get(context.get_admin_context(), app.service_id)
     storage.service_destroy(context.get_admin_context(), app.service_id)
     self.assertFalse(ref['disabled'])
Example #8
0
    def test_post_start_hook_child_cell(self):
        self.mox.StubOutWithMock(self.driver, 'start_servers')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')

        self.driver.start_servers(self.msg_runner)
        context.get_admin_context().AndReturn(self.ctxt)
        self.cells_manager._update_our_parents(self.ctxt)
        self.mox.ReplayAll()
        self.cells_manager.post_start_hook()
Example #10
 def test_volume_type_extra_specs_update(self):
     expected_specs = self.vol_type1_specs.copy()
     expected_specs['vol_extra3'] = "4"
     storage.volume_type_extra_specs_update_or_create(
         context.get_admin_context(),
         self.volume_type1_id,
         dict(vol_extra3=4))
     actual_specs = storage.volume_type_extra_specs_get(
         context.get_admin_context(),
         self.volume_type1_id)
     self.assertEqual(expected_specs, actual_specs)
Example #11
 def test_volume_type_extra_specs_create(self):
     expected_specs = self.vol_type1_specs.copy()
     expected_specs['vol_extra4'] = 'value4'
     expected_specs['vol_extra5'] = 'value5'
     storage.volume_type_extra_specs_update_or_create(
         context.get_admin_context(),
         self.volume_type1_id,
         dict(vol_extra4="value4",
              vol_extra5="value5"))
     actual_specs = storage.volume_type_extra_specs_get(
         context.get_admin_context(),
         self.volume_type1_id)
     self.assertEqual(expected_specs, actual_specs)
Example #12
    def test_flavor_manage_permissions(self):
        """Ensure that regular users can't create or delete flavors.

        """
        ctx = context.get_admin_context()
        flav1 = {'flavor': rand_flavor()}

        # Ensure user can't create flavor
        resp = self.user_api.api_post('flavors', flav1,
                                      check_response_status=False)
        self.assertEqual(403, resp.status)
        # ... and that it didn't leak through
        self.assertRaises(ex.FlavorNotFound,
                          compute.flavor_get_by_flavor_id,
                          ctx, flav1['flavor']['id'])

        # Create the flavor as the admin user
        self.api.api_post('flavors', flav1)

        # Ensure user can't delete flavors from our cloud
        resp = self.user_api.api_delete('flavors/%s' % flav1['flavor']['id'],
                                        check_response_status=False)
        self.assertEqual(403, resp.status)
        # ... and ensure that we didn't actually delete the flavor,
        # this will throw an exception if we did.
        compute.flavor_get_by_flavor_id(ctx, flav1['flavor']['id'])
Example #13
    def test_flavor_manage_func(self):
        """Basic flavor creation lifecycle testing.

        - Creating a flavor
        - Ensure it's in the database
        - Ensure it's in the listing
        - Delete it
        - Ensure it's hidden in the database
        """

        ctx = context.get_admin_context()
        flav1 = {
            'flavor': rand_flavor(),
        }

        # Create flavor and ensure it made it to the database
        self.api.api_post('flavors', flav1)

        flav1db = compute.flavor_get_by_flavor_id(ctx, flav1['flavor']['id'])
        self.assertFlavorDbEqual(flav1['flavor'], flav1db)

        # Ensure new flavor is seen in the listing
        resp = self.api.api_get('flavors')
        self.assertFlavorInList(flav1['flavor'], resp.body)

        # Delete flavor and ensure it was removed from the database
        self.api.api_delete('flavors/%s' % flav1['flavor']['id'])
        self.assertRaises(ex.FlavorNotFound,
                          compute.flavor_get_by_flavor_id,
                          ctx, flav1['flavor']['id'])

        resp = self.api.api_delete('flavors/%s' % flav1['flavor']['id'],
                                   check_response_status=False)
        self.assertEqual(404, resp.status)
Example #14
 def test_migrate_old_resize_record(self):
     db_migration = dict(fake_db_migration(), migration_type=None)
     with mock.patch('compute.compute.migration_get') as fake_get:
         fake_get.return_value = db_migration
         mig = compute.Migration.get_by_id(context.get_admin_context(), 1)
     self.assertTrue(mig.obj_attr_is_set('migration_type'))
     self.assertEqual('resize', mig.migration_type)
Example #15
def ec2_snap_id_to_uuid(ec2_id):
    """Get the corresponding UUID for the given ec2-id."""
    ctxt = context.get_admin_context()

    # NOTE(jgriffith) first strip prefix to get just the numeric id
    int_id = ec2_id_to_id(ec2_id)
    return get_snapshot_uuid_from_int_id(ctxt, int_id)
Example #16
    def init_host_floating_ips(self):
        """Configures floating IPs owned by host."""

        admin_context = context.get_admin_context()
        try:
            floating_ips = objects.FloatingIPList.get_by_host(admin_context,
                                                              self.host)
        except exception.NotFound:
            return

        for floating_ip in floating_ips:
            if floating_ip.fixed_ip_id:
                try:
                    fixed_ip = floating_ip.fixed_ip
                except exception.FixedIpNotFound:
                    LOG.debug('Fixed IP %s not found', floating_ip.fixed_ip_id)
                    continue
                interface = CONF.public_interface or floating_ip.interface
                try:
                    self.l3driver.add_floating_ip(floating_ip.address,
                                                  fixed_ip.address,
                                                  interface,
                                                  fixed_ip.network)
                except processutils.ProcessExecutionError:
                    LOG.debug('Interface %s not found', interface)
                    raise exception.NoFloatingIpInterface(interface=interface)
Example #17
def revoke_certs_by_project(project_id):
    """Revoke all project certs."""
    # NOTE(vish): This is somewhat useless because we can just shut down
    #             the vpn.
    admin = context.get_admin_context()
    for cert in db.certificate_get_all_by_project(admin, project_id):
        revoke_cert(cert['project_id'], cert['file_name'])
Example #18
def setup_profiler(binary, host):
    if (osprofiler_notifier is None or
            profiler is None or
            osprofiler_web is None or
            profiler_opts is None):
        LOG.debug('osprofiler is not present')
        return

    if CONF.profiler.enabled:
        _notifier = osprofiler_notifier.create(
            "Messaging", messaging, context.get_admin_context().to_dict(),
            rpc.TRANSPORT, "cinder", binary, host)
        osprofiler_notifier.set(_notifier)
        osprofiler_web.enable(CONF.profiler.hmac_keys)
        LOG.warning(
            _LW("OSProfiler is enabled.\nThis means that anyone who knows "
                "any of the hmac_keys specified in "
                "/etc/cinder/cinder.conf can trace their requests.\n"
                "In real life only the operator can read this file, so "
                "there is no security issue. Note that even if someone "
                "can trigger the profiler, only an admin user can "
                "retrieve trace information.\n"
                "To disable OSProfiler set in cinder.conf:\n"
                "[profiler]\nenabled=false"))
    else:
        osprofiler_web.disable()
Example #19
    def _encryption_update_bad_body(self, update_body, msg):

        # Create Volume Type and Encryption
        volume_type = self._default_volume_type
        res = self._create_type_and_encryption(volume_type)

        # Update Encryption
        res = self._get_response(
            volume_type, req_method='PUT',
            req_body=jsonutils.dump_as_bytes(update_body),
            req_headers='application/json',
            url='/v2/fake/types/%s/encryption/fake_type_id')

        res_dict = jsonutils.loads(res.body)

        expected = {
            'badRequest': {
                'code': 400,
                'message': msg
            }
        }

        # Confirm Failure
        self.assertEqual(expected, res_dict)
        storage.volume_type_destroy(context.get_admin_context(),
                                    volume_type['id'])
Example #20
def get_metadata_by_address(address):
    ctxt = context.get_admin_context()
    fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)

    return get_metadata_by_instance_id(fixed_ip['instance_uuid'],
                                       address,
                                       ctxt)
Example #22
    def start(self):
        """Start serving this service using loaded configuration.

        Also, retrieve updated port number in case '0' was passed in, which
        indicates a random port should be used.

        :returns: None

        """
        ctxt = context.get_admin_context()
        service_ref = objects.Service.get_by_host_and_binary(ctxt, self.host,
                                                             self.binary)
        if not service_ref:
            try:
                service_ref = _create_service_ref(self, ctxt)
            except (exception.ServiceTopicExists,
                    exception.ServiceBinaryExists):
                # NOTE(danms): If we race to create a record with a sibling,
                # don't fail here.
                service_ref = objects.Service.get_by_host_and_binary(
                    ctxt, self.host, self.binary)
        _update_service_ref(service_ref, ctxt)

        if self.manager:
            self.manager.init_host()
            self.manager.pre_start_hook()
            if self.backdoor_port is not None:
                self.manager.backdoor_port = self.backdoor_port
        self.server.start()
        if self.manager:
            self.manager.post_start_hook()
Example #23
    def setUp(self):
        super(FlashSystemISCSIDriverTestCase, self).setUp()

        self._def_flags = {'san_ip': 'hostname',
                           'san_login': '******',
                           'san_password': '******',
                           'flashsystem_connection_protocol': 'iSCSI',
                           'flashsystem_multihostmap_enabled': True,
                           'iscsi_ip_address': '192.168.1.10',
                           'flashsystem_iscsi_portid': 1}

        self.connector = {
            'host': 'flashsystem',
            'wwnns': ['0123456789abcdef', '0123456789abcdeg'],
            'wwpns': ['abcd000000000001', 'abcd000000000002'],
            'initiator': 'iqn.123456'}

        self.sim = FlashSystemManagementSimulator()
        self.driver = FlashSystemFakeISCSIDriver(
            configuration=conf.Configuration(None))
        self.driver.set_fake_storage(self.sim)

        self._reset_flags()
        self.ctxt = context.get_admin_context()
        self.driver.do_setup(None)
        self.driver.check_for_setup_error()

        self.sleeppatch = mock.patch('eventlet.greenthread.sleep')
        self.sleeppatch.start()
Example #24
 def _async_init_instance_info():
     context = context_module.get_admin_context()
     LOG.debug("START:_async_init_instance_info")
     self._instance_info = {}
     compute_nodes = objects.ComputeNodeList.get_all(context).objects
     LOG.debug("Total number of compute nodes: %s", len(compute_nodes))
     # Break the queries into batches of 10 to reduce the total number
     # of calls to the DB.
     batch_size = 10
     start_node = 0
     end_node = batch_size
     while start_node <= len(compute_nodes):
         curr_nodes = compute_nodes[start_node:end_node]
         start_node += batch_size
         end_node += batch_size
         filters = {
             "host": [curr_node.host for curr_node in curr_nodes],
             "deleted": False
         }
         result = objects.InstanceList.get_by_filters(context, filters)
         instances = result.objects
         LOG.debug("Adding %s instances for hosts %s-%s",
                   len(instances), start_node, end_node)
         for instance in instances:
             host = instance.host
             if host not in self._instance_info:
                 self._instance_info[host] = {
                     "instances": {},
                     "updated": False
                 }
             inst_dict = self._instance_info[host]
             inst_dict["instances"][instance.uuid] = instance
         # Call sleep() to cooperatively yield
         time.sleep(0)
     LOG.debug("END:_async_init_instance_info")
Example #26
 def test_archive_deleted_rows(self):
     # Boots a server, deletes it, and then tries to archive it.
     server = self._create_server()
     server_id = server['id']
     # Assert that there are instance_actions. instance_actions are
     # interesting since we don't soft delete them but they have a foreign
     # key back to the instances table.
     actions = self.api.get_instance_actions(server_id)
     self.assertTrue(len(actions),
                     'No instance actions for server: %s' % server_id)
     self._delete_server(server_id)
     # Verify we have the soft deleted instance in the database.
     admin_context = context.get_admin_context(read_deleted='yes')
     # This will raise InstanceNotFound if it's not found.
     instance = compute.instance_get_by_uuid(admin_context, server_id)
     # Make sure it's soft deleted.
     self.assertNotEqual(0, instance.deleted)
     # Verify we have some system_metadata since we'll check that later.
     self.assertTrue(len(instance.system_metadata),
                     'No system_metadata for instance: %s' % server_id)
     # Now try and archive the soft deleted records.
     results = compute.archive_deleted_rows(max_rows=100)
     # verify system_metadata was dropped
     self.assertIn('instance_system_metadata', results)
     self.assertEqual(len(instance.system_metadata),
                      results['instance_system_metadata'])
     # Verify that instances rows are dropped
     self.assertIn('instances', results)
     # Verify that instance_actions and actions_event are dropped
     # by the archive
     self.assertIn('instance_actions', results)
     self.assertIn('instance_actions_events', results)
Example #27
def get_metadata_by_instance_id(instance_id, address, ctxt=None):
    ctxt = ctxt or context.get_admin_context()
    instance = objects.Instance.get_by_uuid(
        ctxt, instance_id, expected_attrs=['ec2_ids', 'flavor', 'info_cache',
                                           'metadata', 'system_metadata',
                                           'security_groups'])
    return InstanceMetadata(instance, address)
Example #28
    def test_create_export(self):
        expected_result = {'location': '10.9.8.7:3260,1 ' +
                           self.iscsi_target_prefix +
                           self.testvol['name'] + ' 1',
                           'auth': 'CHAP QZJb P68e'}

        with mock.patch('storage.utils.execute', return_value=('', '')),\
                mock.patch.object(self.target, '_get_target',
                                  side_effect=lambda x: 1),\
                mock.patch.object(self.target, '_verify_backing_lun',
                                  side_effect=lambda x, y: True),\
                mock.patch.object(self.target, '_get_target_chap_auth',
                                  side_effect=lambda x, y: None) as m_chap,\
                mock.patch.object(vutils, 'generate_username',
                                  side_effect=lambda: 'QZJb'),\
                mock.patch.object(vutils, 'generate_password',
                                  side_effect=lambda: 'P68e'):

            ctxt = context.get_admin_context()
            self.assertEqual(expected_result,
                             self.target.create_export(ctxt,
                                                       self.testvol,
                                                       self.fake_volumes_dir))

            m_chap.side_effect = lambda x, y: ('otzL', '234Z')

            expected_result['auth'] = 'CHAP otzL 234Z'

            self.assertEqual(expected_result,
                             self.target.create_export(ctxt,
                                                       self.testvol,
                                                       self.fake_volumes_dir))
Example #29
    def test_attach_volume_to_server(self):
        self.stub_out('compute.volume.cinder.API.get', fakes.stub_volume_get)
        self.stub_out('compute.volume.cinder.API.check_attach',
                      lambda *a, **k: None)
        self.stub_out('compute.volume.cinder.API.reserve_volume',
                      lambda *a, **k: None)
        device_name = '/dev/vdd'
        bdm = compute.BlockDeviceMapping()
        bdm['device_name'] = device_name
        self.stub_out(
            'compute.compute.manager.ComputeManager.reserve_block_device_name',
            lambda *a, **k: bdm)
        self.stub_out(
            'compute.compute.manager.ComputeManager.attach_volume',
            lambda *a, **k: None)
        self.stub_out(
            'compute.compute.BlockDeviceMapping.get_by_volume_and_instance',
            classmethod(lambda *a, **k: None))

        volume = fakes.stub_volume_get(None, context.get_admin_context(),
                                       'a26887c6-c47b-4654-abb5-dfadf7d3f803')
        subs = {
            'volume_id': volume['id'],
            'device': device_name
        }
        server_id = self._post_server()
        response = self._do_post('servers/%s/os-volume_attachments'
                                 % server_id,
                                 'attach-volume-to-server-req', subs)

        self._verify_response('attach-volume-to-server-resp', subs,
                              response, 200)
Example #30
 def _init_aggregates(self):
     elevated = context_module.get_admin_context()
     aggs = objects.AggregateList.get_all(elevated)
     for agg in aggs:
         self.aggs_by_id[agg.id] = agg
         for host in agg.hosts:
             self.host_aggregates_map[host].add(agg.id)
Example #31
    def test_list_resizing_instances(self):
        instances = [{'image_ref': '1',
                      'host': CONF.host,
                      'id': '1',
                      'uuid': '123',
                      'vm_state': vm_states.RESIZED,
                      'task_state': None}]

        all_instances = [fake_instance.fake_instance_obj(None, **instance)
                         for instance in instances]

        image_cache_manager = imagecache.ImageCacheManager()
        self.mox.StubOutWithMock(compute.block_device.BlockDeviceMappingList,
                                 'get_by_instance_uuid')

        ctxt = context.get_admin_context()
        bdms = block_device_obj.block_device_make_list_from_dicts(
            ctxt, swap_bdm_256)
        compute.block_device.BlockDeviceMappingList.get_by_instance_uuid(
                ctxt, '123').AndReturn(bdms)

        self.mox.ReplayAll()
        running = image_cache_manager._list_running_instances(
            ctxt, all_instances)

        self.assertEqual(1, len(running['used_images']))
        self.assertEqual((1, 0, ['instance-00000001']),
                         running['used_images']['1'])
        self.assertEqual(set(['instance-00000001', '123',
                              'instance-00000001_resize', '123_resize']),
                         running['instance_names'])

        self.assertEqual(1, len(running['image_popularity']))
        self.assertEqual(1, running['image_popularity']['1'])
Example #32
    def setUp(self):
        super(SchedulerReportClientTestCase, self).setUp()
        self.context = context.get_admin_context()

        self.flags(use_local=True, group='conductor')

        self.client = scheduler_report_client.SchedulerReportClient()
Example #33
    def test_ensure_export_chap(self, mock_execute,
                                mock_get_target,
                                mock_scst_execute):
        mock_execute.return_value = (None, None)
        mock_scst_execute.return_value = (None, None)
        mock_get_target.return_value = 1
        ctxt = context.get_admin_context()

        def _fake_get_target_and_lun(*args, **kwargs):
            return 0, 1

        def _fake_get_target_chap_auth(*args, **kwargs):
            return None

        with mock.patch.object(self.target, 'create_iscsi_target'),\
                mock.patch.object(self.target, '_get_target_chap_auth',
                                  side_effect=_fake_get_target_chap_auth),\
                mock.patch.object(self.target, '_get_target_and_lun',
                                  side_effect=_fake_get_target_and_lun):
            self.target.ensure_export(ctxt,
                                      self.testvol,
                                      self.fake_volumes_dir)
            self.target.create_iscsi_target.assert_called_once_with(
                'iqn.2010-10.org.openstack:testvol',
                'ed2c2222-5fc0-11e4-aa15-123b93f75cba',
                0, 1, self.fake_volumes_dir, None)
Example #36
 def _determine_version_cap(self, target):
     global LAST_VERSION
     if LAST_VERSION:
         return LAST_VERSION
     service_version = objects.Service.get_minimum_version(
         context.get_admin_context(), 'nova-compute')
     history = service_obj.SERVICE_VERSION_HISTORY
     try:
         version_cap = history[service_version]['compute_rpc']
     except IndexError:
         LOG.error(_LE('Failed to extract compute RPC version from '
                       'service history because I am too '
                       'old (minimum version is now %(version)i)'),
                   {'version': service_version})
         raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION,
                                       minver=service_version)
     except KeyError:
         LOG.error(_LE('Failed to extract compute RPC version from '
                       'service history for version %(version)i'),
                   {'version': service_version})
         return target.version
     LAST_VERSION = version_cap
     LOG.info(_LI('Automatically selected compute RPC version %(rpc)s '
                  'from minimum service version %(service)i'),
              {'rpc': version_cap,
               'service': service_version})
     return version_cap
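The two except clauses above rely on SERVICE_VERSION_HISTORY being a sequence indexed by the integer service version: a version newer than anything this (older) node knows about raises IndexError, while a history entry that lacks a 'compute_rpc' mapping raises KeyError. A toy sketch of that shape, with hypothetical version strings:

# Hypothetical history entries; the real ones live in the service module.
SERVICE_VERSION_HISTORY = (
    {'compute_rpc': '4.0'},   # service version 0
    {'compute_rpc': '4.11'},  # service version 1
)

assert SERVICE_VERSION_HISTORY[1]['compute_rpc'] == '4.11'
# SERVICE_VERSION_HISTORY[2] would raise IndexError: "I am too old".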
Example #37
    def create_volume(self, volume):
        LOG.debug('start to create volume')
        LOG.debug('volume glance image metadata: %s',
                  volume.volume_glance_metadata)

        volume_args = {}
        volume_args['size'] = volume.size
        volume_args['display_description'] = volume.display_description
        volume_args['display_name'] = self._get_provider_volume_name(
            volume.display_name, volume.id)

        context = req_context.RequestContext(is_admin=True,
                                             project_id=volume.project_id)
        volume_type_id = volume.volume_type_id
        volume_type_name = None
        LOG.debug('volume type id %s', volume_type_id)
        if volume_type_id:
            volume_type_name = self._get_sub_type_name(
                req_context.get_admin_context(), volume_type_id)

        if volume_type_name:
            volume_args['volume_type'] = volume_type_name

        optionals = ('shareable', 'metadata', 'multiattach')
        volume_args.update((prop, getattr(volume, prop)) for prop in optionals
                           if getattr(volume, prop, None))

        if 'metadata' not in volume_args:
            volume_args['metadata'] = {}
        volume_args['metadata']['tag:caa_volume_id'] = volume.id

        sub_volume = self.os_cinderclient(context).create_volume(**volume_args)
        LOG.debug('submit create-volume task to sub os. '
                  'sub volume id: %s', sub_volume.id)

        LOG.debug('start to wait for volume %s in status '
                  'available', sub_volume.id)
        try:
            self.os_cinderclient(context).check_create_volume_complete(
                sub_volume)
        except Exception as ex:
            LOG.exception(
                _LE("volume(%s), check_create_volume_complete "
                    "failed! ex = %s"), volume.id, ex)
            with excutils.save_and_reraise_exception():
                sub_volume.delete()

        try:
            # create volume mapper
            values = {"provider_volume_id": sub_volume.id}
            self.caa_db_api.volume_mapper_create(context, volume.id,
                                                 context.project_id, values)
        except Exception as ex:
            LOG.exception(_LE("volume_mapper_create failed! ex = %s"), ex)
            sub_volume.delete()
            raise

        LOG.debug('create volume %s success.', volume.id)

        return {'provider_location': 'SUB-FusionSphere'}
Example #38
 def setUp(self):
     super(TestNexentaISCSIDriver, self).setUp()
     self.cfg = mock.Mock(spec=conf.Configuration)
     self.ctxt = context.get_admin_context()
     self.cfg.nexenta_dataset_description = ''
     self.cfg.nexenta_host = '1.1.1.1'
     self.cfg.nexenta_user = '******'
     self.cfg.nexenta_password = '******'
     self.cfg.nexenta_volume = 'storage'
     self.cfg.nexenta_rest_port = 2000
     self.cfg.nexenta_rest_protocol = 'http'
     self.cfg.nexenta_iscsi_target_portal_port = 3260
     self.cfg.nexenta_target_prefix = 'iqn:'
     self.cfg.nexenta_target_group_prefix = 'storage/'
     self.cfg.nexenta_blocksize = '8K'
     self.cfg.nexenta_sparse = True
     self.cfg.nexenta_dataset_compression = 'on'
     self.cfg.nexenta_dataset_dedup = 'off'
     self.cfg.nexenta_rrmgr_compression = 1
     self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
     self.cfg.nexenta_rrmgr_connections = 2
     self.cfg.reserved_percentage = 20
     self.nms_mock = mock.Mock()
     for mod in ['volume', 'zvol', 'iscsitarget', 'appliance',
                 'stmf', 'scsidisk', 'snapshot']:
         setattr(self.nms_mock, mod, mock.Mock())
     self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                    lambda *_, **__: self.nms_mock)
     self.drv = iscsi.NexentaISCSIDriver(
         configuration=self.cfg)
     self.drv.db = storage
     self.drv.do_setup(self.ctxt)
Example #40
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
    """Retrieve flavor access list by flavor id."""
    if ctxt is None:
        ctxt = context.get_admin_context()

    flavor = objects.Flavor.get_by_flavor_id(ctxt, flavorid)
    return flavor.projects
Example #41
    def test_connection_switch(self):
        # Use a file-based sqlite database so data will persist across new
        # connections
        fake_conn = 'sqlite:///' + self.test_filename

        # The 'main' database connection will stay open, so in-memory is fine
        self.useFixture(fixtures.Database(database='main'))
        self.useFixture(fixtures.Database(connection=fake_conn))

        # Make a request context with a cell mapping
        mapping = compute.CellMapping(database_connection=fake_conn)
        # In the tests, the admin context is required in order to read
        # an Instance back after write, for some reason
        ctxt = context.get_admin_context()
        # Create an instance in the cell database
        uuid = uuidutils.generate_uuid()
        with context.target_cell(ctxt, mapping):
            instance = compute.Instance(context=ctxt, uuid=uuid)
            instance.create()

            # Verify the instance is found in the cell database
            inst = compute.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Verify the instance isn't found in the main database
        self.assertRaises(exception.InstanceNotFound,
                          compute.Instance.get_by_uuid, ctxt, uuid)
Example #43
 def setUp(self):
     super(TestNexentaISCSIDriver, self).setUp()
     self.cfg = mock.Mock(spec=conf.Configuration)
     self.ctxt = context.get_admin_context()
     self.cfg.nexenta_dataset_description = ''
     self.cfg.nexenta_host = '1.1.1.1'
     self.cfg.nexenta_user = '******'
     self.cfg.nexenta_password = '******'
     self.cfg.nexenta_volume = 'storage'
     self.cfg.nexenta_rest_port = 2000
     self.cfg.nexenta_rest_protocol = 'http'
     self.cfg.nexenta_iscsi_target_portal_port = 8080
     self.cfg.nexenta_target_prefix = 'iqn:'
     self.cfg.nexenta_target_group_prefix = 'storage/'
     self.cfg.nexenta_ns5_blocksize = 32
     self.cfg.nexenta_sparse = True
     self.cfg.nexenta_dataset_compression = 'on'
     self.cfg.nexenta_dataset_dedup = 'off'
     self.cfg.reserved_percentage = 20
     self.cfg.nexenta_volume = 'pool'
     self.cfg.nexenta_volume_group = 'dsg'
     self.nef_mock = mock.Mock()
     self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                    lambda *_, **__: self.nef_mock)
     self.drv = iscsi.NexentaISCSIDriver(
         configuration=self.cfg)
     self.drv.db = storage
     self.drv.do_setup(self.ctxt)
Example #44
 def setUp(self):
     super(TestNexentaNfsDriver, self).setUp()
     self.ctxt = context.get_admin_context()
     self.cfg = mock.Mock(spec=conf.Configuration)
     self.cfg.nexenta_dataset_description = ''
     self.cfg.nexenta_shares_config = None
     self.cfg.nexenta_mount_point_base = '$state_path/mnt'
     self.cfg.nexenta_sparsed_volumes = True
     self.cfg.nexenta_dataset_compression = 'on'
     self.cfg.nexenta_dataset_dedup = 'off'
     self.cfg.nexenta_rrmgr_compression = 1
     self.cfg.nexenta_rrmgr_tcp_buf_size = 1024
     self.cfg.nexenta_rrmgr_connections = 2
     self.cfg.nfs_mount_point_base = '/mnt/test'
     self.cfg.nfs_mount_options = None
     self.cfg.nas_mount_options = None
     self.cfg.nexenta_nms_cache_volroot = False
     self.cfg.nfs_mount_attempts = 3
     self.cfg.reserved_percentage = 20
     self.cfg.max_over_subscription_ratio = 20.0
     self.nms_mock = mock.Mock()
     for mod in ('appliance', 'folder', 'server', 'volume', 'netstorsvc',
                 'snapshot', 'netsvc'):
         setattr(self.nms_mock, mod, mock.Mock())
     self.nms_mock.__hash__ = lambda *_, **__: 1
     self.stubs.Set(jsonrpc, 'NexentaJSONProxy',
                    lambda *_, **__: self.nms_mock)
     self.drv = nfs.NexentaNfsDriver(configuration=self.cfg)
     self.drv.shares = {}
     self.drv.share2nms = {}
Example #45
 def test_boot_servers_with_affinity_overquota(self):
     # Tests that we check server group member quotas and cleanup created
     # resources when we fail with OverQuota.
     self.flags(quota_server_group_members=1)
     # make sure we start with 0 servers
     servers = self.api.get_servers(detail=False)
     self.assertEqual(0, len(servers))
     created_group = self.api.post_server_groups(self.affinity)
     ex = self.assertRaises(client.OpenStackApiException,
                            self._boot_servers_to_group, created_group)
     self.assertEqual(403, ex.response.status_code)
     # _boot_servers_to_group creates 2 instances in the group in order, not
     # multiple servers in a single request. Since our quota is 1, the first
     # server create would pass, the second should fail, and we should be
     # left with 1 server and its 1 block device mapping.
     servers = self.api.get_servers(detail=False)
     self.assertEqual(1, len(servers))
     ctxt = context.get_admin_context()
     servers = compute.instance_get_all(ctxt)
     self.assertEqual(1, len(servers))
     ctxt_mgr = db_api.get_context_manager(ctxt)
     with ctxt_mgr.reader.using(ctxt):
         bdms = db_api._block_device_mapping_get_query(ctxt).all()
     self.assertEqual(1, len(bdms))
     self.assertEqual(servers[0]['uuid'], bdms[0]['instance_uuid'])
Example #46
    def setUp(self):
        super(ExtendedFloatingIpTestV21, self).setUp()
        self.stubs.Set(cloud.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self))

        fake_network.stub_out_nw_api_get_instance_nw_info(self)
        self.stub_out('cloud.cloud.instance_get',
                      fake_instance_get)

        self.context = context.get_admin_context()
        self._create_floating_ips()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.ext_mgr.extensions['os-floating-ips'] = True
        self.ext_mgr.extensions['os-extended-floating-ips'] = True
        self.controller = self.floating_ips.FloatingIPController()
        self.manager = self.floating_ips.FloatingIPActionController(
            self.ext_mgr)
        self.fake_req = fakes.HTTPRequest.blank('')
Example #47
    def _create_instance_with_availability_zone(self, zone_name):
        def create(*args, **kwargs):
            self.assertIn('availability_zone', kwargs)
            self.assertEqual('compute', kwargs['availability_zone'])
            return old_create(*args, **kwargs)

        old_create = compute_api.API.create
        self.stubs.Set(compute_api.API, 'create', create)
        image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
                'availability_zone': zone_name,
            },
        }

        admin_context = context.get_admin_context()
        compute.service_create(
            admin_context, {
                'host': 'host1_zones',
                'binary': "compute-compute",
                'topic': 'compute',
                'report_count': 0
            })
        agg = compute.aggregate_create(admin_context, {'name': 'agg1'},
                                       {'availability_zone': 'compute'})
        compute.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
        return self.req, body
Example #48
    def _get_instance_id_from_lb(self, provider_id, instance_address):
        # We use an admin context (admin=True) to look up the
        # inter-Edge network port
        context = nova_context.get_admin_context()
        neutron = neutronapi.get_client(context, admin=True)

        # Tenant and instance ids are found in the following way:
        #  X-Metadata-Provider contains id of the metadata provider, and since
        #  overlapping networks cannot be connected to the same metadata
        #  provider, the combo of tenant's instance IP and the metadata
        #  provider has to be unique.
        #
        #  The networks which are connected to the metadata provider are
        #  retrieved in the 1st call to neutron.list_subnets()
        #  In the 2nd call we read the ports which belong to any of the
        #  networks retrieved above, and have the X-Forwarded-For IP address.
        #  This combination has to be unique as explained above, and we can
        #  read the instance_id, tenant_id from that port entry.

        # Retrieve networks which are connected to metadata provider
        md_subnets = neutron.list_subnets(
            context,
            advanced_service_providers=[provider_id],
            fields=['network_id'])

        md_networks = [
            subnet['network_id'] for subnet in md_subnets['subnets']
        ]

        try:
            # Retrieve the instance data from the instance's port
            instance_data = neutron.list_ports(
                context,
                fixed_ips='ip_address=' + instance_address,
                network_id=md_networks,
                fields=['device_id', 'tenant_id'])['ports'][0]
        except Exception as e:
            LOG.error(
                _LE('Failed to get instance id for metadata '
                    'request, provider %(provider)s '
                    'networks %(networks)s '
                    'requester %(requester)s. Error: %(error)s'), {
                        'provider': provider_id,
                        'networks': md_networks,
                        'requester': instance_address,
                        'error': e
                    })
            msg = _('An unknown error has occurred. '
                    'Please try your request again.')
            raise webob.exc.HTTPBadRequest(explanation=msg)

        instance_id = instance_data['device_id']
        tenant_id = instance_data['tenant_id']

        # instance_data is unicode-encoded, while cache_utils doesn't like
        # that. Therefore we convert to str
        if isinstance(instance_id, six.text_type):
            instance_id = instance_id.encode('utf-8')
        return instance_id, tenant_id
Example #49
 def _force_reclaim(self):
     # Make sure that compute manager thinks the instance is
     # old enough to be expired
     the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
     timeutils.set_time_override(override_time=the_past)
     self.addCleanup(timeutils.clear_time_override)
     ctxt = context.get_admin_context()
     self.compute._reclaim_queued_deletes(ctxt)
Example #50
def id_to_ec2_vol_id(volume_id):
    """Get or create an ec2 volume ID (vol-[base 16 number]) from uuid."""
    if uuidutils.is_uuid_like(volume_id):
        ctxt = context.get_admin_context()
        int_id = get_int_id_from_volume_uuid(ctxt, volume_id)
        return id_to_ec2_id(int_id, 'vol-%08x')
    else:
        return id_to_ec2_id(volume_id, 'vol-%08x')
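The 'vol-%08x' template zero-pads the integer id to eight hex digits. A one-line sketch of that formatting; this id_to_ec2_id is a hypothetical stand-in for the real helper:

def id_to_ec2_id(instance_id, template='i-%08x'):
    # e.g. 42 with 'vol-%08x' gives 'vol-0000002a'
    return template % int(instance_id)

assert id_to_ec2_id(42, 'vol-%08x') == 'vol-0000002a'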
Example #51
    def start(self):
        verstr = version.version_string_with_package()
        LOG.info(_LI('Starting %(topic)s node (version %(version)s)'),
                 {'topic': self.topic, 'version': verstr})
        self.basic_config_check()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        self.service_ref = objects.Service.get_by_host_and_binary(
            ctxt, self.host, self.binary)
        if not self.service_ref:
            try:
                self.service_ref = _create_service_ref(self, ctxt)
            except (exception.ServiceTopicExists,
                    exception.ServiceBinaryExists):
                # NOTE(danms): If we race to create a record with a sibling
                # worker, don't fail here.
                self.service_ref = objects.Service.get_by_host_and_binary(
                    ctxt, self.host, self.binary)

        self.manager.pre_start_hook()

        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)

        endpoints = [
            self.manager,
            baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
        ]
        endpoints.extend(self.manager.additional_endpoints)

        # serializer = objects_base.NovaObjectSerializer()
        serializer = objects_base.JacketObjectSerializer()

        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.post_start_hook()

        LOG.debug("Join ServiceGroup membership for this service %s",
                  self.topic)
        # Add service to the ServiceGroup membership group.
        self.servicegroup_api.join(self.host, self.topic, self)

        if self.periodic_enable:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            self.tg.add_dynamic_timer(
                self.periodic_tasks,
                initial_delay=initial_delay,
                periodic_interval_max=self.periodic_interval_max)
Example #52
def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
    """Retrieve flavor by flavorid.

    :raises: FlavorNotFound
    """
    if ctxt is None:
        ctxt = context.get_admin_context(read_deleted=read_deleted)

    return objects.Flavor.get_by_flavor_id(ctxt, flavorid, read_deleted)
Example #53
def get_flavor_by_name(name, ctxt=None):
    """Retrieves single flavor by name."""
    if name is None:
        return get_default_flavor()

    if ctxt is None:
        ctxt = context.get_admin_context()

    return objects.Flavor.get_by_name(ctxt, name)
Example #54
def destroy(name):
    """Marks flavor as deleted."""
    try:
        if not name:
            raise ValueError()
        flavor = objects.Flavor(context=context.get_admin_context(), name=name)
        flavor.destroy()
    except (ValueError, exception.NotFound):
        LOG.exception(_LE('Instance type %s not found for deletion'), name)
        raise exception.FlavorNotFoundByName(flavor_name=name)
Example #55
 def __init__(self):
     target = messaging.Target(topic='hyper-agent-vif-update',
                               version='1.0',
                               exchange='hyperagent')
     serializer = objects_base.JacketObjectSerializer()
     self.client = rpc.get_client(target, serializer=serializer)
     self.client.timeout = HyperAgentAPI.plug_retry_timeout
     self.context = nova_context.get_admin_context()
     self.call_back = HyperAgentCallback()
     super(HyperAgentAPI, self).__init__()
Example #56
def get_volume_type(ctxt, id, expected_fields=None):
    """Retrieves single volume type by id."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidVolumeType(reason=msg)

    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.volume_type_get(ctxt, id, expected_fields=expected_fields)
Example #57
def get_qos_specs(ctxt, id):
    """Retrieves single qos specs by id."""
    if id is None:
        msg = _("id cannot be None")
        raise exception.InvalidQoSSpecs(reason=msg)

    if ctxt is None:
        ctxt = context.get_admin_context()

    return db.qos_specs_get(ctxt, id)
Example #58
def get_volume_type_extra_specs(volume_type_id, key=False):
    volume_type = get_volume_type(context.get_admin_context(), volume_type_id)
    extra_specs = volume_type['extra_specs']
    if key:
        if extra_specs.get(key):
            return extra_specs.get(key)
        else:
            return False
    else:
        return extra_specs
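Several helpers above (Examples #27, #40, #52, #53, #56 and #57) share one idiom: accept an optional context and fall back to an admin context when the caller supplies none. A self-contained sketch of that pattern; every name here is a hypothetical stand-in, not the real nova/cinder API:

class RequestContext(object):
    # Stand-in for the real request context class.
    def __init__(self, is_admin=False, read_deleted="no"):
        self.is_admin = is_admin
        self.read_deleted = read_deleted


def get_admin_context(read_deleted="no"):
    # Stand-in for context.get_admin_context().
    return RequestContext(is_admin=True, read_deleted=read_deleted)


FLAVORS = {'m1.tiny': {'id': 1, 'name': 'm1.tiny'}}  # toy datastore


def get_flavor_by_name(name, ctxt=None):
    """Look up a flavor, falling back to an admin context."""
    if ctxt is None:
        ctxt = get_admin_context()
    assert ctxt.is_admin  # the fallback grants admin visibility
    return FLAVORS[name]


print(get_flavor_by_name('m1.tiny'))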