def test_db_records_to_json(self):
        """db_records_to_json turns grid DB rows into JSON-able dicts.

        Seeds two grid rows, converts them, and checks that scalar
        columns survive, that grid_connection stays a JSON *string*
        which jsonutils can decode, and that json_to_obj exposes the
        decoded dict via attribute access.
        """
        grid_1_id = 100
        grid_2_id = 200
        dbi.add_grid(self.ctx.session, grid_1_id, 'test grid 1',
                     '{"wapi_version": "2.0",'
                     '"wapi_admin_user": '******'{ "name": "admin", "password": "******" }}',
                     'ON',
                     'gm-id-1')
        dbi.add_grid(self.ctx.session, grid_2_id, 'test grid 2', '{}', 'OFF',
                     'gm-id-2')

        grids = dbi.get_grids(self.ctx.session)

        json = utils.db_records_to_json(grids)

        self.assertEqual('test grid 1', json[0]["grid_name"])
        self.assertEqual('test grid 2', json[1]["grid_name"])

        # grid_connection is stored serialized; it must come back as a
        # string, not as an already-decoded dict.
        json_string = json[0]["grid_connection"]
        grid_connection_json = jsonutils.loads(json_string)

        self.assertIsInstance(json_string, six.string_types)
        self.assertIsInstance(grid_connection_json, dict)
        self.assertEqual('2.0', grid_connection_json['wapi_version'])
        self.assertEqual('admin',
                         grid_connection_json['wapi_admin_user']['name'])

        grid_connection = utils.json_to_obj('grid_connection',
                                            grid_connection_json)
        self.assertEqual('2.0', grid_connection.wapi_version)
        self.assertEqual('admin', grid_connection.wapi_admin_user.name)

        # An empty connection document stays '{}' and decodes to {}.
        self.assertEqual('{}', json[1]["grid_connection"])
        self.assertEqual({}, jsonutils.loads(json[1]["grid_connection"]))
 def _update_router(self, router_id, set_enable_snat, **kwargs):
     """Update a router, preserving fields the caller did not override.

     The current router is fetched first so that 'name' and
     'admin_state_up' default to their existing values.  'description'
     and 'distributed' are only sent when explicitly supplied.

     :param router_id: ID of the router to update
     :param set_enable_snat: when False, strip 'enable_snat' from the
         current gateway info so the SNAT setting is left untouched
     :returns: service_client.ResponseBody wrapping the updated router
     """
     uri = '%s/routers/%s' % (self.uri_prefix, router_id)
     resp, body = self.get(uri)
     self.expected_success(200, resp.status)
     body = json.loads(body)
     update_body = {}
     update_body['name'] = kwargs.get('name', body['router']['name'])
     update_body['admin_state_up'] = kwargs.get(
         'admin_state_up', body['router']['admin_state_up'])
     # Only send a description when the caller asked for one.
     if 'description' in kwargs:
         update_body['description'] = kwargs['description']
     cur_gw_info = body['router']['external_gateway_info']
     if cur_gw_info:
         # TODO(kevinbenton): setting the external gateway info is not
         # allowed for a regular tenant. If the ability to update is also
         # merged, a test case for this will need to be added similar to
         # the SNAT case.
         cur_gw_info.pop('external_fixed_ips', None)
         if not set_enable_snat:
             cur_gw_info.pop('enable_snat', None)
     update_body['external_gateway_info'] = kwargs.get(
         'external_gateway_info', body['router']['external_gateway_info'])
     if 'distributed' in kwargs:
         update_body['distributed'] = kwargs['distributed']
     update_body = dict(router=update_body)
     update_body = json.dumps(update_body)
     resp, body = self.put(uri, update_body)
     self.expected_success(200, resp.status)
     body = json.loads(body)
     return service_client.ResponseBody(resp, body)
    def test_update_metadata(self):
        """Exercise replacing queue metadata via repeated PUTs.

        NOTE: the first statement skips this test (metadata updates
        should use PATCH instead of PUT); everything below is retained
        for reference but never executes.
        """
        self.skip("This should use patch instead")
        xyz_queue_path = self.url_prefix + '/queues/xyz'
        xyz_queue_path_metadata = xyz_queue_path

        # Create
        self.simulate_put(xyz_queue_path, headers=self.headers)
        self.assertEqual(self.srmock.status, falcon.HTTP_201)

        # Set meta
        doc1 = '{"messages": {"ttl": 600}}'
        self.simulate_put(xyz_queue_path_metadata,
                          headers=self.headers,
                          body=doc1)
        self.assertEqual(self.srmock.status, falcon.HTTP_204)

        # Update
        doc2 = '{"messages": {"ttl": 100}}'
        self.simulate_put(xyz_queue_path_metadata,
                          headers=self.headers,
                          body=doc2)
        self.assertEqual(self.srmock.status, falcon.HTTP_204)

        # Get
        result = self.simulate_get(xyz_queue_path_metadata,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])

        self.assertEqual(result_doc, jsonutils.loads(doc2))
    def test_create_ensure_expires_at_is_not_returned(self):
        """The create serializer must never expose 'expires_at'.

        Serializes the first two task fixtures and verifies, for each,
        the 201 status, the id/input round-trip, the JSON content type,
        and the absence of 'expires_at' in the response body.
        """
        # The original repeated this body verbatim for fixtures[0] and
        # fixtures[1]; a loop keeps the assertions in one place.
        for fixture in self.fixtures[:2]:
            response = webob.Response()

            self.serializer.create(response, fixture)

            serialized_task = jsonutils.loads(response.body)
            self.assertEqual(201, response.status_int)
            self.assertEqual(fixture.task_id,
                             serialized_task['id'])
            self.assertEqual(fixture.task_input,
                             serialized_task['input'])
            self.assertNotIn('expires_at', serialized_task)
            self.assertEqual('application/json', response.content_type)
    def test_simple(self):
        """Every queue operation surfaces HTTP 503.

        NOTE(review): the uniform 503 expectations suggest this runs
        against a deliberately faulted storage driver -- the fixture is
        outside this view, confirm against the enclosing test class.
        """
        self.headers = {
            'Client-ID': str(uuid.uuid4()),
            'X-Project-ID': '338730984abc_1'
        }

        gumshoe_queue_path = self.url_prefix + '/queues/gumshoe'
        doc = '{"messages": {"ttl": 600}}'
        self.simulate_put(gumshoe_queue_path,
                          headers=self.headers,
                          body=doc)
        self.assertEqual(self.srmock.status, falcon.HTTP_503)

        # A failed create must not advertise a Location header.
        location = ('Location', gumshoe_queue_path)
        self.assertNotIn(location, self.srmock.headers)

        result = self.simulate_get(gumshoe_queue_path,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])
        self.assertEqual(self.srmock.status, falcon.HTTP_503)
        self.assertNotEqual(result_doc, jsonutils.loads(doc))

        self.simulate_get(gumshoe_queue_path + '/stats',
                          headers=self.headers)
        self.assertEqual(self.srmock.status, falcon.HTTP_503)

        self.simulate_get(self.url_prefix + '/queues',
                          headers=self.headers)
        self.assertEqual(self.srmock.status, falcon.HTTP_503)

        self.simulate_delete(gumshoe_queue_path, headers=self.headers)
        self.assertEqual(self.srmock.status, falcon.HTTP_503)
Exemple #6
0
    def _listing_test(self, count=10, limit=10,
                      marker=None, detailed=False):
        """Drive a paged pool listing and verify both result pages.

        :param count: number of pools created for the test
        :param limit: page size requested from the API
        :param marker: optional listing marker for the first request
        :param detailed: whether 'options' must appear in each pool
        """
        # NOTE(cpp-cabrera): delete initial pool - it will interfere
        # with listing tests
        self.simulate_delete(self.pool)
        query = '?limit={0}&detailed={1}'.format(limit, detailed)
        if marker:
            query += '&marker={0}'.format(marker)

        with pools(self, count, self.doc['uri'], 'my-group') as expected:
            result = self.simulate_get(self.url_prefix + '/pools',
                                       query_string=query)
            self.assertEqual(self.srmock.status, falcon.HTTP_200)
            results = jsonutils.loads(result[0])
            self.assertIsInstance(results, dict)
            self.assertIn('pools', results)
            self.assertIn('links', results)
            pool_list = results['pools']

            link = results['links'][0]
            self.assertEqual('next', link['rel'])
            href = falcon.uri.parse_query_string(link['href'])
            self.assertIn('marker', href)
            self.assertEqual(href['limit'], str(limit))
            self.assertEqual(href['detailed'], str(detailed).lower())

            next_query_string = ('?marker={marker}&limit={limit}'
                                 '&detailed={detailed}').format(**href)
            next_result = self.simulate_get(link['href'].split('?')[0],
                                            query_string=next_query_string)
            self.assertEqual(self.srmock.status, falcon.HTTP_200)

            next_pool = jsonutils.loads(next_result[0])
            next_pool_list = next_pool['pools']

            self.assertIn('links', next_pool)
            if limit < count:
                self.assertEqual(len(next_pool_list),
                                 min(limit, count-limit))
            else:
                # NOTE(jeffrey4l): when limit >= count, there will be no
                # pools in the 2nd page.
                # assertEqual gives a useful diff on failure, unlike the
                # original assertTrue(len(...) == 0).
                self.assertEqual(0, len(next_pool_list))

            self.assertEqual(len(pool_list), min(limit, count))
            for s in pool_list + next_pool_list:
                # NOTE(flwang): It can't assumed that both sqlalchemy and
                # mongodb can return query result with the same order. Just
                # like the order they're inserted. Actually, sqlalchemy can't
                # guarantee that. So we're leveraging the relationship between
                # pool weight and the index of pools fixture to get the
                # right pool to verify.
                expect = expected[s['weight']]
                path, weight, group = expect[:3]
                self._pool_expect(s, path, weight, self.doc['uri'])
                if detailed:
                    self.assertIn('options', s)
                    self.assertEqual(s['options'], expect[-1])
                else:
                    self.assertNotIn('options', s)
Exemple #7
0
    def test_v3_disabled(self):
        """With v3 disabled, only v2.0 endpoints and discovery remain.

        /v3 returns 404, /v2.0 serves its version document, and the
        root multiple-choices (300) response lists only v2.
        """
        client = TestClient(self.public_app)
        # request to /v3 should fail
        resp = client.get('/v3/')
        self.assertEqual(http_client.NOT_FOUND, resp.status_int)

        # request to /v2.0 should pass
        resp = client.get('/v2.0/')
        self.assertEqual(http_client.OK, resp.status_int)
        data = jsonutils.loads(resp.body)
        expected = v2_VERSION_RESPONSE
        self._paste_in_port(expected['version'],
                            'http://localhost:%s/v2.0/' % self.public_port)
        self.assertEqual(expected, data)

        # only v2 information should be displayed by requests to /
        v2_only_response = {
            "versions": {
                "values": [
                    v2_EXPECTED_RESPONSE
                ]
            }
        }
        self._paste_in_port(v2_only_response['versions']['values'][0],
                            'http://localhost:%s/v2.0/' % self.public_port)
        resp = client.get('/')
        # 300 Multiple Choices is the version-discovery response.
        self.assertEqual(300, resp.status_int)
        data = jsonutils.loads(resp.body)
        self.assertEqual(v2_only_response, data)
Exemple #8
0
def fake_req_obj(ctxt, db_req=None):
    """Build a BuildRequest object populated from a fake DB record.

    :param ctxt: request context passed to the BuildRequest constructor
    :param db_req: optional dict-like DB row; a fresh fake row is
        generated when omitted
    :returns: an objects.BuildRequest with every field (except 'id')
        set from the row, and 'id' cleared from the changed-fields set
    """
    if db_req is None:
        db_req = fake_db_req()
    req_obj = objects.BuildRequest(ctxt)
    for field in req_obj.fields:
        # create() can't be called if this is set
        if field == 'id':
            continue
        value = db_req[field]
        if isinstance(req_obj.fields[field], fields.ObjectField):
            # Object-valued fields are stored serialized; rebuild them
            # from their JSON primitive form.
            if field == 'instance':
                req_obj.instance = objects.Instance.obj_from_primitive(
                        jsonutils.loads(value))
            elif field == 'block_device_mappings':
                req_obj.block_device_mappings = (
                    objects.BlockDeviceMappingList.obj_from_primitive(
                        jsonutils.loads(value)))
        elif field == 'instance_metadata':
            setattr(req_obj, field, jsonutils.loads(value))
        else:
            setattr(req_obj, field, value)
    # This should never be a changed field
    req_obj.obj_reset_changes(['id'])
    return req_obj
Exemple #9
0
    def test_create_server_detect_from_image(self):
        """If user doesn't pass in diskConfig for server, use image metadata
        to specify AUTO or MANUAL.
        """
        def _create_server(image_ref):
            # POST a minimal server-create request (no diskConfig) and
            # return the resulting server document.
            req = fakes.HTTPRequest.blank('/fake/servers')
            req.method = 'POST'
            req.content_type = 'application/json'
            body = {'server': {
                      'name': 'server_test',
                      'imageRef': image_ref,
                      'flavorRef': '1',
                   }}
            req.body = jsonutils.dump_as_bytes(body)
            res = req.get_response(self.app)
            return jsonutils.loads(res.body)['server']

        # Image without auto_disk_config metadata -> MANUAL.
        server_dict = _create_server('a440c04b-79fa-479c-bed1-0b816eaec379')
        self.assertDiskConfig(server_dict, 'MANUAL')

        # Image whose metadata requests auto disk config -> AUTO.
        server_dict = _create_server('70a599e0-31e7-49b7-b260-868f441e862b')
        self.assertDiskConfig(server_dict, 'AUTO')
Exemple #10
0
    def test_plugin_prefix_with_parent_resource(self):
        """Extension resources under a path prefix and a parent resource.

        Registers 'tweedles' under /dummy_svc/tenants/<id>/ with one
        custom member action and one collection action, then verifies
        index, member-action, and collection-action GETs.
        """
        controller = self.DummySvcPlugin()
        parent = dict(member_name="tenant",
                      collection_name="tenants")
        member = {'custom_member_action': "GET"}
        collections = {'collection_action': "GET"}
        res_ext = extensions.ResourceExtension('tweedles', controller, parent,
                                               path_prefix="/dummy_svc",
                                               member_actions=member,
                                               collection_actions=collections)
        test_app = _setup_extensions_test_app(SimpleExtensionManager(res_ext))

        index_response = test_app.get("/dummy_svc/tenants/1/tweedles")
        self.assertEqual(200, index_response.status_int)

        response = test_app.get("/dummy_svc/tenants/1/"
                                "tweedles/1/custom_member_action")
        self.assertEqual(200, response.status_int)
        # Expected-first argument order, consistent with the status
        # assertions above.
        self.assertEqual("value",
                         jsonutils.loads(response.body)['member_action'])

        response = test_app.get("/dummy_svc/tenants/2/"
                                "tweedles/collection_action")
        self.assertEqual(200, response.status_int)
        self.assertEqual("value",
                         jsonutils.loads(response.body)['collection'])
    def test_list_extensions_json(self):
        """The extensions index lists every known extension correctly.

        Checks the expected extension names are present, every
        'updated' stamp parses as ISO 8601, the FOXNSOX fixture matches
        exactly, and each per-extension GET echoes its alias.
        """
        app = compute.APIRouter(init_only=('extensions',))
        request = webob.Request.blank("/fake/extensions")
        response = request.get_response(app)
        self.assertEqual(200, response.status_int)

        # Make sure we have all the extensions, extra extensions being OK.
        data = jsonutils.loads(response.body)
        names = [str(x['name']) for x in data['extensions']
                 if str(x['name']) in self.ext_list]
        names.sort()
        self.assertEqual(names, self.ext_list)

        # Ensure all the timestamps are valid according to iso8601
        for ext in data['extensions']:
            iso8601.parse_date(ext['updated'])

        # Make sure that at least Fox in Sox is correct.
        (fox_ext, ) = [
            x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
        self.assertEqual(
            {'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
                'name': 'Fox In Socks',
                'updated': '2011-01-22T13:25:27-06:00',
                'description': 'The Fox In Socks Extension.',
                'alias': 'FOXNSOX',
                'links': []}, fox_ext)

        # Every extension must also be retrievable individually.
        for ext in data['extensions']:
            url = '/fake/extensions/%s' % ext['alias']
            request = webob.Request.blank(url)
            response = request.get_response(app)
            output = jsonutils.loads(response.body)
            self.assertEqual(output['extension']['alias'], ext['alias'])
    def test_custom_metadata(self):
        """Custom queue metadata at the size limit round-trips via GET,
        augmented with the server-side default reserved fields.
        """
        # Set
        doc = '{{"messages": {{"ttl": 600}}, "padding": "{pad}"}}'

        # Pad the document up to the transport's maximum metadata size.
        # NOTE(review): the -2 presumably compensates for the length
        # delta between the format template and the rendered document
        # (brace escapes / {pad} placeholder) -- confirm before relying
        # on exact-limit behavior.
        max_size = self.transport_cfg.max_queue_metadata
        padding_len = max_size - (len(doc) - 2)

        doc = doc.format(pad='x' * padding_len)
        self.simulate_put(self.fizbat_queue_path,
                          headers=self.headers,
                          body=doc)
        self.assertEqual(falcon.HTTP_201, self.srmock.status)

        # Get
        result = self.simulate_get(self.fizbat_queue_path,
                                   headers=self.headers)
        result_doc = jsonutils.loads(result[0])
        ref_doc = jsonutils.loads(doc)
        # The server injects these reserved defaults into the stored
        # metadata; mirror them in the reference document.
        ref_doc['_default_message_ttl'] = 3600
        ref_doc['_max_messages_post_size'] = 262144
        ref_doc['_default_message_delay'] = 0
        ref_doc['_dead_letter_queue'] = None
        ref_doc['_dead_letter_queue_messages_ttl'] = None
        ref_doc['_max_claim_count'] = None
        self.assertEqual(ref_doc, result_doc)
        self.assertEqual(falcon.HTTP_200, self.srmock.status)
 def show_credential(self, credential_id):
     """Fetch a single credential and decode its JSON-encoded blob."""
     url = 'credentials/%s' % credential_id
     resp, raw = self.get(url)
     self.expected_success(200, resp.status)
     parsed = json.loads(raw)
     # The blob field is itself a JSON document; expose it as a dict.
     parsed['credential']['blob'] = json.loads(parsed['credential']['blob'])
     return service_client.ResponseBody(resp, parsed)
Exemple #14
0
    def test_post_put_delete_triggers_notification(self):
        """Each network CRUD call emits exactly one matching notification.

        Creates, updates, then deletes a network and after each call
        checks the notifier call count and the last emitted event type.
        """
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}}, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.assertEqual(1, self.mock_notifier.call_count)
        self.assertEqual(mock.call(mock.ANY, json_body, 'network.create.end'),
                         self.mock_notifier.mock_calls[-1])
        network_id = json_body['network']['id']

        response = self.app.put_json(
            '/v2.0/networks/%s.json' % network_id,
            params={'network': {'name': 'meh-2'}},
            headers=req_headers)
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.assertEqual(2, self.mock_notifier.call_count)
        self.assertEqual(mock.call(mock.ANY, json_body, 'network.update.end'),
                         self.mock_notifier.mock_calls[-1])

        response = self.app.delete(
            '/v2.0/networks/%s.json' % network_id, headers=req_headers)
        self.assertEqual(204, response.status_int)
        self.assertEqual(3, self.mock_notifier.call_count)
        # No need to validate data content sent to the notifier as it's just
        # going to load the object from the database
        self.assertEqual(mock.call(mock.ANY, mock.ANY, 'network.delete.end'),
                         self.mock_notifier.mock_calls[-1])
Exemple #15
0
    def test_put_get_delete(self):
        """Quota PUT is visible to GET; DELETE restores the defaults."""
        # PUT and DELETE actions are in the same test as a meaningful DELETE
        # test would require a put anyway
        url = '%s/foo.json' % self.base_url
        response = self.app.put_json(url,
                                     params={'quota': {'network': 99}},
                                     headers={'X-Project-Id': 'admin',
                                              'X-Roles': 'admin'})
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        self._verify_after_update(json_body, {'network': 99})

        # The updated quota must also be visible to a non-admin read.
        response = self.app.get(url, headers={'X-Project-Id': 'foo'})
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        self._verify_after_update(json_body, {'network': 99})

        response = self.app.delete(url, headers={'X-Project-Id': 'admin',
                                                 'X-Roles': 'admin'})
        self.assertEqual(204, response.status_int)
        # As DELETE does not return a body we need another GET
        response = self.app.get(url, headers={'X-Project-Id': 'foo'})
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        self._verify_default_limits(json_body)
Exemple #16
0
def upgrade_ubuntu_cobbler_profile_6_0_to_6_1(connection):
    """Rewrite Ubuntu cobbler profiles from the 12.04 to the 14.04 name.

    Two passes over the database: first each row of the 'attributes'
    table (per-node generated attributes), then each row of 'releases'
    (attributes_metadata). Rows whose profile is not
    'ubuntu_1204_x86_64' are left untouched.

    :param connection: SQLAlchemy connection used for the raw SQL
    """
    select_query = text("SELECT id, generated FROM attributes")
    update_query = text(
        "UPDATE attributes SET generated = :generated WHERE id = :attr_id")
    for attr_id, generated in connection.execute(select_query):
        # 'generated' is a JSON document stored as text.
        attrs = jsonutils.loads(generated)
        if attrs['cobbler']['profile'] == 'ubuntu_1204_x86_64':
            attrs['cobbler']['profile'] = 'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                generated=jsonutils.dumps(attrs),
                attr_id=attr_id)

    select_query = text("SELECT id, attributes_metadata FROM releases")
    update_query = text(
        "UPDATE releases SET attributes_metadata = :attrs_meta"
        " WHERE id = :release_id")
    for release_id, attributes_metadata in connection.execute(select_query):
        attrs = jsonutils.loads(attributes_metadata)
        if attrs['generated']['cobbler']['profile']['generator_arg'] == \
                'ubuntu_1204_x86_64':
            attrs['generated']['cobbler']['profile']['generator_arg'] = \
                'ubuntu_1404_x86_64'
            connection.execute(
                update_query,
                attrs_meta=jsonutils.dumps(attrs),
                release_id=release_id)
 def get_credential(self, credential_id):
     """Return the details of one credential, blob decoded to a dict."""
     resp, raw = self.get("credentials/%s" % credential_id)
     self.expected_success(200, resp.status)
     decoded = json.loads(raw)
     # The blob field arrives JSON-encoded inside the credential.
     decoded["credential"]["blob"] = json.loads(decoded["credential"]["blob"])
     return service_client.ResponseBody(resp, decoded)
    def test_credentials_create_get_update_delete(self):
        """EC2 credential lifecycle: create, update, then read back.

        Creates a credential for the first project, updates it onto the
        second project with a fresh blob, and verifies that GET returns
        exactly what the update reported. Deletion happens in cleanup.
        """
        blob = '{"access": "%s", "secret": "%s"}' % (
            data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
        cred = self.creds_client.create_credential(
            user_id=self.user_body['id'], project_id=self.projects[0],
            blob=blob, type='ec2')['credential']
        self.addCleanup(self._delete_credential, cred['id'])
        # creds_list[0] holds expected top-level keys, creds_list[1]
        # the expected blob keys.
        for value1 in self.creds_list[0]:
            self.assertIn(value1, cred)
        for value2 in self.creds_list[1]:
            self.assertIn(value2, cred['blob'])

        new_keys = [data_utils.rand_name('NewAccess'),
                    data_utils.rand_name('NewSecret')]
        blob = '{"access": "%s", "secret": "%s"}' % (new_keys[0], new_keys[1])
        update_body = self.creds_client.update_credential(
            cred['id'], blob=blob, project_id=self.projects[1],
            type='ec2')['credential']
        update_body['blob'] = json.loads(update_body['blob'])
        self.assertEqual(cred['id'], update_body['id'])
        self.assertEqual(self.projects[1], update_body['project_id'])
        self.assertEqual(self.user_body['id'], update_body['user_id'])
        self.assertEqual(update_body['blob']['access'], new_keys[0])
        self.assertEqual(update_body['blob']['secret'], new_keys[1])

        get_body = self.creds_client.show_credential(cred['id'])['credential']
        get_body['blob'] = json.loads(get_body['blob'])
        for value1 in self.creds_list[0]:
            self.assertEqual(update_body[value1],
                             get_body[value1])
        for value2 in self.creds_list[1]:
            self.assertEqual(update_body['blob'][value2],
                             get_body['blob'][value2])
Exemple #19
0
    def request(self, method, url, extra_headers=False, headers=None,
                body=None, chunked=False):
        """A simple HTTP request interface.

        Note: this overloads the `request` method from the parent class and
        thus must implement the same method signature.

        :param method: HTTP verb to use
        :param url: target URL
        :param extra_headers: when True and `headers` is given, merge
            the default JSON headers into the supplied ones
        :param headers: optional explicit headers; JSON-accepting
            defaults are used when omitted
        :param body: optional request body
        :param chunked: accepted for signature compatibility; not used
            by this implementation
        :raises exceptions.Unauthorized: on 401/403 responses
        :raises exceptions.IdentityError: on any status other than
            200, 201 or 204
        :returns: tuple of (raw response, JSON-decoded body)
        """
        if headers is None:
            # Always accept 'json', for xml token client too.
            # Because XML response is not easily
            # converted to the corresponding JSON one
            headers = self.get_headers(accept_type="json")
        elif extra_headers:
            try:
                headers.update(self.get_headers(accept_type="json"))
            except (ValueError, TypeError):
                # Caller passed something that isn't dict-like; fall
                # back to the defaults rather than failing.
                headers = self.get_headers(accept_type="json")

        resp, resp_body = self.raw_request(url, method,
                                           headers=headers, body=body)
        self._log_request(method, url, resp, req_headers=headers,
                          req_body='<omitted>', resp_body=resp_body)

        if resp.status in [401, 403]:
            resp_body = json.loads(resp_body)
            raise exceptions.Unauthorized(resp_body['error']['message'])
        elif resp.status not in [200, 201, 204]:
            raise exceptions.IdentityError(
                'Unexpected status code {0}'.format(resp.status))

        return resp, json.loads(resp_body)
    def test_old_fields_exists(self):
        """Pre-existing plugin columns survive the migration unchanged."""
        plugins = self.meta.tables["plugins"]

        # Plain scalar columns: the value read back must match the
        # seeded fixture exactly.
        scalar_checks = [
            ("name", "test_plugin"),
            ("title", "Test plugin"),
            ("version", "1.0.0"),
            ("description", "Test plugin for Fuel"),
            ("homepage", "http://fuel_plugins.test_plugin.com"),
            ("package_version", "3.0.0"),
        ]
        for column, expected in scalar_checks:
            result = db.execute(sa.select([plugins.c[column]]))
            self.assertEqual(result.fetchone()[0], expected)

        # JSON-encoded columns: decode before comparing.
        json_checks = [
            ("groups", ["tgroup"]),
            ("authors", ["tauthor"]),
            ("licenses", ["tlicense"]),
            ("releases", [{"repository_path": "repositories/ubuntu"}]),
            ("fuel_version", ["6.1", "7.0"]),
        ]
        for column, expected in json_checks:
            result = db.execute(sa.select([plugins.c[column]]))
            self.assertEqual(jsonutils.loads(result.fetchone()[0]), expected)
 def test_new_fields_exists_and_empty(self):
     """offloading_modes defaults to an empty list on NIC and bond rows."""
     for table_name in ("node_nic_interfaces", "node_bond_interfaces"):
         column = self.meta.tables[table_name].c.offloading_modes
         result = db.execute(sa.select([column]))
         self.assertEqual(jsonutils.loads(result.fetchone()[0]), [])
    def test_update_item(self):
        """Encryption specs updated via PUT are visible on a later GET."""
        volume_type = self._default_volume_type

        # Create Encryption Specs
        create_body = {"encryption": {'cipher': 'cipher',
                                      'control_location': 'front-end',
                                      'key_size': 128,
                                      'provider': 'fake_provider'}}
        self._create_type_and_encryption(volume_type, create_body)

        # Update Encryption Specs
        update_body = {"encryption": {'key_size': 512,
                                      'provider': 'fake_provider2'}}

        put_response = self._get_response(
            volume_type, req_method='PUT',
            req_body=jsonutils.dump_as_bytes(update_body),
            req_headers='application/json',
            url='/v2/%s/types/%s/encryption/' + fake.ENCRYPTION_KEY_ID)

        put_result = jsonutils.loads(put_response.body)
        self.assertEqual(512, put_result['encryption']['key_size'])
        self.assertEqual('fake_provider2', put_result['encryption']['provider'])

        # Get Encryption Specs
        get_response = self._get_response(volume_type)
        get_result = jsonutils.loads(get_response.body)

        # Confirm Encryption Specs
        self.assertEqual(512, get_result['key_size'])
        self.assertEqual('fake_provider2', get_result['provider'])

        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def live_migration(self, context, migrate_data):
        """Start the live migration.

        :param context: security context
        :param migrate_data: a PowerVMLiveMigrateData object; expected
            to already carry the destination-side data, with the
            vFC/vSCSI mapping payloads under vol_data
        """
        LOG.debug("Starting migration.", instance=self.instance)
        # Pass the argument lazily so interpolation only happens when
        # debug logging is enabled (oslo logging guideline), instead of
        # eagerly formatting with '%'.
        LOG.debug("Migrate data: %s", migrate_data)

        # The passed in mig data has more info (dest data added), so replace
        self.mig_data = migrate_data
        # Get the vFC and vSCSI live migration mappings
        vol_data = migrate_data.vol_data
        vfc_mappings = vol_data.get('vfc_lpm_mappings')
        if vfc_mappings is not None:
            vfc_mappings = jsonutils.loads(vfc_mappings)
        vscsi_mappings = vol_data.get('vscsi_lpm_mappings')
        if vscsi_mappings is not None:
            vscsi_mappings = jsonutils.loads(vscsi_mappings)

        try:
            # Migrate the LPAR!
            mig.migrate_lpar(
                self.lpar_w, self.mig_data.dest_sys_name,
                validate_only=False, tgt_mgmt_svr=self.mig_data.dest_ip,
                tgt_mgmt_usr=self.mig_data.dest_user_id,
                virtual_fc_mappings=vfc_mappings,
                virtual_scsi_mappings=vscsi_mappings,
                sdn_override=True, vlan_check_override=True)

        except Exception:
            LOG.error(_LE("Live migration failed."), instance=self.instance)
            raise
        finally:
            LOG.debug("Finished migration.", instance=self.instance)
 def test_no_echo_false(self):
     """A parameter whose NoEcho is "false" must not be hidden."""
     param = new_parameter("echoic", {"Type": self.p_type, "NoEcho": "false"}, self.value)
     self.assertFalse(param.hidden())
     # Json parameters compare structurally; all others as plain strings.
     if self.p_type != "Json":
         self.assertEqual(self.expected, str(param))
     else:
         self.assertEqual(json.loads(self.expected), json.loads(str(param)))
Exemple #25
0
    def test_task_schema_api(self):
        """GET /schemas/task and /schemas/tasks return the minimal schemas.

        The original duplicated the request/assert sequence for the two
        endpoints; a loop pairs each path with its schema factory.
        """
        checks = [
            # 0. GET /schemas/task -- verify schema for a single task
            ("/v2/schemas/task", tasks.get_task_schema),
            # 1. GET /schemas/tasks -- verify schema for task collections
            ("/v2/schemas/tasks", tasks.get_collection_schema),
        ]
        for path, schema_factory in checks:
            response, content = self.http.request(path, 'GET',
                                                  headers=minimal_task_headers())
            self.assertEqual(http_client.OK, response.status)

            expected_schema = schema_factory().minimal()
            data = json.loads(content)
            self.assertIsNotNone(data)
            self.assertEqual(expected_schema, data)

        # NOTE(nikhil): wait for all task executions to finish before exiting
        # else there is a risk of running into deadlock
        self._wait_on_task_execution()
    def test_job_executions_update(self):
        """PATCH payloads for job-execution updates match what was asked.

        Covers a full update, an empty update, and explicitly unsetting
        fields by passing None values.
        """
        url = self.URL + '/job-executions/id'
        self.responses.patch(url, status_code=202, json=self.update_json)

        # check that all parameters will be updated
        resp = self.client.job_executions.update("id", **self.update_json)
        self.assertEqual(url, self.responses.last_request.url)
        self.assertIsInstance(resp, je.JobExecution)
        self.assertEqual(self.update_json,
                         json.loads(self.responses.last_request.body))

        # check that parameters will not be updated
        self.client.job_executions.update("id")
        self.assertEqual(url, self.responses.last_request.url)
        self.assertEqual({},
                         json.loads(self.responses.last_request.body))

        # check that all parameters will be unset
        unset_json = {
            "is_public": None, "is_protected": None
        }

        self.client.job_executions.update("id", **unset_json)
        self.assertEqual(url, self.responses.last_request.url)
        self.assertEqual(unset_json,
                         json.loads(self.responses.last_request.body))
Exemple #27
0
 def _update_router(self, router_id, set_enable_snat, **kwargs):
     """Update a router, defaulting unspecified fields to current values.

     :param router_id: ID of the router to update
     :param set_enable_snat: when False, strip 'enable_snat' from the
         current gateway info so the SNAT setting is left untouched
     :returns: service_client.ResponseBody wrapping the updated router
     """
     uri = "%s/routers/%s" % (self.uri_prefix, router_id)
     resp, body = self.get(uri)
     self.expected_success(200, resp.status)
     body = json.loads(body)
     update_body = {}
     update_body["name"] = kwargs.get("name", body["router"]["name"])
     update_body["admin_state_up"] = kwargs.get("admin_state_up", body["router"]["admin_state_up"])
     # 'description' is only sent when explicitly supplied.
     if "description" in kwargs:
         update_body["description"] = kwargs["description"]
     cur_gw_info = body["router"]["external_gateway_info"]
     if cur_gw_info:
         # TODO(kevinbenton): setting the external gateway info is not
         # allowed for a regular tenant. If the ability to update is also
         # merged, a test case for this will need to be added similar to
         # the SNAT case.
         cur_gw_info.pop("external_fixed_ips", None)
         if not set_enable_snat:
             cur_gw_info.pop("enable_snat", None)
     update_body["external_gateway_info"] = kwargs.get(
         "external_gateway_info", body["router"]["external_gateway_info"]
     )
     if "distributed" in kwargs:
         update_body["distributed"] = kwargs["distributed"]
     update_body = dict(router=update_body)
     update_body = json.dumps(update_body)
     resp, body = self.put(uri, update_body)
     self.expected_success(200, resp.status)
     body = json.loads(body)
     return service_client.ResponseBody(resp, body)
    def wait_for_server_status_to_active(self, server_id, status):
        """Waits for a server to reach a given status.

        Polls the compute API every CONF.boto.build_interval seconds
        until the server reports `status`, raising TimeoutException
        after CONF.compute.build_timeout seconds.

        :param server_id: ID of the server to poll
        :param status: target status string to wait for
        :raises exceptions.TimeoutException: if the status is not
            reached within the build timeout
        """
        build_timeout = CONF.compute.build_timeout
        build_interval = CONF.boto.build_interval
        region = CONF.compute.region
        # Build the REST client once; the original recreated it on
        # every poll iteration.
        rs_client = rest_client.RestClient(self.auth_provider,
                                           "compute", region)

        def _current_status():
            # One GET of the server document, returning its status.
            resp, body = rs_client.get("servers/%s" % str(server_id))
            body = jsonutils.loads(body)
            return body['server']['status']

        server_status = _current_status()
        start = int(time.time())

        while server_status != status:
            time.sleep(build_interval)
            server_status = _current_status()
            if int(time.time()) - start >= build_timeout:
                message = ('server %s failed to reach'
                           ' %s status (current %s) '
                           'within the required time (%s s).' %
                           (server_id, status, server_status,
                            build_timeout))
                raise exceptions.TimeoutException(message)
Exemple #29
0
def create_node(ctx, cluster, profile, **kwargs):
    """Create and persist a test node record via the DB API.

    :param ctx: database request context.
    :param cluster: cluster object the node belongs to; a falsy value
        creates an orphan node (empty cluster_id, index -1).
    :param profile: profile object supplying ``profile_id``.
    :param kwargs: overrides for any default column value.
    :returns: the node returned by ``db_api.node_create``.
    """
    if cluster:
        cluster_id = cluster.id
        index = db_api.cluster_next_index(ctx, cluster_id)
    else:
        cluster_id = ''
        index = -1

    values = {
        'name': 'test_node_name',
        'physical_id': UUID1,
        'cluster_id': cluster_id,
        'profile_id': profile.id,
        'project': ctx.project,
        'index': index,
        'role': None,
        'created_at': None,
        'updated_at': None,
        'status': 'ACTIVE',
        'status_reason': 'create complete',
        # Plain dict literals; parsing constant JSON strings at runtime
        # produced exactly these objects with needless extra work.
        'meta_data': {'foo': '123'},
        'data': {'key1': 'value1'},
    }
    values.update(kwargs)
    return db_api.node_create(ctx, values)
Exemple #30
0
def load_json_from_file(name, json_file, json_str, default_value):
    """Load JSON configuration from a file, a literal string, or a default.

    ``json_file`` (resolved through oslo.config's ``find_file``) takes
    precedence over ``json_str``; when neither is set, ``default_value``
    is returned with a warning.

    :raises cfg.Error: if the file is missing or either source fails to
        parse as JSON.
    """
    if json_file:
        path = cfg.CONF.find_file(json_file)
        if not path:
            LOG.error(_LE('Failed to load %(name)s_file'
                          '"%(json_file)s": file not found'),
                      {'name': name, 'json_file': json_file})
            raise cfg.Error(_('NECNWA option parse error'))
        try:
            with open(path) as handle:
                return jsonutils.loads(handle.read())
        except Exception as err:
            LOG.error(_LE('Failed to load %(name)s_file '
                          '"%(json_file)s": %(reason)s'),
                      {'reason': err, 'name': name, 'json_file': json_file})
            raise cfg.Error(_('NECNWA option parse error'))

    if json_str:
        try:
            return jsonutils.loads(json_str)
        except Exception as err:
            LOG.error(_LE('NECNWA option error during loading %(name)s '
                          '(%(data)s): %(reason)s'),
                      {'reason': err, 'name': name, 'data': json_str})
            raise cfg.Error(_('NECNWA option parse error'))

    LOG.warning(_LW('%(name)s is not configured. '
                    'Make sure to set [NWA] %(name)s_file '
                    'in NWA plugin configuration file. '
                    'Using %(default)s as default value.'),
                {'name': name, 'default': default_value})
    return default_value
Exemple #31
0
    def get_signals(self):
        """Collect and normalize signal objects from the Swift container.

        :returns: None when the container or its index is gone (Swift
            objects deleted by the user), otherwise a list of signal
            dicts with DATA / REASON / STATUS and the unique-id field
            defaulted.
        :raises exception.Error: when a signal body is not valid JSON.
        """
        try:
            container = self.client().get_container(self.stack.id)
        except Exception as exc:
            self.client_plugin().ignore_not_found(exc)
            return

        index = container[1]
        if not index:  # Swift objects were deleted by user
            return None

        # Remove objects in that are for other handle resources, since
        # multiple SwiftSignalHandle resources in the same stack share
        # a container
        filtered = [obj for obj in index if self.obj_name in obj['name']]

        # Fetch objects from Swift and filter results
        obj_bodies = []
        for obj in filtered:
            try:
                signal = self.client().get_object(self.stack.id, obj['name'])
            except Exception as exc:
                self.client_plugin().ignore_not_found(exc)
                continue

            body = signal[1]
            if body == swift.IN_PROGRESS:  # Ignore the initial object
                continue
            if body == "":
                obj_bodies.append({})
                continue
            try:
                obj_bodies.append(jsonutils.loads(body))
            except ValueError:
                raise exception.Error(
                    _("Failed to parse JSON data: %s") % body)

        # Set default values on each signal
        signals = []
        signal_num = 1
        for signal in obj_bodies:

            # Remove previous signals with the same ID.  Rebuild the list
            # instead of calling list.remove() while iterating the same
            # list, which could skip adjacent duplicates.
            sig_id = self.UNIQUE_ID
            if sig_id in signal:
                signals = [s for s in signals
                           if s.get(sig_id) != signal[sig_id]]

            # Make sure all fields are set, since all are optional
            signal.setdefault(self.DATA, None)
            unique_id = signal.setdefault(sig_id, signal_num)
            reason = 'Signal %s received' % unique_id
            signal.setdefault(self.REASON, reason)
            signal.setdefault(self.STATUS, self.STATUS_SUCCESS)

            signals.append(signal)
            signal_num += 1

        return signals
Exemple #32
0
 def get_password(self, server_id):
     """Fetch the os-server-password entry for the given server."""
     url = "servers/%s/os-server-password" % server_id
     response, raw = self.get(url)
     parsed = json.loads(raw)
     self.validate_response(schema.get_password, response, parsed)
     return service_client.ResponseBody(response, parsed)
Exemple #33
0
 def list_addresses(self, server_id):
     """Lists all addresses for a server."""
     response, raw = self.get("servers/%s/ips" % server_id)
     parsed = json.loads(raw)
     self.validate_response(schema.list_addresses, response, parsed)
     return service_client.ResponseBody(response, parsed['addresses'])
Exemple #34
0
 def show_server(self, server_id):
     """Returns the details of an existing server."""
     response, raw = self.get("servers/%s" % server_id)
     parsed = json.loads(raw)
     self.validate_response(schema.get_server, response, parsed)
     return service_client.ResponseBody(response, parsed['server'])
Exemple #35
0
 def get_role(self, role_id):
     """Get a role by its id."""
     response, raw = self.get('OS-KSADM/roles/%s' % role_id)
     self.expected_success(200, response.status)
     parsed = json.loads(raw)
     return service_client.ResponseBody(response, parsed['role'])
Exemple #36
0
 def list_tenants(self):
     """Returns tenants."""
     response, raw = self.get('tenants')
     self.expected_success(200, response.status)
     parsed = json.loads(raw)
     return service_client.ResponseBodyList(response, parsed['tenants'])
Exemple #37
0
 def equals(self, rhs):
     """Return True when *rhs* decodes to the same JSON value as ours."""
     return jsonutils.loads(rhs) == jsonutils.loads(self.other_json)
 def _get_servers(self, body):
     return jsonutils.loads(body).get('servers')
Exemple #39
0
 def connection_info(self):
     """Deserialize and return the stored JSON connection info."""
     raw = self.connection_info_json
     return jsonutils.loads(raw)
Exemple #40
0
 def from_json(cls, data):
     '''Instantiate current class from JSON encoded string'''
     kwargs = jsonutils.loads(data)
     return cls(**kwargs)
Exemple #41
0
    def assertJsonEqual(self, expected, observed, message=''):
        """Asserts that 2 complex data structures are json equivalent.

        We use data structures which serialize down to json throughout
        the code, and often times we just need to know that these are
        json equivalent. This means that list order is not important,
        and should be sorted.

        Because this is a recursive set of assertions, when failure
        happens we want to expose both the local failure and the
        global view of the 2 data structures being compared. So a
        MismatchError which includes the inner failure as the
        mismatch, and the passed in expected / observed as matchee /
        matcher.

        """
        # JSON strings are decoded first so both sides compare as
        # native Python structures.
        if isinstance(expected, six.string_types):
            expected = jsonutils.loads(expected)
        if isinstance(observed, six.string_types):
            observed = jsonutils.loads(observed)

        # Canonical ordering key: unhashable/unorderable values (sets,
        # lists, datetimes, dicts) are reduced to comparable forms so
        # heterogeneous lists can be sorted deterministically.
        def sort_key(x):
            if isinstance(x, (set, list)) or isinstance(x, datetime.datetime):
                return str(x)
            if isinstance(x, dict):
                items = ((sort_key(key), sort_key(value))
                         for key, value in x.items())
                return sorted(items)
            return x

        # Recursive comparison; `path` tracks the location inside the
        # structures for readable failure messages.
        def inner(expected, observed, path='root'):
            if isinstance(expected, dict) and isinstance(observed, dict):
                self.assertEqual(
                    len(expected), len(observed),
                    ('path: %s. Different dict key sets\n'
                     'expected=%s\n'
                     'observed=%s\n'
                     'difference=%s') %
                    (path, sorted(expected.keys()), sorted(observed.keys()),
                     list(
                         set(expected.keys()).symmetric_difference(
                             set(observed.keys())))))
                expected_keys = sorted(expected)
                observed_keys = sorted(observed)
                self.assertEqual(expected_keys, observed_keys,
                                 'path: %s. Dict keys are not equal' % path)
                for key in list(six.iterkeys(expected)):
                    inner(expected[key], observed[key], path + '.%s' % key)
            elif (isinstance(expected, (list, tuple, set))
                  and isinstance(observed, (list, tuple, set))):
                self.assertEqual(
                    len(expected), len(observed),
                    ('path: %s. Different list items\n'
                     'expected=%s\n'
                     'observed=%s\n'
                     'difference=%s') %
                    (path, sorted(expected, key=sort_key),
                     sorted(observed, key=sort_key), [
                         a for a in itertools.chain(expected, observed)
                         if (a not in expected) or (a not in observed)
                     ]))

                # Sequences are compared element-wise after sorting, so
                # list order does not affect equality.
                expected_values_iter = iter(sorted(expected, key=sort_key))
                observed_values_iter = iter(sorted(observed, key=sort_key))

                for i in range(len(expected)):
                    inner(next(expected_values_iter),
                          next(observed_values_iter), path + '[%s]' % i)
            else:
                # Leaf values: plain equality.
                self.assertEqual(expected, observed, 'path: %s' % path)

        try:
            inner(expected, observed)
        except testtools.matchers.MismatchError as e:
            # Re-raise with both the local mismatch and the full
            # structures attached for debugging.
            difference = e.mismatch.describe()
            if message:
                message = 'message: %s\n' % message
            msg = "\nexpected:\n%s\nactual:\n%s\ndifference:\n%s\n%s" % (
                pprint.pformat(expected), pprint.pformat(observed), difference,
                message)
            error = AssertionError(msg)
            error.expected = expected
            error.observed = observed
            error.difference = difference
            raise error
Exemple #42
0
    def deserialize(self, object_str):
        """Deserialize an Ironic object from its JSON string form."""
        return json.loads(object_str)
Exemple #43
0
 def extend_response_data(req, res):
     """Copy the request's extended_key query param into the response."""
     payload = jsonutils.loads(res.body)
     payload['FOXNSOX:extended_key'] = req.GET.get('extended_key')
     res.body = jsonutils.dump_as_bytes(payload)
     return res
Exemple #44
0
def _json_loads(properties, attr):
    """Decode properties[attr] in place when it is still a JSON string."""
    value = properties[attr]
    if isinstance(value, six.string_types):
        properties[attr] = jsonutils.loads(value)
Exemple #45
0
 def show_hypervisor_uptime(self, hypervisor_id):
     """Display the uptime of the specified hypervisor."""
     response, raw = self.get('os-hypervisors/%s/uptime' % hypervisor_id)
     parsed = json.loads(raw)
     self.validate_response(schema.get_hypervisor_uptime, response, parsed)
     return rest_client.ResponseBody(response, parsed)
Exemple #46
0
 def _update_handler(req, res):
     """Inject the request's 'uneditable' param into the JSON response."""
     payload = jsonutils.loads(res.body)
     payload['uneditable'] = req.params['uneditable']
     res.body = jsonutils.dump_as_bytes(payload)
     return res
Exemple #47
0
 def list_servers_on_hypervisor(self, hypervisor_name):
     """List instances belonging to the specified hypervisor."""
     response, raw = self.get('os-hypervisors/%s/servers' % hypervisor_name)
     parsed = json.loads(raw)
     self.validate_response(schema.get_hypervisors_servers, response, parsed)
     return rest_client.ResponseBody(response, parsed)
Exemple #48
0
 def search_hypervisor(self, hypervisor_name):
     """Search specified hypervisor."""
     response, raw = self.get('os-hypervisors/%s/search' % hypervisor_name)
     parsed = json.loads(raw)
     self.validate_response(schema.list_search_hypervisors, response, parsed)
     return rest_client.ResponseBody(response, parsed)
Exemple #49
0
def replication_load(options, args):
    """%(prog)s load <server:port> <path>

    Load the contents of a local directory into glance.

    server:port: the location of the glance instance.
    path:        a directory on disk containing the data.
    """

    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))

    path = args.pop()
    server, port = utils.parse_valid_host_port(args.pop())

    imageservice = get_image_service()
    client = imageservice(http.HTTPConnection(server, port),
                          options.targettoken)

    # UUIDs of images whose metadata or data we pushed to the target.
    updated = []

    for ent in os.listdir(path):
        if uuidutils.is_uuid_like(ent):
            image_uuid = ent
            LOG.info(_LI('Considering: %s'), image_uuid)

            # Metadata dumps are JSON text files named after the UUID.
            meta_file_name = os.path.join(path, image_uuid)
            with open(meta_file_name) as meta_file:
                meta = jsonutils.loads(meta_file.read())

            # Remove keys which don't make sense for replication
            for key in options.dontreplicate.split(' '):
                if key in meta:
                    LOG.debug('Stripping %(header)s from saved '
                              'metadata', {'header': key})
                    del meta[key]

            if _image_present(client, image_uuid):
                # NOTE(mikal): Perhaps we just need to update the metadata?
                # Note that we don't attempt to change an image file once it
                # has been uploaded.
                LOG.debug('Image %s already present', image_uuid)
                headers = client.get_image_meta(image_uuid)
                for key in options.dontreplicate.split(' '):
                    if key in headers:
                        LOG.debug(
                            'Stripping %(header)s from target '
                            'metadata', {'header': key})
                        del headers[key]

                if _dict_diff(meta, headers):
                    LOG.info(_LI('Image %s metadata has changed'), image_uuid)
                    headers, body = client.add_image_meta(meta)
                    _check_upload_response_headers(headers, body)
                    updated.append(meta['id'])

            else:
                if not os.path.exists(os.path.join(path, image_uuid + '.img')):
                    LOG.debug('%s dump is missing image data, skipping',
                              image_uuid)
                    continue

                # Upload the image itself.  The payload is raw binary image
                # data, so open it in binary mode -- text mode would attempt
                # to decode it and can corrupt or reject the bytes.
                img_path = os.path.join(path, image_uuid + '.img')
                with open(img_path, 'rb') as img_file:
                    try:
                        headers, body = client.add_image(meta, img_file)
                        _check_upload_response_headers(headers, body)
                        updated.append(meta['id'])
                    except exc.HTTPConflict:
                        LOG.error(
                            _LE(IMAGE_ALREADY_PRESENT_MESSAGE) %
                            image_uuid)  # noqa
Exemple #50
0
 def show_hypervisor_statistics(self):
     """Get hypervisor statistics over all compute nodes."""
     response, raw = self.get('os-hypervisors/statistics')
     parsed = json.loads(raw)
     self.validate_response(schema.get_hypervisor_statistics, response, parsed)
     return rest_client.ResponseBody(response, parsed)
 def put_servers_1234(request, context):
     """Fake PUT /servers/1234 handler: validate the payload, echo body."""
     payload = jsonutils.loads(request.body)
     assert list(payload) == ['server']
     fakes.assert_has_keys(payload['server'],
                           optional=['name', 'adminPass'])
     return request.body
Exemple #52
0
    def request(self, opt, content_type="application/json", **message):
        '''Issues request to controller.'''
        self.message = self._render(getattr(templates, opt), **message)
        method = self.message['method']
        url = self.message['path']
        body = self.message['body'] if 'body' in self.message else None
        g = eventlet_request.GenericRequestEventlet(
            self,
            method,
            url,
            body,
            content_type,
            auto_login=True,
            http_timeout=self._http_timeout,
            retries=self._retries,
            redirects=self._redirects)
        g.start()
        response = g.join()

        # response is a modified HTTPResponse object or None.
        # response.read() will not work on response as the underlying library
        # request_eventlet.ApiRequestEventlet has already called this
        # method in order to extract the body and headers for processing.
        # ApiRequestEventlet derived classes call .read() and
        # .getheaders() on the HTTPResponse objects and store the results in
        # the response object's .body and .headers data members for future
        # access.

        if response is None:
            # Timeout.
            LOG.error(_LE('Request timed out: %(method)s to %(url)s'), {
                'method': method,
                'url': url
            })
            raise exception.RequestTimeout()

        status = response.status
        if status == 401:
            raise exception.UnAuthorizedRequest()
        # Fail-fast: Check for exception conditions and raise the
        # appropriate exceptions for known error codes.
        if status in [404]:
            LOG.warning(
                _LW("Resource not found. Response status: %(status)s, "
                    "response body: %(response.body)s"), {
                        'status': status,
                        'response.body': response.body
                    })
            exception.ERROR_MAPPINGS[status](response)
        elif status in exception.ERROR_MAPPINGS:
            LOG.error(_LE("Received error code: %s"), status)
            LOG.error(_LE("Server Error Message: %s"), response.body)
            exception.ERROR_MAPPINGS[status](response)

        # Continue processing for non-error condition.
        if (status != 200 and status != 201 and status != 204):
            LOG.error(
                _LE("%(method)s to %(url)s, unexpected response code: "
                    "%(status)d (content = '%(body)s')"), {
                        'method': method,
                        'url': url,
                        'status': response.status,
                        'body': response.body
                    })
            return None

        if url == jsonutils.loads(templates.LOGOUT)['path']:
            return response.body
        else:
            try:
                return jsonutils.loads(response.body)
            except UnicodeDecodeError:
                LOG.debug(
                    "The following strings cannot be decoded with "
                    "'utf-8, trying 'ISO-8859-1' instead. %(body)s",
                    {'body': response.body})
                return jsonutils.loads(response.body, encoding='ISO-8859-1')
            except Exception as e:
                LOG.error(_LE("Decode error, the response.body %(body)s"),
                          {'body': response.body})
                raise e
    def setUp(self):
        super(Examples, self).setUp()

        # The data for several tests are signed using openssl and are stored in
        # files in the signing subdirectory.  In order to keep the values
        # consistent between the tests and the signed documents, we read them
        # in for use in the tests.
        with open(os.path.join(CMSDIR, 'auth_token_scoped.json')) as f:
            self.TOKEN_SCOPED_DATA = cms.cms_to_token(f.read())

        with open(os.path.join(CMSDIR, 'auth_token_scoped.pem')) as f:
            self.SIGNED_TOKEN_SCOPED = cms.cms_to_token(f.read())
        self.SIGNED_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
            self.SIGNED_TOKEN_SCOPED)
        self.SIGNED_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
            self.SIGNED_TOKEN_SCOPED, mode='sha256')
        with open(os.path.join(CMSDIR, 'auth_token_unscoped.pem')) as f:
            self.SIGNED_TOKEN_UNSCOPED = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pem')) as f:
            self.SIGNED_v3_TOKEN_SCOPED = cms.cms_to_token(f.read())
        self.SIGNED_v3_TOKEN_SCOPED_HASH = _hash_signed_token_safe(
            self.SIGNED_v3_TOKEN_SCOPED)
        self.SIGNED_v3_TOKEN_SCOPED_HASH_SHA256 = _hash_signed_token_safe(
            self.SIGNED_v3_TOKEN_SCOPED, mode='sha256')
        with open(os.path.join(CMSDIR, 'auth_token_revoked.pem')) as f:
            self.REVOKED_TOKEN = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped_expired.pem')) as f:
            self.SIGNED_TOKEN_SCOPED_EXPIRED = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pem')) as f:
            self.REVOKED_v3_TOKEN = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped.pkiz')) as f:
            self.SIGNED_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_unscoped.pkiz')) as f:
            self.SIGNED_TOKEN_UNSCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_scoped.pkiz')) as f:
            self.SIGNED_v3_TOKEN_SCOPED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_revoked.pkiz')) as f:
            self.REVOKED_TOKEN_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_token_scoped_expired.pkiz')) as f:
            self.SIGNED_TOKEN_SCOPED_EXPIRED_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'auth_v3_token_revoked.pkiz')) as f:
            self.REVOKED_v3_TOKEN_PKIZ = cms.cms_to_token(f.read())
        with open(os.path.join(CMSDIR, 'revocation_list.json')) as f:
            self.REVOCATION_LIST = jsonutils.loads(f.read())
        with open(os.path.join(CMSDIR, 'revocation_list.pem')) as f:
            self.SIGNED_REVOCATION_LIST = jsonutils.dumps({'signed': f.read()})

        self.SIGNING_CERT_FILE = os.path.join(CERTDIR, 'signing_cert.pem')
        with open(self.SIGNING_CERT_FILE) as f:
            self.SIGNING_CERT = f.read()

        self.KERBEROS_BIND = 'USER@REALM'

        self.SIGNING_KEY_FILE = os.path.join(KEYDIR, 'signing_key.pem')
        with open(self.SIGNING_KEY_FILE) as f:
            self.SIGNING_KEY = f.read()

        self.SIGNING_CA_FILE = os.path.join(CERTDIR, 'cacert.pem')
        with open(self.SIGNING_CA_FILE) as f:
            self.SIGNING_CA = f.read()

        self.UUID_TOKEN_DEFAULT = "ec6c0710ec2f471498484c1b53ab4f9d"
        self.UUID_TOKEN_NO_SERVICE_CATALOG = '8286720fbe4941e69fa8241723bb02df'
        self.UUID_TOKEN_UNSCOPED = '731f903721c14827be7b2dc912af7776'
        self.UUID_TOKEN_BIND = '3fc54048ad64405c98225ce0897af7c5'
        self.UUID_TOKEN_UNKNOWN_BIND = '8885fdf4d42e4fb9879e6379fa1eaf48'
        self.VALID_DIABLO_TOKEN = 'b0cf19b55dbb4f20a6ee18e6c6cf1726'
        self.v3_UUID_TOKEN_DEFAULT = '5603457654b346fdbb93437bfe76f2f1'
        self.v3_UUID_TOKEN_UNSCOPED = 'd34835fdaec447e695a0a024d84f8d79'
        self.v3_UUID_TOKEN_DOMAIN_SCOPED = 'e8a7b63aaa4449f38f0c5c05c3581792'
        self.v3_UUID_TOKEN_BIND = '2f61f73e1c854cbb9534c487f9bd63c2'
        self.v3_UUID_TOKEN_UNKNOWN_BIND = '7ed9781b62cd4880b8d8c6788ab1d1e2'

        revoked_token = self.REVOKED_TOKEN
        if isinstance(revoked_token, six.text_type):
            revoked_token = revoked_token.encode('utf-8')
        self.REVOKED_TOKEN_HASH = utils.hash_signed_token(revoked_token)
        self.REVOKED_TOKEN_HASH_SHA256 = utils.hash_signed_token(revoked_token,
                                                                 mode='sha256')
        self.REVOKED_TOKEN_LIST = ({
            'revoked': [{
                'id': self.REVOKED_TOKEN_HASH,
                'expires': timeutils.utcnow()
            }]
        })
        self.REVOKED_TOKEN_LIST_JSON = jsonutils.dumps(self.REVOKED_TOKEN_LIST)

        revoked_v3_token = self.REVOKED_v3_TOKEN
        if isinstance(revoked_v3_token, six.text_type):
            revoked_v3_token = revoked_v3_token.encode('utf-8')
        self.REVOKED_v3_TOKEN_HASH = utils.hash_signed_token(revoked_v3_token)
        hash = utils.hash_signed_token(revoked_v3_token, mode='sha256')
        self.REVOKED_v3_TOKEN_HASH_SHA256 = hash
        self.REVOKED_v3_TOKEN_LIST = ({
            'revoked': [{
                'id': self.REVOKED_v3_TOKEN_HASH,
                'expires': timeutils.utcnow()
            }]
        })
        self.REVOKED_v3_TOKEN_LIST_JSON = jsonutils.dumps(
            self.REVOKED_v3_TOKEN_LIST)

        revoked_token_pkiz = self.REVOKED_TOKEN_PKIZ
        if isinstance(revoked_token_pkiz, six.text_type):
            revoked_token_pkiz = revoked_token_pkiz.encode('utf-8')
        self.REVOKED_TOKEN_PKIZ_HASH = utils.hash_signed_token(
            revoked_token_pkiz)
        revoked_v3_token_pkiz = self.REVOKED_v3_TOKEN_PKIZ
        if isinstance(revoked_v3_token_pkiz, six.text_type):
            revoked_v3_token_pkiz = revoked_v3_token_pkiz.encode('utf-8')
        self.REVOKED_v3_PKIZ_TOKEN_HASH = utils.hash_signed_token(
            revoked_v3_token_pkiz)

        self.REVOKED_TOKEN_PKIZ_LIST = ({
            'revoked': [
                {
                    'id': self.REVOKED_TOKEN_PKIZ_HASH,
                    'expires': timeutils.utcnow()
                },
                {
                    'id': self.REVOKED_v3_PKIZ_TOKEN_HASH,
                    'expires': timeutils.utcnow()
                },
            ]
        })
        self.REVOKED_TOKEN_PKIZ_LIST_JSON = jsonutils.dumps(
            self.REVOKED_TOKEN_PKIZ_LIST)

        self.SIGNED_TOKEN_SCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_SCOPED)
        self.SIGNED_TOKEN_UNSCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_UNSCOPED)
        self.SIGNED_v3_TOKEN_SCOPED_KEY = cms.cms_hash_token(
            self.SIGNED_v3_TOKEN_SCOPED)

        self.SIGNED_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_SCOPED_PKIZ)
        self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_TOKEN_UNSCOPED_PKIZ)
        self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY = cms.cms_hash_token(
            self.SIGNED_v3_TOKEN_SCOPED_PKIZ)

        self.INVALID_SIGNED_TOKEN = (
            "MIIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
            "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
            "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
            "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
            "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
            "0000000000000000000000000000000000000000000000000000000000000000"
            "1111111111111111111111111111111111111111111111111111111111111111"
            "2222222222222222222222222222222222222222222222222222222222222222"
            "3333333333333333333333333333333333333333333333333333333333333333"
            "4444444444444444444444444444444444444444444444444444444444444444"
            "5555555555555555555555555555555555555555555555555555555555555555"
            "6666666666666666666666666666666666666666666666666666666666666666"
            "7777777777777777777777777777777777777777777777777777777777777777"
            "8888888888888888888888888888888888888888888888888888888888888888"
            "9999999999999999999999999999999999999999999999999999999999999999"
            "0000000000000000000000000000000000000000000000000000000000000000")

        self.INVALID_SIGNED_PKIZ_TOKEN = (
            "PKIZ_AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
            "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
            "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
            "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
            "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
            "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
            "0000000000000000000000000000000000000000000000000000000000000000"
            "1111111111111111111111111111111111111111111111111111111111111111"
            "2222222222222222222222222222222222222222222222222222222222222222"
            "3333333333333333333333333333333333333333333333333333333333333333"
            "4444444444444444444444444444444444444444444444444444444444444444"
            "5555555555555555555555555555555555555555555555555555555555555555"
            "6666666666666666666666666666666666666666666666666666666666666666"
            "7777777777777777777777777777777777777777777777777777777777777777"
            "8888888888888888888888888888888888888888888888888888888888888888"
            "9999999999999999999999999999999999999999999999999999999999999999"
            "0000000000000000000000000000000000000000000000000000000000000000")

        # JSON responses keyed by token ID
        self.TOKEN_RESPONSES = {
            self.UUID_TOKEN_DEFAULT: {
                'access': {
                    'token': {
                        'id': self.UUID_TOKEN_DEFAULT,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                    'serviceCatalog': {}
                },
            },
            self.VALID_DIABLO_TOKEN: {
                'access': {
                    'token': {
                        'id': self.VALID_DIABLO_TOKEN,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenantId': 'tenant_id1',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                },
            },
            self.UUID_TOKEN_UNSCOPED: {
                'access': {
                    'token': {
                        'id': self.UUID_TOKEN_UNSCOPED,
                        'expires': '2020-01-01T00:00:10.000123Z',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                },
            },
            self.UUID_TOKEN_NO_SERVICE_CATALOG: {
                'access': {
                    'token': {
                        'id': 'valid-token',
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    }
                },
            },
            self.UUID_TOKEN_BIND: {
                'access': {
                    'token': {
                        'bind': {
                            'kerberos': self.KERBEROS_BIND
                        },
                        'id': self.UUID_TOKEN_BIND,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                    'serviceCatalog': {}
                },
            },
            self.UUID_TOKEN_UNKNOWN_BIND: {
                'access': {
                    'token': {
                        'bind': {
                            'FOO': 'BAR'
                        },
                        'id': self.UUID_TOKEN_UNKNOWN_BIND,
                        'expires': '2020-01-01T00:00:10.000123Z',
                        'tenant': {
                            'id': 'tenant_id1',
                            'name': 'tenant_name1',
                        },
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                    'serviceCatalog': {}
                },
            },
            self.v3_UUID_TOKEN_DEFAULT: {
                'token': {
                    'expires_at':
                    '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {
                            'name': 'role1',
                            'id': 'Role1'
                        },
                        {
                            'name': 'role2',
                            'id': 'Role2'
                        },
                    ],
                    'catalog': {}
                }
            },
            self.v3_UUID_TOKEN_UNSCOPED: {
                'token': {
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    }
                }
            },
            self.v3_UUID_TOKEN_DOMAIN_SCOPED: {
                'token': {
                    'expires_at':
                    '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'domain': {
                        'id': 'domain_id1',
                        'name': 'domain_name1',
                    },
                    'roles': [
                        {
                            'name': 'role1',
                            'id': 'Role1'
                        },
                        {
                            'name': 'role2',
                            'id': 'Role2'
                        },
                    ],
                    'catalog': {}
                }
            },
            self.SIGNED_TOKEN_SCOPED_KEY: {
                'access': {
                    'token': {
                        'id': self.SIGNED_TOKEN_SCOPED_KEY,
                        'expires': '2020-01-01T00:00:10.000123Z',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'tenantId': 'tenant_id1',
                        'tenantName': 'tenant_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                },
            },
            self.SIGNED_TOKEN_UNSCOPED_KEY: {
                'access': {
                    'token': {
                        'id': self.SIGNED_TOKEN_UNSCOPED_KEY,
                        'expires': '2020-01-01T00:00:10.000123Z',
                    },
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'roles': [
                            {
                                'name': 'role1'
                            },
                            {
                                'name': 'role2'
                            },
                        ],
                    },
                },
            },
            self.SIGNED_v3_TOKEN_SCOPED_KEY: {
                'token': {
                    'expires_at': '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [{
                        'name': 'role1'
                    }, {
                        'name': 'role2'
                    }],
                    'catalog': {}
                }
            },
            self.v3_UUID_TOKEN_BIND: {
                'token': {
                    'bind': {
                        'kerberos': self.KERBEROS_BIND
                    },
                    'methods': ['password'],
                    'expires_at':
                    '2020-01-01T00:00:10.000123Z',
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {
                            'name': 'role1',
                            'id': 'Role1'
                        },
                        {
                            'name': 'role2',
                            'id': 'Role2'
                        },
                    ],
                    'catalog': {}
                }
            },
            self.v3_UUID_TOKEN_UNKNOWN_BIND: {
                'token': {
                    'bind': {
                        'FOO': 'BAR'
                    },
                    'expires_at':
                    '2020-01-01T00:00:10.000123Z',
                    'methods': ['password'],
                    'user': {
                        'id': 'user_id1',
                        'name': 'user_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'project': {
                        'id': 'tenant_id1',
                        'name': 'tenant_name1',
                        'domain': {
                            'id': 'domain_id1',
                            'name': 'domain_name1'
                        }
                    },
                    'roles': [
                        {
                            'name': 'role1',
                            'id': 'Role1'
                        },
                        {
                            'name': 'role2',
                            'id': 'Role2'
                        },
                    ],
                    'catalog': {}
                }
            },
        }
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_TOKEN_SCOPED_KEY])
        self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_TOKEN_UNSCOPED_KEY])
        self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_PKIZ_KEY] = (
            self.TOKEN_RESPONSES[self.SIGNED_v3_TOKEN_SCOPED_KEY])

        self.JSON_TOKEN_RESPONSES = dict([
            (k, jsonutils.dumps(v))
            for k, v in six.iteritems(self.TOKEN_RESPONSES)
        ])
Exemple #54
0
 def test_forbidden_title(self):
     """A rendered Forbidden error must carry 'Forbidden' as its title."""
     error = exception.Forbidden()
     rendered = wsgi.render_exception(error)
     payload = jsonutils.loads(rendered.body)
     self.assertEqual('Forbidden', error.title)
     self.assertEqual('Forbidden', payload['error'].get('title'))
Exemple #55
0
 def assert_attributes(self, body, attr):
     """Verify the JSON-encoded request *body* contains the attributes *attr*."""
     decoded = jsonutils.loads(body)
     self._require_attributes(decoded, attr)
Exemple #56
0
 def hydrate(cls, network_info):
     """Build an instance from *network_info*, JSON-decoding it if it is a string."""
     if isinstance(network_info, six.string_types):
         network_info = jsonutils.loads(network_info)
     vifs = [VIF.hydrate(entry) for entry in network_info]
     return cls(vifs)
Exemple #57
0
    def _populate_roles(self, token_data, user_id, domain_id, project_id,
                        trust, access_token):
        """Populate token_data['roles'] for the token being issued.

        Roles are computed differently depending on how the token is
        scoped: an OAuth access token restricts roles to the delegated
        role IDs, a trust-scoped token restricts roles to those the
        trustor still holds, and an ordinary scoped token gets the
        user's roles on the project or domain.  Tokens scoped to neither
        a project nor a domain receive no roles.

        :raises exception.Forbidden: a role delegated via the trust has
            since been removed from the trustor.
        :raises exception.Unauthorized: the user has no roles on the
            requested project or domain scope.
        """
        if 'roles' in token_data:
            # no need to repopulate roles
            return

        if access_token:
            # OAuth path: include only the roles whose IDs were delegated
            # when the access token was authorized (stored as a JSON list
            # in access_token['role_ids']).
            filtered_roles = []
            authed_role_ids = jsonutils.loads(access_token['role_ids'])
            all_roles = self.role_api.list_roles()
            for role in all_roles:
                for authed_role in authed_role_ids:
                    if authed_role == role['id']:
                        filtered_roles.append({'id': role['id'],
                                               'name': role['name']})
            token_data['roles'] = filtered_roles
            return

        if CONF.trust.enabled and trust:
            # If redelegated_trust_id is set, then we must traverse the
            # trust_chain in order to determine who the original trustor is. We
            # need to do this because the user ID of the original trustor helps
            # us determine scope in the redelegated context.
            if trust.get('redelegated_trust_id'):
                trust_chain = self.trust_api.get_trust_pedigree(trust['id'])
                token_user_id = trust_chain[-1]['trustor_user_id']
            else:
                token_user_id = trust['trustor_user_id']

            token_project_id = trust['project_id']
            # trusts do not support domains yet
            token_domain_id = None
        else:
            token_user_id = user_id
            token_project_id = project_id
            token_domain_id = domain_id

        # Only project- or domain-scoped tokens carry roles.
        if token_domain_id or token_project_id:
            filtered_roles = []
            if CONF.trust.enabled and trust:
                # First expand out any roles that were in the trust to include
                # any implied roles, whether global or domain specific
                refs = [{'role_id': role['id']} for role in trust['roles']]
                effective_trust_roles = (
                    self.assignment_api.add_implied_roles(refs))
                # Now get the current role assignments for the trustor,
                # including any domain specific roles.
                assignment_list = self.assignment_api.list_role_assignments(
                    user_id=token_user_id,
                    project_id=token_project_id,
                    effective=True, strip_domain_roles=False)
                current_effective_trustor_roles = (
                    list(set([x['role_id'] for x in assignment_list])))
                # Go through each of the effective trust roles, making sure the
                # trustor still has them, if any have been removed, then we
                # will treat the trust as invalid
                for trust_role in effective_trust_roles:

                    match_roles = [x for x in current_effective_trustor_roles
                                   if x == trust_role['role_id']]
                    if match_roles:
                        role = self.role_api.get_role(match_roles[0])
                        # Only global (non-domain-specific) roles are placed
                        # in the token.
                        if role['domain_id'] is None:
                            filtered_roles.append(role)
                    else:
                        raise exception.Forbidden(
                            _('Trustee has no delegated roles.'))
            else:
                for role in self._get_roles_for_user(token_user_id,
                                                     token_domain_id,
                                                     token_project_id):
                    filtered_roles.append({'id': role['id'],
                                           'name': role['name']})

            # user has no project or domain roles, therefore access denied
            if not filtered_roles:
                if token_project_id:
                    msg = _('User %(user_id)s has no access '
                            'to project %(project_id)s') % {
                                'user_id': user_id,
                                'project_id': token_project_id}
                else:
                    msg = _('User %(user_id)s has no access '
                            'to domain %(domain_id)s') % {
                                'user_id': user_id,
                                'domain_id': token_domain_id}
                LOG.debug(msg)
                raise exception.Unauthorized(msg)

            token_data['roles'] = filtered_roles
Exemple #58
0
 def post_os_keypairs(request, context):
     """Fake keypair-create handler: validate the request body, echo the keypair."""
     payload = jsonutils.loads(request.body)
     assert list(payload) == ['keypair']
     fakes.assert_has_keys(payload['keypair'], required=['name'])
     return {'keypair': keypair}
Exemple #59
0
    def _from_db_object(context, compute, db_compute):
        """Populate ``compute`` in place from the database row ``db_compute``.

        Plain columns are copied directly; allocation-ratio columns are
        backfilled with defaults for rows written by older compute nodes;
        the serialized ``stats`` and ``supported_instances`` columns are
        JSON-decoded; PCI stats are wrapped into pool objects; and a UUID
        is generated and saved when the row has none.  Returns the
        populated ``compute`` object.
        """
        # Columns that need custom handling below instead of a plain copy.
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
            'uuid',
        ])
        fields = set(compute.fields) - special_cases
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since all compute nodes don't possibly run the
            # latest RT code updating allocation ratios, we need to provide
            # a backwards compatible way of hydrating them.
            # As we want to care about our operators and since we don't want to
            # ask them to change their configuration files before upgrading, we
            # prefer to hardcode the default values for the ratios here until
            # the next release (Newton) where the opt default values will be
            # restored for both cpu (16.0), ram (1.5) and disk (1.0)
            # allocation ratios.
            # TODO(sbauza): Remove that in the next major version bump where
            # we break compatibility with old Liberty computes
            if (key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio'
                    or key == 'disk_allocation_ratio'):
                if value == 0.0:
                    # Operator has not yet provided a new value for that ratio
                    # on the compute node
                    value = None
                if value is None:
                    # ResourceTracker is not updating the value (old node)
                    # or the compute node is updated but the default value has
                    # not been changed
                    value = getattr(CONF, key)
                    if value == 0.0 and key == 'cpu_allocation_ratio':
                        # It's not specified either on the controller
                        value = 16.0
                    if value == 0.0 and key == 'ram_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.5
                    if value == 0.0 and key == 'disk_allocation_ratio':
                        # It's not specified either on the controller
                        value = 1.0
            compute[key] = value

        # 'stats' is stored as a JSON string; decode it when present.
        stats = db_compute['stats']
        if stats:
            compute['stats'] = jsonutils.loads(stats)

        # 'supported_instances' is a JSON list; each entry becomes an HVSpec.
        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [
                objects.HVSpec.from_list(hv_spec) for hv_spec in hv_specs
            ]
            compute['supported_hv_specs'] = hv_specs

        pci_stats = db_compute.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
        compute._context = context

        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        # NOTE(danms): Remove this conditional load (and remove uuid from
        # the list of special_cases above) once we're in Newton and have
        # enforced that all UUIDs in the database are not NULL.
        if db_compute.get('uuid'):
            compute.uuid = db_compute['uuid']

        compute.obj_reset_changes()

        # NOTE(danms): This needs to come after obj_reset_changes() to make
        # sure we only save the uuid, if we generate one.
        # FIXME(danms): Remove this in Newton once we have enforced that
        # all compute nodes have uuids set in the database.
        if 'uuid' not in compute:
            compute.uuid = uuidutils.generate_uuid()
            LOG.debug('Generated UUID %(uuid)s for compute node %(id)i',
                      dict(uuid=compute.uuid, id=compute.id))
            compute.save()

        return compute
Exemple #60
0
 def json(self):
     """Deserialize this object's ``content`` attribute from JSON and return it."""
     raw = self.content
     return jsonutils.loads(raw)