def test_get_share_instance_export_location(self):
        client = self.admin_client
        share_instances = client.list_share_instances(self.share['id'])
        self.assertTrue(len(share_instances) > 0)
        self.assertIn('ID', share_instances[0])
        self.assertTrue(uuidutils.is_uuid_like(share_instances[0]['ID']))
        share_instance_id = share_instances[0]['ID']

        export_locations = client.list_share_instance_export_locations(
            share_instance_id)

        el = client.get_share_instance_export_location(
            share_instance_id, export_locations[0]['UUID'])

        expected_keys = (
            'path', 'updated_at', 'created_at', 'uuid',
            'is_admin_only', 'share_instance_id',
        )
        for key in expected_keys:
            self.assertIn(key, el)
        self.assertIn(el['is_admin_only'], ('True', 'False'))
        self.assertTrue(uuidutils.is_uuid_like(el['uuid']))
        for list_k, get_k in (
                ('UUID', 'uuid'), ('Created At', 'created_at'),
                ('Path', 'path'), ('Updated At', 'updated_at')):
            self.assertEqual(
                export_locations[0][list_k], el[get_k])
    def test_get_share_instance_export_location(self):
        """Verify the detailed export location view (microversion 2.14+).

        Same flow as the earlier method of this name, but against the
        2.14+ schema where the identifier key is 'id' and a 'preferred'
        flag is exposed.
        """
        self.skip_if_microversion_not_supported('2.14')

        client = self.admin_client
        share_instances = client.list_share_instances(self.share['id'])
        # The share must have at least one instance with a uuid-like ID.
        self.assertTrue(len(share_instances) > 0)
        self.assertIn('ID', share_instances[0])
        self.assertTrue(uuidutils.is_uuid_like(share_instances[0]['ID']))
        share_instance_id = share_instances[0]['ID']

        export_locations = client.list_share_instance_export_locations(
            share_instance_id)

        el = client.get_share_instance_export_location(
            share_instance_id, export_locations[0]['ID'])

        expected_keys = (
            'path', 'updated_at', 'created_at', 'id', 'preferred',
            'is_admin_only', 'share_instance_id',
        )
        for key in expected_keys:
            self.assertIn(key, el)
        # CLI output renders booleans as strings.
        self.assertIn(el['is_admin_only'], ('True', 'False'))
        self.assertIn(el['preferred'], ('True', 'False'))
        self.assertTrue(uuidutils.is_uuid_like(el['id']))
        # Fields present in both the list and the detail view must agree.
        for list_k, get_k in (
                ('ID', 'id'), ('Path', 'path'), ('Preferred', 'preferred'),
                ('Is Admin only', 'is_admin_only')):
            self.assertEqual(
                export_locations[0][list_k], el[get_k])
    def _verify_export_location_structure(
            self, export_locations, role='admin', detail=False):
        """Assert snapshot export location dicts match the expected schema.

        :param export_locations: one export location dict, or a collection
            of them.
        :param role: 'admin' additionally expects the admin-only keys
            ('share_snapshot_instance_id', 'is_admin_only').
        :param detail: when True, also expect timestamp keys.
        """
        # Determine which keys to expect based on role, version and format
        summary_keys = ['id', 'path', 'links']
        if detail:
            summary_keys.extend(['created_at', 'updated_at'])

        admin_summary_keys = summary_keys + [
            'share_snapshot_instance_id', 'is_admin_only']

        if role == 'admin':
            expected_keys = admin_summary_keys
        else:
            expected_keys = summary_keys

        # Accept a single export location as well as a collection of them.
        if not isinstance(export_locations, (list, tuple, set)):
            export_locations = (export_locations, )

        for export_location in export_locations:

            # Check that the correct keys are present
            self.assertEqual(len(expected_keys), len(export_location))
            for key in expected_keys:
                self.assertIn(key, export_location)

            # Check the format of ever-present summary keys
            self.assertTrue(uuidutils.is_uuid_like(export_location['id']))
            self.assertIsInstance(export_location['path'],
                                  six.string_types)

            if role == 'admin':
                self.assertIn(export_location['is_admin_only'], (True, False))
                self.assertTrue(uuidutils.is_uuid_like(
                    export_location['share_snapshot_instance_id']))
    def _verify_export_location_structure(self, export_locations,
                                          role='admin'):
        """Assert share export location dicts match the expected schema.

        :param export_locations: one export location dict, or a collection
            of them.
        :param role: 'admin' additionally expects the admin-only keys
            ('is_admin_only', 'share_instance_id').
        """
        expected_keys = [
            'created_at', 'updated_at', 'path', 'uuid',
        ]
        if role == 'admin':
            expected_keys.extend(['is_admin_only', 'share_instance_id'])

        # Accept a single export location as well as a collection of them.
        if not isinstance(export_locations, (list, tuple, set)):
            export_locations = (export_locations, )

        for export_location in export_locations:
            # Exactly the expected keys -- no more, no fewer.
            self.assertEqual(len(expected_keys), len(export_location))
            for key in expected_keys:
                self.assertIn(key, export_location)
            if role == 'admin':
                self.assertIn(export_location['is_admin_only'], (True, False))
                self.assertTrue(
                    uuidutils.is_uuid_like(
                        export_location['share_instance_id']))
            self.assertTrue(uuidutils.is_uuid_like(export_location['uuid']))
            # assertIsInstance gives a far clearer failure message than
            # assertTrue(isinstance(...)), which only reports "False is not
            # true".
            self.assertIsInstance(export_location['path'], six.string_types)
            for time in (export_location['created_at'],
                         export_location['updated_at']):
                # If var 'time' has incorrect value then ValueError exception
                # is expected to be raised. So, just try parse it making
                # assertion that it has proper date value.
                timeutils.parse_strtime(time)
# Example 5
    def _validate_network_id(net_id, network_uuids):
        """Validate the format and uniqueness of a requested network id.

        Two checks are performed:

        1. The id must be uuid-like (a legacy ``br-<uuid>`` form is also
           accepted).
        2. With nova-network, the id must not duplicate one already seen.

        :param net_id: The network id to validate.
        :param network_uuids: A running list of requested network IDs that
            have passed validation already.
        :raises: webob.exc.HTTPBadRequest if validation fails
        """
        looks_like_uuid = uuidutils.is_uuid_like(net_id)
        if not looks_like_uuid:
            # NOTE(mriedem): Neutron would allow a network id with a br- prefix
            # back in Folsom so continue to honor that.
            # TODO(mriedem): Need to figure out if this is still a valid case.
            candidate = net_id.split('-', 1)[-1]
            looks_like_uuid = uuidutils.is_uuid_like(candidate)
        if not looks_like_uuid:
            msg = _("Bad networks format: network uuid is "
                    "not in proper format (%s)") % net_id
            raise exc.HTTPBadRequest(explanation=msg)

        # duplicate networks are allowed only for neutron v2.0
        if net_id in network_uuids and not utils.is_neutron():
            expl = _("Duplicate networks (%s) are not allowed") % net_id
            raise exc.HTTPBadRequest(explanation=expl)
 def _get_access_param(self, context, protocol, creds):
     """Resolve SNMP/netconf credentials referenced by id or by name.

     :param context: request context handed to the db lookups.
     :param protocol: protocol string the credentials must match.
     :param creds: credential id (uuid-like) or credential name.
     :returns: a single credential record matching ``protocol``.
     :raises: webob.exc.HTTPBadRequest when nothing matches, when a name
         matches more than one record, or when the record's
         ``protocol_type`` differs from ``protocol``.
     """
     if const.PROTOCOL_SNMP in protocol:
         if not uuidutils.is_uuid_like(creds):
             access_parameters = db.get_snmp_cred_by_name_and_protocol(
                 context, creds, protocol)
         else:
             access_parameters = db.get_snmp_cred_by_id(context, creds)
     else:
         if not uuidutils.is_uuid_like(creds):
             access_parameters = db.get_netconf_cred_by_name_and_protocol(
                 context, creds, protocol)
         else:
             access_parameters = db.get_netconf_cred_by_id(context, creds)
     if not access_parameters:
         raise webob.exc.HTTPBadRequest(
             _("Credentials not found for Id or name: %s") % creds)
     # A name lookup may return several records; more than one is ambiguous.
     if isinstance(access_parameters, list) and len(access_parameters) > 1:
         raise webob.exc.HTTPBadRequest(
             _("Multiple credentials matches found "
               "for name: %s, use an ID to be more specific.") % creds)
     if isinstance(access_parameters, list):
         access_parameters = access_parameters[0]
     if access_parameters['protocol_type'] != protocol:
         raise webob.exc.HTTPBadRequest(
             _("Credentials not found for Id or name: %s") % creds)
     return access_parameters
# Example 7
    def test_get_all_authorized(self):
        """List flavors as an authorized, non-admin member project."""
        flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'),
                                     True)
        self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
        flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'),
                                     True)
        self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id')))
        # NOTE(review): the request is issued here, *before* the auth
        # override below is in place -- presumably the GET was meant to
        # happen inside the mock context; confirm against sibling tests.
        response = self.get(self.FLAVORS_PATH)

        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            # Simulate a plain load-balancer member (is_admin=False).
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                api_list = response.json.get(self.root_tag_list)
        # Restore the original auth strategy for subsequent tests.
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(2, len(api_list))
    def take_action(self, parsed_args):
        """Create an audit template from the parsed CLI arguments.

        Goal and strategy may be given by name or uuid; names are resolved
        to uuids via the client before the create call.

        :returns: a (column_headers, row) pair for the CLI formatter.
        """
        client = getattr(self.app.client_manager, "infra-optim")

        field_list = ['description', 'name', 'goal', 'strategy', 'scope']
        # Keep only the creation fields the user actually supplied.
        fields = dict((k, v) for (k, v) in vars(parsed_args).items()
                      if k in field_list and v is not None)

        # mandatory
        if not uuidutils.is_uuid_like(fields['goal']):
            fields['goal'] = client.goal.get(fields['goal']).uuid

        # optional
        if fields.get('strategy'):
            if not uuidutils.is_uuid_like(fields['strategy']):
                fields['strategy'] = client.strategy.get(
                    fields['strategy']).uuid
        if fields.get('scope'):
            fields['scope'] = common_utils.serialize_file_to_dict(
                fields['scope'])

        audit_template = client.audit_template.create(**fields)

        columns = res_fields.AUDIT_TEMPLATE_FIELDS
        column_headers = res_fields.AUDIT_TEMPLATE_FIELD_LABELS

        return (column_headers,
                utils.get_item_properties(audit_template, columns))
    def take_action(self, parsed_args):
        """Create a restore from a checkpoint and print its details.

        :raises: exceptions.CommandError when the provider/checkpoint ids
            are not uuid-like, or when restore_target is given without both
            username and password.
        :returns: (field_names, values) rows for the CLI formatter.
        """
        client = self.app.client_manager.data_protection
        if not uuidutils.is_uuid_like(parsed_args.provider_id):
            raise exceptions.CommandError(
                "Invalid provider id provided.")
        if not uuidutils.is_uuid_like(parsed_args.checkpoint_id):
            raise exceptions.CommandError(
                "Invalid checkpoint id provided.")

        restore_parameters = utils.extract_parameters(parsed_args)
        restore_auth = None
        # A restore target requires password credentials to go with it.
        if parsed_args.restore_target is not None:
            if parsed_args.restore_username is None:
                raise exceptions.CommandError(
                    "Must specify username for restore_target.")
            if parsed_args.restore_password is None:
                raise exceptions.CommandError(
                    "Must specify password for restore_target.")
            restore_auth = {
                'type': 'password',
                'username': parsed_args.restore_username,
                'password': parsed_args.restore_password,
            }
        restore = client.restores.create(parsed_args.provider_id,
                                         parsed_args.checkpoint_id,
                                         parsed_args.restore_target,
                                         restore_parameters, restore_auth)
        format_restore(restore._info)
        # Rows sorted by field name for stable output.
        return zip(*sorted(restore._info.items()))
 def test_get_all_authorized(self):
     """List flavor profiles with an overridden (admin) auth context."""
     fp1 = self.create_flavor_profile('test1', 'noop_driver',
                                      '{"image": "ubuntu"}')
     self.assertTrue(uuidutils.is_uuid_like(fp1.get('id')))
     fp2 = self.create_flavor_profile('test2', 'noop_driver-alt',
                                      '{"image": "ubuntu"}')
     self.assertTrue(uuidutils.is_uuid_like(fp2.get('id')))
     self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
     auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
     self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
     project_id = uuidutils.generate_uuid()
     with mock.patch.object(octavia.common.context.Context, 'project_id',
                            project_id):
         # Simulate an admin member of a fresh project.
         override_credentials = {
             'service_user_id': None,
             'user_domain_id': None,
             'is_admin_project': True,
             'service_project_domain_id': None,
             'service_project_id': None,
             'roles': ['load-balancer_member'],
             'user_id': None,
             'is_admin': True,
             'service_user_domain_id': None,
             'project_domain_id': None,
             'service_roles': [],
             'project_id': project_id}
         with mock.patch(
                 "oslo_context.context.RequestContext.to_policy_values",
                 return_value=override_credentials):
             response = self.get(self.FPS_PATH)
     # Restore the original auth strategy for subsequent tests.
     self.conf.config(group='api_settings', auth_strategy=auth_strategy)
     api_list = response.json.get(self.root_tag_list)
     self.assertEqual(2, len(api_list))
    def create(self, workflow_identifier='', namespace='',
               workflow_input=None, description='', source_execution_id=None,
               **params):
        """Create a workflow execution.

        The workflow may be identified by uuid or by name; alternatively a
        source execution id may be supplied instead of a workflow
        identifier. Extra keyword arguments are forwarded as execution
        params.
        """
        # At least one of workflow identifier / source execution id is
        # required.
        self._ensure_not_empty(
            workflow_identifier=workflow_identifier or source_execution_id
        )

        data = {'description': description}

        if uuidutils.is_uuid_like(source_execution_id):
            data['source_execution_id'] = source_execution_id

        if workflow_identifier:
            # A uuid-like identifier addresses the workflow by id,
            # anything else by name.
            if uuidutils.is_uuid_like(workflow_identifier):
                data['workflow_id'] = workflow_identifier
            else:
                data['workflow_name'] = workflow_identifier

        if namespace:
            data['workflow_namespace'] = namespace

        if workflow_input:
            # Strings are passed through as-is; structured input is
            # serialized to JSON.
            if isinstance(workflow_input, six.string_types):
                data['input'] = workflow_input
            else:
                data['input'] = jsonutils.dumps(workflow_input)

        if params:
            data['params'] = jsonutils.dumps(params)

        return self._create('/executions', data)
# Example 12
    def host_passes(self, host_state, filter_properties):
        """Pass hosts that already hold the volumes named in 'same_host'.

        The scheduler hint may be a single uuid string or a list of them.
        Every entry must look like a uuid; anything else fails the filter
        outright, because serving a request against a malformed hint would
        be worse than rejecting it.
        """
        context = filter_properties['context']
        hints = filter_properties.get('scheduler_hints') or {}

        affinity_uuids = hints.get('same_host', [])

        if isinstance(affinity_uuids, list):
            # Every element of the list must be uuid-like.
            if not all(uuidutils.is_uuid_like(u) for u in affinity_uuids):
                return False
        elif uuidutils.is_uuid_like(affinity_uuids):
            # Normalize a single uuid string to a one-element list.
            affinity_uuids = [affinity_uuids]
        else:
            # Neither a list nor a uuid-like string: never forward such a
            # value to the DB query.
            return False

        if not affinity_uuids:
            # No same_host hint at all: the host passes.
            return True

        # Host passes iff it currently holds any of the hinted volumes.
        return self.volume_api.get_all(
            context, filters={'host': host_state.host,
                              'id': affinity_uuids,
                              'deleted': False})
    def test_get_snapshot_instance_export_location(self):
        """Fetch one export location of a snapshot instance and verify it.

        Lists instances of ``self.snapshot``, takes the first instance's
        first export location and checks that the detailed view agrees
        with the listing.
        """
        client = self.admin_client
        snapshot_instances = client.list_snapshot_instances(
            self.snapshot['id'])

        # The snapshot must have at least one instance with a uuid-like ID.
        self.assertGreater(len(snapshot_instances), 0)
        self.assertIn('ID', snapshot_instances[0])
        self.assertTrue(uuidutils.is_uuid_like(
            snapshot_instances[0]['ID']))

        snapshot_instance_id = snapshot_instances[0]['ID']

        export_locations = client.list_snapshot_instance_export_locations(
            snapshot_instance_id)

        el = client.get_snapshot_instance_export_location(
            snapshot_instance_id, export_locations[0]['ID'])
        expected_keys = ['path', 'id', 'is_admin_only',
                         'share_snapshot_instance_id', 'updated_at',
                         'created_at']

        for key in expected_keys:
            self.assertIn(key, el)
        # Fields present in both the list and the detail view must agree.
        for key, key_el in (
                ('ID', 'id'), ('Path', 'path'),
                ('Is Admin only', 'is_admin_only')):
            self.assertEqual(export_locations[0][key], el[key_el])
        self.assertTrue(uuidutils.is_uuid_like(
            el['share_snapshot_instance_id']))
        self.assertTrue(uuidutils.is_uuid_like(el['id']))
        # CLI output renders booleans as strings.
        self.assertIn(el['is_admin_only'], ('True', 'False'))
    def test_get_share_replica_export_location(self, role):
        """Fetch one export location of a share replica and verify it.

        :param role: 'admin' uses the admin client and expects the
            admin-only keys in the detail view; any other value uses the
            user client.
        """
        share, share_replica = self._create_share_and_replica()
        client = self.admin_client if role == 'admin' else self.user_client
        export_locations = client.list_share_replica_export_locations(
            share_replica['id'])

        el = client.get_share_replica_export_location(
            share_replica['id'], export_locations[0]['ID'])

        expected_keys = ['path', 'updated_at', 'created_at', 'id',
                         'preferred', 'replica_state', 'availability_zone']
        if role == 'admin':
            expected_keys.extend(['is_admin_only', 'share_instance_id'])
        for key in expected_keys:
            self.assertIn(key, el)
        if role == 'admin':
            self.assertTrue(uuidutils.is_uuid_like(el['share_instance_id']))
            # CLI output renders booleans as strings.
            self.assertIn(el['is_admin_only'], ('True', 'False'))
        self.assertTrue(uuidutils.is_uuid_like(el['id']))
        self.assertIn(el['preferred'], ('True', 'False'))
        # Fields present in both the list and the detail view must agree.
        for list_k, get_k in (
                ('ID', 'id'), ('Path', 'path'), ('Preferred', 'preferred'),
                ('Replica State', 'replica_state'),
                ('Availability Zone', 'availability_zone')):
            self.assertEqual(
                export_locations[0][list_k], el[get_k])
# Example 15
 def test_get_all(self):
     """Create two flavors and verify both appear in the list API.

     The reference dicts are built from the actually-created flavors so
     the comparison does not depend on generated ids.
     """
     # NOTE: the original code first assigned ref_flavor_1 a dict of
     # hard-coded UUIDs and immediately overwrote it below; that dead
     # assignment is removed here.
     flavor1 = self.create_flavor('name1', 'description', self.fp.get('id'),
                                  True)
     self.assertTrue(uuidutils.is_uuid_like(flavor1.get('id')))
     ref_flavor_1 = {
         u'description': u'description', u'enabled': True,
         u'flavor_profile_id': self.fp.get('id'),
         u'id': flavor1.get('id'),
         u'name': u'name1'}
     flavor2 = self.create_flavor('name2', 'description', self.fp.get('id'),
                                  True)
     self.assertTrue(uuidutils.is_uuid_like(flavor2.get('id')))
     ref_flavor_2 = {
         u'description': u'description', u'enabled': True,
         u'flavor_profile_id': self.fp.get('id'),
         u'id': flavor2.get('id'),
         u'name': u'name2'}
     response = self.get(self.FLAVORS_PATH)
     api_list = response.json.get(self.root_tag_list)
     self.assertEqual(2, len(api_list))
     self.assertIn(ref_flavor_1, api_list)
     self.assertIn(ref_flavor_2, api_list)
# Example 16
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute.

        Each entry may carry a 'port' (neutron only), a 'uuid' naming the
        network, and an optional 'fixed_ip'. Validation failures are
        reported as HTTPBadRequest.

        :param requested_networks: iterable of per-network request dicts.
        :returns: objects.NetworkRequestList of validated requests.
        :raises: exc.HTTPBadRequest on any malformed or duplicate entry.
        """
        networks = []
        network_uuids = []
        for network in requested_networks:
            request = objects.NetworkRequest()
            try:
                try:
                    # Assigning port_id validates the format; a bad value
                    # raises ValueError from the field setter.
                    request.port_id = network.get('port', None)
                except ValueError:
                    msg = _("Bad port format: port uuid is "
                            "not in proper format "
                            "(%s)") % network.get('port')
                    raise exc.HTTPBadRequest(explanation=msg)
                if request.port_id:
                    request.network_id = None
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument : port")
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    # Without a port, the network uuid is mandatory; a
                    # missing key raises KeyError, handled below.
                    request.network_id = network['uuid']

                # A legacy 'br-<uuid>' network id is still honored.
                if (not request.port_id and not
                        uuidutils.is_uuid_like(request.network_id)):
                    br_uuid = request.network_id.split('-', 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = _("Bad networks format: network uuid is "
                                "not in proper format "
                                "(%s)") % request.network_id
                        raise exc.HTTPBadRequest(explanation=msg)

                # fixed IP address is optional
                # if the fixed IP address is not provided then
                # it will use one of the available IP address from the network
                try:
                    request.address = network.get('fixed_ip', None)
                except ValueError:
                    msg = (_("Invalid fixed IP address (%s)") %
                           network.get('fixed_ip'))
                    raise exc.HTTPBadRequest(explanation=msg)

                # duplicate networks are allowed only for neutron v2.0
                if (not utils.is_neutron() and request.network_id and
                        request.network_id in network_uuids):
                    expl = (_("Duplicate networks"
                              " (%s) are not allowed") %
                            request.network_id)
                    raise exc.HTTPBadRequest(explanation=expl)
                network_uuids.append(request.network_id)
                networks.append(request)
            except KeyError as key:
                expl = _('Bad network format: missing %s') % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                expl = _('Bad networks format')
                raise exc.HTTPBadRequest(explanation=expl)

        return objects.NetworkRequestList(objects=networks)
    def _assert_graphs_equal(self, expected_graph, observed_graph):
        """Compare a load balancer graph ignoring generated ids/timestamps.

        A deep copy of ``observed_graph`` is stripped of 'id',
        'created_at' and 'updated_at' fields at every level (load
        balancer, listeners, pools, members, health monitors, l7
        policies/rules) before comparison; each stripped id is asserted
        to be present (and uuid-like where checked).
        """
        observed_graph_copy = copy.deepcopy(observed_graph)
        del observed_graph_copy['created_at']
        del observed_graph_copy['updated_at']
        obs_lb_id = observed_graph_copy.pop('id')

        self.assertTrue(uuidutils.is_uuid_like(obs_lb_id))
        # Listeners are compared individually after normalization.
        expected_listeners = expected_graph.pop('listeners', [])
        observed_listeners = observed_graph_copy.pop('listeners', [])
        self.assertEqual(expected_graph, observed_graph_copy)
        for observed_listener in observed_listeners:
            del observed_listener['created_at']
            del observed_listener['updated_at']

            self.assertTrue(uuidutils.is_uuid_like(
                observed_listener.pop('id')))
            default_pool = observed_listener.get('default_pool')
            if default_pool:
                observed_listener.pop('default_pool_id')
                self.assertTrue(default_pool.get('id'))
                default_pool.pop('id')
                default_pool.pop('created_at')
                default_pool.pop('updated_at')
                hm = default_pool.get('healthmonitor')
                if hm:
                    self.assertTrue(hm.get('id'))
                    hm.pop('id')
                for member in default_pool.get('members', []):
                    self.assertTrue(member.get('id'))
                    member.pop('id')
                    member.pop('created_at')
                    member.pop('updated_at')
            # SNI container order is not significant; sort for comparison.
            if observed_listener.get('sni_containers'):
                observed_listener['sni_containers'].sort()
            o_l7policies = observed_listener.get('l7policies')
            if o_l7policies:
                for o_l7policy in o_l7policies:
                    if o_l7policy.get('redirect_pool'):
                        r_pool = o_l7policy.get('redirect_pool')
                        self.assertTrue(r_pool.get('id'))
                        r_pool.pop('id')
                        r_pool.pop('created_at')
                        r_pool.pop('updated_at')
                        self.assertTrue(o_l7policy.get('redirect_pool_id'))
                        o_l7policy.pop('redirect_pool_id')
                        if r_pool.get('members'):
                            for r_member in r_pool.get('members'):
                                self.assertTrue(r_member.get('id'))
                                r_member.pop('id')
                                r_member.pop('created_at')
                                r_member.pop('updated_at')
                    self.assertTrue(o_l7policy.get('id'))
                    o_l7policy.pop('id')
                    l7rules = o_l7policy.get('l7rules')
                    for l7rule in l7rules:
                        self.assertTrue(l7rule.get('id'))
                        l7rule.pop('id')
            # The normalized listener must match one of the expected ones.
            self.assertIn(observed_listener, expected_listeners)
    def _verify_export_location_structure(
            self, export_locations, role='admin', version=LATEST_MICROVERSION,
            format='summary'):
        """Assert export location dicts match the schema for a microversion.

        :param export_locations: one export location dict, or a collection
            of them.
        :param role: 'admin' additionally expects the admin-only keys.
        :param version: API microversion the response was produced under;
            'preferred' is only expected from 2.14 on.
        :param format: 'summary' or 'detail'; detail adds timestamp keys.
        """
        # Determine which keys to expect based on role, version and format
        summary_keys = ['id', 'path']
        if utils.is_microversion_ge(version, '2.14'):
            summary_keys += ['preferred']

        admin_summary_keys = summary_keys + [
            'share_instance_id', 'is_admin_only']

        detail_keys = summary_keys + ['created_at', 'updated_at']

        admin_detail_keys = admin_summary_keys + ['created_at', 'updated_at']

        if format == 'summary':
            if role == 'admin':
                expected_keys = admin_summary_keys
            else:
                expected_keys = summary_keys
        else:
            if role == 'admin':
                expected_keys = admin_detail_keys
            else:
                expected_keys = detail_keys

        if not isinstance(export_locations, (list, tuple, set)):
            export_locations = (export_locations, )

        for export_location in export_locations:

            # Check that the correct keys are present
            self.assertEqual(len(expected_keys), len(export_location))
            for key in expected_keys:
                self.assertIn(key, export_location)

            # Check the format of ever-present summary keys.
            # assertIsInstance gives a far clearer failure message than
            # assertTrue(isinstance(...)).
            self.assertTrue(uuidutils.is_uuid_like(export_location['id']))
            self.assertIsInstance(export_location['path'],
                                  six.string_types)

            if utils.is_microversion_ge(version, '2.14'):
                self.assertIn(export_location['preferred'], (True, False))

            if role == 'admin':
                self.assertIn(export_location['is_admin_only'], (True, False))
                self.assertTrue(uuidutils.is_uuid_like(
                    export_location['share_instance_id']))

            # Check the format of the detail keys
            if format == 'detail':
                for time in (export_location['created_at'],
                             export_location['updated_at']):
                    # If var 'time' has incorrect value then ValueError
                    # exception is expected to be raised. So, just try parse
                    # it making assertion that it has proper date value.
                    timeutils.parse_strtime(time)
    def test_list_all_snapshot_instances(self):
        """List every snapshot instance and sanity-check each row."""
        snapshot_instances = self.admin_client.list_snapshot_instances()

        # At least one instance must exist for this check to mean anything.
        self.assertGreater(len(snapshot_instances), 0)
        for instance in snapshot_instances:
            for column in ('ID', 'Snapshot ID', 'Status'):
                self.assertIn(column, instance)
            # Both identifier columns must hold uuid-like values.
            for uuid_column in ('ID', 'Snapshot ID'):
                self.assertTrue(
                    uuidutils.is_uuid_like(instance[uuid_column]))
# Example 20
def check_resize(cluster, r_node_groups):
    """Validate a resize (scale) request against an existing cluster.

    :param cluster: cluster being scaled
    :param r_node_groups: list of dicts describing the requested node
        group sizes; each has at least 'name' and 'count', and optionally
        'instances' (specific instances to delete when scaling down)
    :raises ex.InvalidReferenceException: unknown node group, or a mix of
        instance names and UUIDs in one request
    :raises ex.InvalidDataException: more specific instances than the
        scale-down difference, or duplicate instance entries
    """
    ng_map = {ng.name: ng for ng in cluster.node_groups}

    check_duplicates_node_groups_names(r_node_groups)

    for ng in r_node_groups:
        if ng['name'] not in ng_map:
            raise ex.InvalidReferenceException(
                _("Cluster doesn't contain node group with name '%s'")
                % ng['name'])
        node_group = ng_map[ng['name']]
        if node_group.get('node_group_template_id'):
            ng_tmpl_id = node_group['node_group_template_id']
            check_node_group_template_exists(ng_tmpl_id)
            ng_tmp = api.get_node_group_template(ng_tmpl_id).to_wrapped_dict()
            check_node_group_basic_fields(cluster.plugin_name,
                                          cluster.hadoop_version,
                                          ng_tmp['node_group_template'])

    for scaling_ng in r_node_groups:
        current_count = ng_map[scaling_ng['name']].count
        new_count = scaling_ng['count']
        count_diff = current_count - new_count
        if 'instances' not in scaling_ng:
            continue
        instances = scaling_ng['instances']
        if len(instances) > count_diff:
            # NOTE: fixed the message — the original was missing the
            # closing parenthesis after the count placeholder.
            raise ex.InvalidDataException(
                _("Number of specific instances (%(instance)s) to"
                  " delete can not be greater than the count difference"
                  " (%(count)s) during scaling")
                % {'instance': str(len(instances)),
                   'count': str(count_diff)})
        if instances:
            # All entries must consistently use one identifier style:
            # either every entry is a UUID or every entry is a name.
            first_is_uuid = uuidutils.is_uuid_like(instances[0])
            for instance in instances:
                if uuidutils.is_uuid_like(instance) != first_is_uuid:
                    raise ex.InvalidReferenceException(
                        _("You can only reference instances by"
                          " Name or UUID, not both on the same"
                          " request"))
            _check_duplicates(instances,
                              _("Duplicate entry for instances to"
                                " delete"))
    def test_list_snapshot_instance_with_snapshot(self):
        """Filtering by snapshot id must yield exactly one instance row."""
        instances = self.admin_client.list_snapshot_instances(
            snapshot_id=self.snapshot['id'])

        self.assertEqual(1, len(instances))
        for instance in instances:
            for column in ('ID', 'Snapshot ID', 'Status'):
                self.assertIn(column, instance)
            self.assertTrue(uuidutils.is_uuid_like(instance['ID']))
            self.assertTrue(uuidutils.is_uuid_like(instance['Snapshot ID']))
    def check_upgrade(self, engine, _):
        """Verify migrated rows: AZs are valid and services reference them."""
        az_table = utils.load_table("availability_zones", engine)
        for row in engine.execute(az_table.select()):
            self.test_case.assertTrue(uuidutils.is_uuid_like(row.id))
            self.test_case.assertTrue(row.name in self.valid_az_names)
            self.test_case.assertEqual("False", row.deleted)

        services_table = utils.load_table("services", engine)
        for row in engine.execute(services_table.select()):
            self.test_case.assertTrue(
                uuidutils.is_uuid_like(row.availability_zone_id))
    def test_reset_group_snapshot(self):
        """Exercise the admin 'reset status' action on a group snapshot."""
        # Create a group from the test group/volume types.
        group = self.api.post_group(
            {'group': {'group_type': self.group_type['id'],
                       'volume_types': [self.volume_type['id']]}})
        self.assertTrue(uuidutils.is_uuid_like(group['id']))
        group_id = group['id']
        self._poll_group_while(group_id, ['creating'])

        # Add a volume to the group.
        volume = self.api.post_volume(
            {'volume': {'size': 1,
                        'group_id': group_id,
                        'volume_type': self.volume_type['id']}})
        self.assertTrue(uuidutils.is_uuid_like(volume['id']))
        volume_id = volume['id']
        self._poll_volume_while(volume_id, ['creating'])

        # Snapshot the group and wait for it to become available.
        snap = self.api.post_group_snapshot(
            {'group_snapshot': {'group_id': group_id}})
        self.assertTrue(uuidutils.is_uuid_like(snap['id']))
        snap_id = snap['id']

        self._poll_group_snapshot_while(snap_id,
                                        fields.GroupSnapshotStatus.CREATING)

        snap = self.api.get_group_snapshot(snap_id)
        self.assertEqual(fields.GroupSnapshotStatus.AVAILABLE, snap['status'])

        # Force the snapshot status to 'error' and confirm it stuck.
        self.api.reset_group_snapshot(snap_id, {"reset_status": {
            "status": fields.GroupSnapshotStatus.ERROR}})

        snap = self.api.get_group_snapshot(snap_id)
        self.assertEqual(fields.GroupSnapshotStatus.ERROR, snap['status'])

        # Tear everything down: snapshot first, then the group with its
        # volumes, and verify each resource actually disappears.
        self.api.delete_group_snapshot(snap_id)
        gone_snapshot = self._poll_group_snapshot_while(
            snap_id, [fields.GroupSnapshotStatus.DELETING])
        self.api.delete_group(group_id,
                              {'delete': {'delete-volumes': True}})

        gone_volume = self._poll_volume_while(volume_id, ['deleting'])
        gone_group = self._poll_group_while(group_id, ['deleting'])

        self.assertIsNone(gone_snapshot)
        self.assertIsNone(gone_volume)
        self.assertIsNone(gone_group)
Exemple #24
0
    def _get_requested_networks(self, requested_networks):
        """Create a list of requested networks from the networks attribute.

        Each entry may name a network by uuid (optionally with a fixed IP)
        or, under neutron, by port.  Malformed or conflicting entries are
        rejected with an HTTP 400.

        :param requested_networks: iterable of dicts with optional keys
            'uuid', 'fixed_ip' and 'port'
        :returns: objects.NetworkRequestList of validated requests
        :raises exc.HTTPBadRequest: bad uuid format, 'port' used without
            neutron, fixed IP combined with a port, duplicate networks
            (non-neutron only), or a missing/mis-typed entry
        """
        networks = []
        network_uuids = []
        for network in requested_networks:
            request = objects.NetworkRequest()
            try:
                # fixed IP address is optional
                # if the fixed IP address is not provided then
                # it will use one of the available IP address from the network
                request.address = network.get("fixed_ip", None)
                request.port_id = network.get("port", None)

                if request.port_id:
                    request.network_id = None
                    if not utils.is_neutron():
                        # port parameter is only for neutron v2.0
                        msg = _("Unknown argument: port")
                        raise exc.HTTPBadRequest(explanation=msg)
                    if request.address is not None:
                        # A port carries its own fixed IP, so specifying
                        # both would conflict.
                        msg = _(
                            "Specified Fixed IP '%(addr)s' cannot be used "
                            "with port '%(port)s': port already has "
                            "a Fixed IP allocated."
                        ) % {"addr": request.address, "port": request.port_id}
                        raise exc.HTTPBadRequest(explanation=msg)
                else:
                    request.network_id = network["uuid"]

                # Accept either a plain uuid or a "<prefix>-<uuid>" form
                # (presumably bridge-style ids — TODO confirm with callers);
                # anything else is rejected.
                if not request.port_id and not uuidutils.is_uuid_like(request.network_id):
                    br_uuid = request.network_id.split("-", 1)[-1]
                    if not uuidutils.is_uuid_like(br_uuid):
                        msg = (
                            _("Bad networks format: network uuid is " "not in proper format " "(%s)")
                            % request.network_id
                        )
                        raise exc.HTTPBadRequest(explanation=msg)

                # duplicate networks are allowed only for neutron v2.0
                if not utils.is_neutron() and request.network_id and request.network_id in network_uuids:
                    expl = _("Duplicate networks" " (%s) are not allowed") % request.network_id
                    raise exc.HTTPBadRequest(explanation=expl)
                network_uuids.append(request.network_id)
                networks.append(request)
            except KeyError as key:
                # Raised when a non-port entry has no 'uuid' key.
                expl = _("Bad network format: missing %s") % key
                raise exc.HTTPBadRequest(explanation=expl)
            except TypeError:
                # Raised when an entry is not dict-like at all.
                expl = _("Bad networks format")
                raise exc.HTTPBadRequest(explanation=expl)

        return objects.NetworkRequestList(objects=networks)
Exemple #25
0
 def test_get_all_not_authorized(self):
     """Listing flavors without authorization must return 403."""
     # Create two flavors so there is data that must NOT be exposed.
     first = self.create_flavor('name1', 'description', self.fp.get('id'),
                                True)
     self.assertTrue(uuidutils.is_uuid_like(first.get('id')))
     second = self.create_flavor('name2', 'description', self.fp.get('id'),
                                 True)
     self.assertTrue(uuidutils.is_uuid_like(second.get('id')))

     # Temporarily switch to the testing auth strategy for the request,
     # then restore the original strategy afterwards.
     self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
     saved_strategy = self.conf.conf.api_settings.get('auth_strategy')
     self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
     response = self.get(self.FLAVORS_PATH, status=403).json
     self.conf.config(group='api_settings', auth_strategy=saved_strategy)

     self.assertEqual(self.NOT_AUTHORIZED_BODY, response)
    def test_get_all_not_authorized(self):
        """Listing flavor profiles without authorization must return 403."""
        profile_a = self.create_flavor_profile('test1', 'noop_driver',
                                               '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(profile_a.get('id')))
        profile_b = self.create_flavor_profile('test2', 'noop_driver-alt',
                                               '{"image": "ubuntu"}')
        self.assertTrue(uuidutils.is_uuid_like(profile_b.get('id')))

        # Swap in the testing auth strategy just for this request, then
        # restore whatever strategy was configured before.
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        saved_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        self.get(self.FPS_PATH, status=403)
        self.conf.config(group='api_settings', auth_strategy=saved_strategy)
    def __call__(self, req, **local_config):
        """Reject DELETE of a virtual interface on a required network.

        Only requests shaped like
        /<project_id>/servers/<server_uuid>/os-virtual-interfacesv2/<vif_uuid>
        are inspected; anything else passes straight through to the app.
        """
        super(DetachNetworkCheck, self).__call__(req)
        if not self.enabled:
            return self.app

        # TODO(jlh): eventually we will need to make this a wafflehaus
        # supported fx
        if req.method != "DELETE":
            return self.app

        context = self._get_context(req)
        if not context:
            return self.app
        projectid = context.project_id

        # TODO(jlh): shouldn't be using PATH_INFO, but PATH instead
        path = req.environ.get("PATH_INFO")
        if path is None:
            return self.app

        segments = [part for part in path.split("/") if part]
        if len(segments) != 5:
            return self.app
        if (segments[0] != projectid or
                segments[1] != "servers" or
                segments[3] != "os-virtual-interfacesv2"):
            return self.app

        server_uuid = segments[2]
        vif_uuid = segments[4]
        if (not uuidutils.is_uuid_like(server_uuid) or
                not uuidutils.is_uuid_like(vif_uuid)):
            return self.app
        # TODO(jlh): Everything above ^^ is what needs to be one line

        # At this point we know it is the correct call.
        network_info = self._get_network_info(
            context, server_uuid, entity_maker=_translate_vif_summary_view)

        msg = "Network (%s) cannot be detached"
        network_list = ",".join(self.required_networks)
        for vif in network_info["virtual_interfaces"]:
            if vif['id'] == vif_uuid:
                ip_info = vif['ip_addresses']
                network_id = ip_info[0]['network_id']
                if network_id in self.required_networks:
                    self.log.info("attempt to detach required network")
                    return webob.exc.HTTPForbidden(msg % network_list)

        return self.app
def normalize_service_graph(port_chains):
    """Validate a service-graph mapping of source to destination port chains.

    :param port_chains: dict mapping a source port-chain UUID to a list of
        destination port-chain UUIDs (None is normalized to an empty dict)
    :returns: the normalized dict
    :raises InvalidUUID: if any source key or destination value is not
        UUID-like
    """
    port_chains = lib_converters.convert_none_to_empty_dict(port_chains)
    for src in port_chains:
        if not uuidutils.is_uuid_like(src):
            # NOTE: added the missing space before 'is invalid' that the
            # original implicit string concatenation dropped.
            raise InvalidUUID(
                error_message='UUID of source Port-Chain '
                              'is invalid: %s.' % src)
        for dst in port_chains[src]:
            if not uuidutils.is_uuid_like(dst):
                # Report the offending destination value (the original
                # mistakenly reported the source key here).
                raise InvalidUUID(
                    error_message='UUID of destination Port-Chain '
                                  'is invalid: %s.' % dst)
    return port_chains
    def take_action(self, parsed_args):
        """Create a verification for a checkpoint and return it formatted."""
        client = self.app.client_manager.data_protection

        # Both identifiers must be UUIDs before we hit the API.
        if not uuidutils.is_uuid_like(parsed_args.provider_id):
            raise exceptions.CommandError(
                "Invalid provider id provided.")
        if not uuidutils.is_uuid_like(parsed_args.checkpoint_id):
            raise exceptions.CommandError(
                "Invalid checkpoint id provided.")

        params = utils.extract_parameters(parsed_args)
        verification = client.verifications.create(
            parsed_args.provider_id, parsed_args.checkpoint_id, params)
        format_verification(verification._info)
        return zip(*sorted(verification._info.items()))
Exemple #30
0
    def check_operation_definition(cls, operation_definition):
        """Validate provider_id/plan_id and that they belong together."""
        provider_id = operation_definition.get("provider_id")
        plan_id = operation_definition.get("plan_id")

        if not (provider_id and uuidutils.is_uuid_like(provider_id)):
            raise exception.InvalidOperationDefinition(
                reason=_("Provider_id is invalid"))

        if not (plan_id and uuidutils.is_uuid_like(plan_id)):
            raise exception.InvalidOperationDefinition(
                reason=_("Plan_id is invalid"))

        # The referenced plan must belong to the same provider.
        plan = objects.Plan.get_by_id(context.get_admin_context(), plan_id)
        if provider_id != plan.provider_id:
            raise exception.InvalidOperationDefinition(
                reason=_("Provider_id is invalid"))
Exemple #31
0
    def _update_network_resource(self,
                                 context,
                                 updated_res,
                                 updated_dep,
                                 resource,
                                 except_subnet=None):
        """Update an extracted network resource in a migration plan.

        If the update points at a different existing network (a new uuid),
        the new network is extracted and substituted under the original
        resource key, and each dependent subnet resource is repointed at a
        randomly chosen subnet of the new network.  Otherwise the original
        network's info is refreshed in place.

        :param context: request context used for neutron lookups
        :param updated_res: dict of extracted resources, keyed by resource id
        :param updated_dep: dict of resource dependencies, keyed by resource id
        :param resource: dict of updated properties; must contain
            'resource_id' and may contain a new network 'id'
        :param except_subnet: name of a subnet resource to leave untouched
        :raises exception.PlanResourcesUpdateError: non-uuid network id, or
            the new network has no subnets
        """

        LOG.debug("Update network %s resource with %s.",
                  resource['resource_id'], resource)

        # 'resource' doubles as the remaining-properties dict once the
        # bookkeeping keys are popped off.
        properties = resource
        new_res_id = properties.pop('id', None)
        resource_id = properties.pop('resource_id', None)
        properties.pop('resource_type', None)

        org_net = updated_res[resource_id]
        org_net_id = org_net.id

        if new_res_id and not uuidutils.is_uuid_like(new_res_id):
            msg = "Network id <%s> must be uuid." % new_res_id
            LOG.error(msg)
            raise exception.PlanResourcesUpdateError(message=msg)

        if new_res_id and new_res_id != org_net_id:
            # Make sure the number of subnets larger than one.
            net = self.neutron_api.get_network(context, new_res_id)
            subnets = net.get('subnets', [])
            if not subnets:
                msg = "No subnets found in network %s." % new_res_id
                LOG.error(msg)
                raise exception.PlanResourcesUpdateError(message=msg)

            # Validate whether network exists on a server.
            self._validate_server_network_duplication(updated_res, resource_id,
                                                      new_res_id)

            # Extracted network resource.
            nr = networks.NetworkResource(context)
            net_res = nr.extract_nets([new_res_id])[0]

            # Update network resource.
            net_res.name = resource_id
            updated_res[resource_id] = net_res

            # Update corresponding subnet resources.
            for rid, dep in updated_dep.items():
                if dep.type == "OS::Neutron::Subnet" and resource_id in \
                        dep.dependencies:
                    subnet_res = updated_res.get(rid)

                    if not subnet_res or except_subnet == subnet_res.name:
                        continue

                    # Randomly choose a subnet.
                    random_index = random.randint(0, len(subnets) - 1)
                    random_sub_id = subnets[random_index]

                    self._update_subnet_and_port(context, updated_res,
                                                 updated_dep, rid,
                                                 random_sub_id)
        else:
            # need to modify
            LOG.info(
                "Network <%s> is the same as original network. "
                "updating the org_net info", org_net_id)
            self._update_org_net_info(context, updated_res, updated_dep,
                                      resource_id)

            # Drop a stale provider:segmentation_id from the stored
            # value_specs when the update does not supply one (presumably
            # so the old segmentation id is not reused — TODO confirm).
            if properties.get('value_specs') and \
               not properties.get('value_specs').\
                       get('provider:segmentation_id'):
                if updated_res[resource_id].properties.\
                        get('value_specs').get('provider:segmentation_id'):
                    updated_res[resource_id].properties.\
                        get('value_specs').pop('provider:segmentation_id')
            elif not properties.get('value_specs'):
                if updated_res[resource_id].properties.\
                        get('value_specs').get('provider:segmentation_id'):
                    updated_res[resource_id].properties.\
                        get('value_specs').pop('provider:segmentation_id')

        # Update other fields.
        for k, v in properties.items():
            updated_res[resource_id].properties[k] = v
Exemple #32
0
    def update(self, req, id, body):
        """Perform service update

        Starting with microversion 2.53, the service uuid is passed in on the
        path of the request to uniquely identify the service record on which to
        perform a given update, which is defined in the body of the request.

        :param req: wsgi request; its 'nova.context' is policy-checked
        :param id: service uuid taken from the request path
        :param body: dict with optional 'status' ('enabled'/'disabled'),
            'disabled_reason' and 'forced_down' fields
        :returns: dict with the updated service record under 'service'
        :raises webob.exc.HTTPBadRequest: non-uuid id, non nova-compute
            service, enabling with a disabled_reason, or an empty update
        :raises webob.exc.HTTPNotFound: unknown service uuid
        """
        service_id = id
        # Validate that the service ID is a UUID.
        if not uuidutils.is_uuid_like(service_id):
            msg = _('Invalid uuid %s') % service_id
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Validate the request context against the policy.
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)

        # Get the service by uuid.
        try:
            service = self.host_api.service_get_by_id(context, service_id)
            # At this point the context is targeted to the cell that the
            # service was found in so we don't need to do any explicit cell
            # targeting below.
        except exception.ServiceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        # Return 400 if service.binary is not nova-compute.
        # Before the earlier PUT handlers were made cells-aware, you could
        # technically disable a nova-scheduler service, although that doesn't
        # really do anything within Nova and is just confusing. Now trying to
        # do that will fail as a nova-scheduler service won't have a host
        # mapping so you'll get a 400. In this new microversion, we close that
        # old gap and make sure you can only enable/disable and set forced_down
        # on nova-compute services since those are the only ones that make
        # sense to update for those operations.
        if service.binary != 'nova-compute':
            msg = (_('Updating a %(binary)s service is not supported. Only '
                     'nova-compute services can be updated.') % {
                         'binary': service.binary
                     })
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Now determine the update to perform based on the body. We are
        # intentionally not using _perform_action or the other old-style
        # action functions.
        if 'status' in body:
            # This is a status update for either enabled or disabled.
            if body['status'] == 'enabled':

                # Fail if 'disabled_reason' was requested when enabling the
                # service since those two combined don't make sense.
                if body.get('disabled_reason'):
                    msg = _("Specifying 'disabled_reason' with status "
                            "'enabled' is invalid.")
                    raise webob.exc.HTTPBadRequest(explanation=msg)

                service.disabled = False
                service.disabled_reason = None
            elif body['status'] == 'disabled':
                service.disabled = True
                # The disabled reason is optional.
                service.disabled_reason = body.get('disabled_reason')

        # This is intentionally not an elif, i.e. it's in addition to the
        # status update.
        if 'forced_down' in body:
            service.forced_down = strutils.bool_from_string(
                body['forced_down'], strict=True)

        # Check to see if anything was actually updated since the schema does
        # not define any required fields.
        if not service.obj_what_changed():
            msg = _("No updates were requested. Fields 'status' or "
                    "'forced_down' should be specified.")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        # Now save our updates to the service record in the database.
        self.host_api.service_update(context, service)

        # Return the full service record details.
        additional_fields = ['forced_down']
        return {
            'service': self._get_service_detail(service, additional_fields,
                                                req)
        }
 def get_by_hint(cls, context, hint):
     """Look up by UUID when *hint* is UUID-like, otherwise by name."""
     if uuidutils.is_uuid_like(hint):
         lookup = cls.get_by_uuid
     else:
         lookup = cls.get_by_name
     return lookup(context, hint)
Exemple #34
0
 def validate(value):
     """Return *value* unchanged; raise ParameterException if not UUID-like."""
     if uuidutils.is_uuid_like(value):
         return value
     raise api_exception.ParameterException()
Exemple #35
0
    def _update_port_resource(self, context, updated_res, resource):
        """Update a port resource's fixed_ips in a migration plan.

        Only the 'fixed_ips' property may be changed.  Each entry must name
        a subnet (directly by uuid, or indirectly via a 'get_param' /
        'get_resource' mapping) and may optionally carry an 'ip_address'
        that is validated against the subnet's allocation pools.

        :param context: request context used for neutron lookups
        :param updated_res: dict of extracted resources, keyed by resource id
        :param resource: dict with 'resource_id' and the new 'fixed_ips'
        :raises exception.PlanResourcesUpdateError: on any validation failure
        """

        LOG.debug("Update port %s resource with %s.", resource['resource_id'],
                  resource)

        properties = resource
        resource_id = properties.pop('resource_id', None)
        resource_obj = updated_res[resource_id]
        properties.pop('resource_type', None)
        # Only fixed_ips can be updated.
        ips_to_update = properties.pop('fixed_ips')
        if not ips_to_update:
            msg = "Only 'fixed_ips' property is allowed be updated on a port."
            LOG.error(msg)
            raise exception.PlanResourcesUpdateError(message=msg)

        # Validate the number of ips on a port
        original_ips = resource_obj.properties.get('fixed_ips')
        if len(original_ips) != len(ips_to_update):
            msg = "The number of fixed ips must remain the same."
            LOG.error(msg)
            raise exception.PlanResourcesUpdateError(message=msg)

        def _get_pools(subnet_id):
            """Get subnet allocation_pools by neutron api."""
            try:
                subnet = self.neutron_api.get_subnet(context, subnet_id)
                return subnet.get('allocation_pools', [])
            except Exception as e:
                # NOTE: six.text_type replaces the py2-only builtin
                # ``unicode`` the original used (NameError on py3).
                msg = "Subnet <%s> not found. %s" % (subnet_id,
                                                     six.text_type(e))
                LOG.error(msg)
                raise exception.PlanResourcesUpdateError(message=msg)

        # Validate whether ip address matches the subnet.
        for item in ips_to_update:
            ip_address = item.get('ip_address')
            subnet_id = item.get('subnet_id')

            LOG.debug("Check fixed ip: %s", item)

            # subnet_id is required, ip_address is optional
            if not subnet_id:
                msg = "subnet_id must be provided when updating fixed_ips."
                LOG.error(msg)
                raise exception.PlanResourcesUpdateError(message=msg)

            # If ip_address is provided, validate it.
            if ip_address:
                LOG.debug("Validate ip address %s.", ip_address)
                # Get subnet range from exist subnet resource.
                allocation_pools = []
                if isinstance(subnet_id, dict) and len(subnet_id) == 1:
                    # Only support 'get_param' and 'get_resource'
                    if subnet_id.get('get_param'):
                        sub_param_id = subnet_id['get_param']
                        if isinstance(sub_param_id, six.string_types):
                            subnet_id = resource_obj.\
                                parameters.\
                                get(sub_param_id, {}).get('default')
                            LOG.debug(
                                "Get subnet id <%s> "
                                "from parameter <%s>.", subnet_id,
                                sub_param_id)
                            if subnet_id:
                                allocation_pools = _get_pools(subnet_id)
                            else:
                                msg = "%s parameter not found." % sub_param_id
                                LOG.error(msg)
                                raise exception.\
                                    PlanResourcesUpdateError(message=msg)
                    elif subnet_id.get('get_resource'):
                        sub_res_id = subnet_id['get_resource']
                        if isinstance(sub_res_id, six.string_types) \
                            and updated_res.get(sub_res_id):
                            allocation_pools = updated_res[sub_res_id].\
                                properties.get('allocation_pools')
                        else:
                            msg = "%s resource not found." % sub_res_id
                            LOG.error(msg)
                            raise exception.\
                                PlanResourcesUpdateError(message=msg)
                elif isinstance(subnet_id, six.string_types):
                    if uuidutils.is_uuid_like(subnet_id):
                        allocation_pools = _get_pools(subnet_id)
                    else:
                        msg = "Subnet id must be uuid."
                        LOG.error(msg)
                        raise exception.PlanResourcesUpdateError(message=msg)

                if not allocation_pools:
                    msg = "Can not found subnet allocation_pools information."
                    LOG.error(msg)
                    raise exception.PlanResourcesUpdateError(message=msg)

                # Validate whether ip address in ip range.
                ip_valid = False
                for pool in allocation_pools:
                    start = pool.get('start')
                    end = pool.get('end')
                    if isinstance(start, six.string_types) \
                        and isinstance(end, six.string_types) \
                        and netaddr.IPAddress(ip_address) in \
                                    netaddr.IPRange(start, end):
                        ip_valid = True

                if not ip_valid:
                    msg = ("Ip address doesn't match allocation_pools %s." %
                           allocation_pools)
                    LOG.error(msg)
                    raise exception.PlanResourcesUpdateError(message=msg)

            # Begin to update.
            ip_index = ips_to_update.index(item)
            original_ip_item = original_ips[ip_index]
            original_subnet = original_ip_item.get('subnet_id')

            # Update ip_address
            if ip_address:
                original_ips[ip_index]['ip_address'] = ip_address

            # If subnets are the same, only update ip_address if provided.
            if original_subnet == subnet_id:
                pass
            # If subnet_id is from other exist resource, replace directly.
            elif isinstance(subnet_id, dict) and len(subnet_id) == 1 \
                            and subnet_id.get('get_resource'):
                sub_res_id = subnet_id['get_resource']
                if isinstance(sub_res_id, six.string_types) \
                              and updated_res.get(sub_res_id):
                    original_ips[ip_index]['subnet_id'] = subnet_id
                    LOG.debug("Update ip_address property %s.",
                              original_ips[ip_index])
                else:
                    msg = "%s resource not found." % sub_res_id
                    LOG.error(msg)
                    raise exception.PlanResourcesUpdateError(message=msg)
            # If subnet_id is a uuid, get resource by neutron driver.
            # If this subnet has been extracted, it won't be extracted again.
            elif uuidutils.is_uuid_like(subnet_id):
                # Replace the keys by actual_id
                LOG.debug("Extract subnet <%s> resource.", subnet_id)

                # Extracted subnet resource.
                self._resource_id_to_actual_id(updated_res)
                nr = networks.NetworkResource(context,
                                              collected_resources=updated_res)
                subnet_res = nr.extract_subnets([subnet_id])[0]

                # Restore the keys
                self._actual_id_to_resource_id(updated_res)
                original_ips[ip_index]['subnet_id'] = {
                    'get_resource': subnet_res.name
                }

                LOG.debug("Update ip_address property %s.",
                          original_ips[ip_index])
            else:
                msg = "subnet_id (%s) is invalid." % subnet_id
                LOG.error(msg)
                raise exception.PlanResourcesUpdateError(message=msg)

        # we need to create new port
        resource_obj.id = None
        # Update other fields.
        for k, v in properties.items():
            updated_res[resource_id].properties[k] = v
Exemple #36
0
 def get_resource_class(self, context, resource_ident):
     """Fetch by UUID when *resource_ident* is UUID-like, else by name."""
     if uuidutils.is_uuid_like(resource_ident):
         fetch = self._get_resource_class_by_uuid
     else:
         fetch = self._get_resource_class_by_name
     return fetch(context, resource_ident)
Exemple #37
0
    def validate(value):
        """Return *value* if it is UUID-like, else raise InputException."""
        if uuidutils.is_uuid_like(value):
            return value
        raise exc.InputException("Expected a uuid but received %s." % value)
Exemple #38
0
    def create(self, req, body):
        """Creates a new volume.

        :param req: wsgi request; supplies the cinder context
        :param body: dict with a 'volume' entry (size, volume_type,
            snapshot_id, source_volid, metadata, imageRef, ...)
        :returns: dict with the created volume's detail view under 'volume'
        :raises exc.HTTPUnprocessableEntity: malformed body
        :raises exc.HTTPNotFound: unknown volume type, snapshot or
            source volume
        """
        if not self.is_valid_body(body, 'volume'):
            raise exc.HTTPUnprocessableEntity()

        LOG.debug('Create volume request body: %s', body)
        context = req.environ['cinder.context']
        volume = body['volume']

        kwargs = {}

        # The volume type may be given either by name or by uuid.
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            try:
                if not uuidutils.is_uuid_like(req_volume_type):
                    kwargs['volume_type'] = \
                        volume_types.get_volume_type_by_name(
                            context, req_volume_type)
                else:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, req_volume_type)
            except exception.VolumeTypeNotFound:
                explanation = 'Volume type not found.'
                raise exc.HTTPNotFound(explanation=explanation)

        kwargs['metadata'] = volume.get('metadata', None)

        snapshot_id = volume.get('snapshot_id')
        if snapshot_id is not None:
            try:
                kwargs['snapshot'] = self.volume_api.get_snapshot(context,
                                                                  snapshot_id)
            except exception.NotFound:
                explanation = _('snapshot id:%s not found') % snapshot_id
                raise exc.HTTPNotFound(explanation=explanation)

        else:
            kwargs['snapshot'] = None

        source_volid = volume.get('source_volid')
        if source_volid is not None:
            try:
                kwargs['source_volume'] = \
                    self.volume_api.get_volume(context,
                                               source_volid)
            except exception.NotFound:
                explanation = _('source vol id:%s not found') % source_volid
                raise exc.HTTPNotFound(explanation=explanation)
        else:
            kwargs['source_volume'] = None

        # When no size is given, default it from the snapshot or the
        # source volume being cloned.
        size = volume.get('size', None)
        if size is None and kwargs['snapshot'] is not None:
            size = kwargs['snapshot']['volume_size']
        elif size is None and kwargs['source_volume'] is not None:
            size = kwargs['source_volume']['size']

        LOG.info(_LI("Create volume of %s GB"), size, context=context)
        multiattach = volume.get('multiattach', False)
        kwargs['multiattach'] = multiattach

        image_href = None
        image_uuid = None
        if self.ext_mgr.is_loaded('os-image-create'):
            # NOTE(jdg): misleading name "imageRef" as it's an image-id
            image_href = volume.get('imageRef')
            if image_href is not None:
                image_uuid = self._image_uuid_from_href(image_href)
                kwargs['image_id'] = image_uuid

        kwargs['availability_zone'] = volume.get('availability_zone', None)

        new_volume = self.volume_api.create(context,
                                            size,
                                            volume.get('display_name'),
                                            volume.get('display_description'),
                                            **kwargs)

        retval = _translate_volume_detail_view(context, new_volume, image_uuid)

        return {'volume': retval}
Exemple #39
0
 def get_resource_provider(self, context, provider_ident):
     """Fetch by UUID when *provider_ident* is UUID-like, else by name."""
     if uuidutils.is_uuid_like(provider_ident):
         fetch = self._get_resource_provider_by_uuid
     else:
         fetch = self._get_resource_provider_by_name
     return fetch(context, provider_ident)
Exemple #40
0
def _set_allocations_for_consumer(req, schema):
    """Create or replace the full set of allocations for one consumer.

    Validates the consumer UUID from the request path, extracts and
    normalizes the request body against *schema*, then writes the
    resulting allocation list with a single replace-all operation.
    An empty allocations object wipes the consumer's allocations.
    Responds 204 with no body on success.
    """
    context = req.environ['placement.context']
    context.can(policies.ALLOC_UPDATE)
    consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
    if not uuidutils.is_uuid_like(consumer_uuid):
        raise webob.exc.HTTPBadRequest(
            _('Malformed consumer_uuid: %(consumer_uuid)s') %
            {'consumer_uuid': consumer_uuid})
    # Canonicalize the UUID string so later lookups are consistent
    # regardless of the formatting the client used.
    consumer_uuid = str(uuid.UUID(consumer_uuid))
    data = util.extract_json(req.body, schema)
    allocation_data = data['allocations']

    # Normalize allocation data to dict.
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    if not want_version.matches((1, 12)):
        allocations_dict = {}
        # Allocation are list-ish, transform to dict-ish
        for allocation in allocation_data:
            resource_provider_uuid = allocation['resource_provider']['uuid']
            allocations_dict[resource_provider_uuid] = {
                'resources': allocation['resources']
            }
        allocation_data = allocations_dict

    allocation_objects = []
    # Consumer object saved in case we need to delete the auto-created consumer
    # record
    consumer = None
    # Whether we created a new consumer record
    created_new_consumer = False
    if not allocation_data:
        # The allocations are empty, which means wipe them out. Internal
        # to the allocation object this is signalled by a used value of 0.
        # We still need to verify the consumer's generation, though, which
        # we do in _ensure_consumer()
        # NOTE(jaypipes): This will only occur 1.28+. The JSONSchema will
        # prevent an empty allocations object from being passed when there is
        # no consumer generation, so this is safe to do.
        util.ensure_consumer(context, consumer_uuid, data.get('project_id'),
             data.get('user_id'), data.get('consumer_generation'),
             want_version)
        allocations = rp_obj.AllocationList.get_all_by_consumer_id(
            context, consumer_uuid)
        for allocation in allocations:
            allocation.used = 0
            allocation_objects.append(allocation)
    else:
        # If the body includes an allocation for a resource provider
        # that does not exist, raise a 400.
        rp_objs = _resource_providers_by_uuid(context, allocation_data.keys())
        consumer, created_new_consumer = util.ensure_consumer(
            context, consumer_uuid, data.get('project_id'),
            data.get('user_id'), data.get('consumer_generation'),
            want_version)
        for resource_provider_uuid, allocation in allocation_data.items():
            resource_provider = rp_objs[resource_provider_uuid]
            new_allocations = _new_allocations(context,
                                               resource_provider,
                                               consumer,
                                               allocation['resources'])
            allocation_objects.extend(new_allocations)

    allocations = rp_obj.AllocationList(
        context, objects=allocation_objects)

    def _create_allocations(alloc_list):
        # Write the allocations; on any failure roll back the consumer
        # record that was auto-created for this request so a failed
        # request leaves no orphaned consumer behind.
        try:
            alloc_list.replace_all()
            LOG.debug("Successfully wrote allocations %s", alloc_list)
        except Exception:
            if created_new_consumer:
                _delete_consumers([consumer])
            raise

    try:
        _create_allocations(allocations)
    # InvalidInventory is a parent for several exceptions that
    # indicate either that Inventory is not present, or that
    # capacity limits have been exceeded.
    except exception.NotFound as exc:
        raise webob.exc.HTTPBadRequest(
                _("Unable to allocate inventory for consumer "
                  "%(consumer_uuid)s: %(error)s") %
            {'consumer_uuid': consumer_uuid, 'error': exc})
    except exception.InvalidInventory as exc:
        raise webob.exc.HTTPConflict(
            _('Unable to allocate inventory: %(error)s') % {'error': exc})
    except exception.ConcurrentUpdateDetected as exc:
        raise webob.exc.HTTPConflict(
            _('Inventory and/or allocations changed while attempting to '
              'allocate: %(error)s') % {'error': exc},
              comment=errors.CONCURRENT_UPDATE)

    # 204: success, no response body.
    req.response.status = 204
    req.response.content_type = None
    return req.response
Exemple #41
0
def is_valid_uuid(uuid_string):
    """Tell whether *uuid_string* is formatted like a UUID."""
    looks_like_uuid = uuidutils.is_uuid_like(uuid_string)
    return looks_like_uuid
Exemple #42
0
    def create(self, req, body):
        """Creates a new volume.

        Validates the request body, normalizes legacy/alternate keys
        (``name``, ``description``, ``image_id``), resolves referenced
        objects (volume type, snapshot, source volume, consistency
        group), then delegates the actual creation to the volume API.

        :param req: the WSGI request object
        :param body: deserialized request body with a 'volume' dict
        :returns: a detail view of the newly created volume
        """
        self.assert_valid_body(body, 'volume')

        LOG.debug('Create volume request body: %s', body)
        context = req.environ['cinder.context']
        volume = body['volume']

        # Check up front for legacy replication parameters to quick fail
        source_replica = volume.get('source_replica')
        if source_replica:
            msg = _("Creating a volume from a replica source was part of the "
                    "replication v1 implementation which is no longer "
                    "available.")
            raise exception.InvalidInput(reason=msg)

        kwargs = {}
        self.validate_name_and_description(volume)

        # NOTE(thingee): v2 API allows name instead of display_name
        if 'name' in volume:
            volume['display_name'] = volume.pop('name')

        # NOTE(thingee): v2 API allows description instead of
        #                display_description
        if 'description' in volume:
            volume['display_description'] = volume.pop('description')

        if 'image_id' in volume:
            volume['imageRef'] = volume.pop('image_id')

        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            # Not found exception will be handled at the wsgi level
            kwargs['volume_type'] = (objects.VolumeType.get_by_name_or_id(
                context, req_volume_type))

        kwargs['metadata'] = volume.get('metadata', None)

        snapshot_id = volume.get('snapshot_id')
        if snapshot_id is not None:
            if not uuidutils.is_uuid_like(snapshot_id):
                msg = _("Snapshot ID must be in UUID form.")
                raise exc.HTTPBadRequest(explanation=msg)
            # Not found exception will be handled at the wsgi level
            kwargs['snapshot'] = self.volume_api.get_snapshot(
                context, snapshot_id)
        else:
            kwargs['snapshot'] = None

        source_volid = volume.get('source_volid')
        if source_volid is not None:
            if not uuidutils.is_uuid_like(source_volid):
                msg = _("Source volume ID '%s' must be a "
                        "valid UUID.") % source_volid
                raise exc.HTTPBadRequest(explanation=msg)
            # Not found exception will be handled at the wsgi level
            kwargs['source_volume'] = \
                self.volume_api.get_volume(context,
                                           source_volid)
        else:
            kwargs['source_volume'] = None

        kwargs['group'] = None
        kwargs['consistencygroup'] = None
        consistencygroup_id = volume.get('consistencygroup_id')
        if consistencygroup_id is not None:
            if not uuidutils.is_uuid_like(consistencygroup_id):
                msg = _("Consistency group ID '%s' must be a "
                        "valid UUID.") % consistencygroup_id
                raise exc.HTTPBadRequest(explanation=msg)
            # Not found exception will be handled at the wsgi level
            kwargs['group'] = self.group_api.get(context, consistencygroup_id)

        # With no explicit size, inherit it from the snapshot or the
        # source volume the new volume is created from.
        size = volume.get('size', None)
        if size is None and kwargs['snapshot'] is not None:
            size = kwargs['snapshot']['volume_size']
        elif size is None and kwargs['source_volume'] is not None:
            size = kwargs['source_volume']['size']

        LOG.info("Create volume of %s GB", size)

        if self.ext_mgr.is_loaded('os-image-create'):
            # NOTE: despite the name, "imageRef" holds an image id/href.
            image_ref = volume.get('imageRef')
            if image_ref is not None:
                image_uuid = self._image_uuid_from_ref(image_ref, context)
                kwargs['image_id'] = image_uuid

        kwargs['availability_zone'] = volume.get('availability_zone', None)
        kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
        kwargs['multiattach'] = utils.get_bool_param('multiattach', volume)

        new_volume = self.volume_api.create(context, size,
                                            volume.get('display_name'),
                                            volume.get('display_description'),
                                            **kwargs)

        retval = self._view_builder.detail(req, new_volume)

        return retval
Exemple #43
0
    def delete(self, req, id):
        """Deletes the specified service.

        Microversions at or above UUID_FOR_ID_MIN_VERSION identify the
        service by UUID; older ones expect an integer id. Deleting a
        nova-compute service also removes it from its aggregates, deletes
        its placement resource providers and host mapping, and is refused
        with 409 while instances are still hosted on it.
        """
        context = req.environ['nova.context']
        context.can(services_policies.BASE_POLICY_NAME)

        if api_version_request.is_supported(
                req, min_version=UUID_FOR_ID_MIN_VERSION):
            if not uuidutils.is_uuid_like(id):
                msg = _('Invalid uuid %s') % id
                raise webob.exc.HTTPBadRequest(explanation=msg)
        else:
            try:
                utils.validate_integer(id, 'id')
            except exception.InvalidInput as exc:
                raise webob.exc.HTTPBadRequest(
                    explanation=exc.format_message())

        try:
            service = self.host_api.service_get_by_id(context, id)
            # remove the service from all the aggregates in which it's included
            if service.binary == 'nova-compute':
                # Check to see if there are any instances on this compute host
                # because if there are, we need to block the service (and
                # related compute_nodes record) delete since it will impact
                # resource accounting in Placement and orphan the compute node
                # resource provider.
                num_instances = objects.InstanceList.get_count_by_hosts(
                    context, [service['host']])
                if num_instances:
                    raise webob.exc.HTTPConflict(
                        explanation=_('Unable to delete compute service that '
                                      'is hosting instances. Migrate or '
                                      'delete the instances first.'))

                aggrs = self.aggregate_api.get_aggregates_by_host(
                    context, service.host)
                for ag in aggrs:
                    self.aggregate_api.remove_host_from_aggregate(
                        context, ag.id, service.host)
                # remove the corresponding resource provider record from
                # placement for the compute nodes managed by this service;
                # remember that an ironic compute service can manage multiple
                # nodes
                compute_nodes = objects.ComputeNodeList.get_all_by_host(
                    context, service.host)
                for compute_node in compute_nodes:
                    self.placementclient.delete_resource_provider(context,
                                                                  compute_node,
                                                                  cascade=True)
                # remove the host_mapping of this host.
                try:
                    hm = objects.HostMapping.get_by_host(context, service.host)
                    hm.destroy()
                except exception.HostMappingNotFound:
                    # It's possible to startup a nova-compute service and then
                    # delete it (maybe it was accidental?) before mapping it to
                    # a cell using discover_hosts, so we just ignore this.
                    pass
            service.destroy()

        except exception.ServiceNotFound:
            explanation = _("Service %s not found.") % id
            raise webob.exc.HTTPNotFound(explanation=explanation)
        except exception.ServiceNotUnique:
            explanation = _("Service id %s refers to multiple services.") % id
            raise webob.exc.HTTPBadRequest(explanation=explanation)
Exemple #44
0
 def get_cluster_id(self, name):
     """Resolve a cluster name (or UUID) to a cluster ID; None if unknown."""
     if uuidutils.is_uuid_like(name):
         return name
     matches = (cluster.id
                for cluster in self.sahara_client.clusters.list()
                if cluster.name == name)
     return next(matches, None)
Exemple #45
0
    def new_websocket_client(self):
        """Called after a new WebSocket connection has been established.

        Authenticates the connection against the container's websocket
        token, verifies the Origin header against the allowed origins,
        then proxies traffic to the container's websocket URL.

        :raises: exception.InvalidWebsocketToken if the token is wrong
        :raises: exception.ValidationError on Origin mismatch
        :raises: exception.InvalidWebsocketUrl if the container has no
                 websocket URL
        """
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        from eventlet import hubs
        hubs.use_hub()

        # The zun expected behavior is to have token
        # passed to the method GET of the request
        parse = urlparse.urlparse(self.path)
        if parse.scheme not in ('http', 'https'):
            # From a bug in urlparse in Python < 2.7.4 we cannot support
            # special schemes (cf: https://bugs.python.org/issue9374)
            if sys.version_info < (2, 7, 4):
                raise exception.ZunException(
                    _("We do not support scheme '%s' under Python < 2.7.4, "
                      "please use http or https") % parse.scheme)

        query = parse.query
        token = urlparse.parse_qs(query).get("token", [""]).pop()
        uuid = urlparse.parse_qs(query).get("uuid", [""]).pop()

        dbapi = db_api._get_dbdriver_instance()
        ctx = context.get_admin_context(all_tenants=True)

        # The "uuid" query parameter may hold a container name instead.
        if uuidutils.is_uuid_like(uuid):
            container = dbapi.get_container_by_uuid(ctx, uuid)
        else:
            container = dbapi.get_container_by_name(ctx, uuid)

        if token != container.websocket_token:
            raise exception.InvalidWebsocketToken(token)

        access_url = '%s?token=%s&uuid=%s' % (CONF.websocket_proxy.base_url,
                                              token, uuid)

        # Verify Origin
        expected_origin_hostname = self.headers.get('Host')
        if ':' in expected_origin_hostname:
            e = expected_origin_hostname
            if '[' in e and ']' in e:
                # Bracketed IPv6 literal: strip brackets and port.
                expected_origin_hostname = e.split(']')[0][1:]
            else:
                expected_origin_hostname = e.split(':')[0]
        # BUGFIX: copy the configured list before appending. Appending to
        # the CONF-provided list directly would mutate the option value in
        # place and grow it with every new connection.
        expected_origin_hostnames = list(
            CONF.websocket_proxy.allowed_origins)
        expected_origin_hostnames.append(expected_origin_hostname)
        origin_url = self.headers.get('Origin')

        # missing origin header indicates non-browser client which is OK
        if origin_url is not None:
            origin = urlparse.urlparse(origin_url)
            origin_hostname = origin.hostname
            origin_scheme = origin.scheme
            if origin_hostname == '' or origin_scheme == '':
                detail = _("Origin header not valid.")
                raise exception.ValidationError(detail)
            if origin_hostname not in expected_origin_hostnames:
                detail = _("Origin header does not match this host.")
                raise exception.ValidationError(detail)
            if not self.verify_origin_proto(access_url, origin_scheme):
                detail = _("Origin header protocol does not match this host.")
                raise exception.ValidationError(detail)

        if container.websocket_url:
            target_url = container.websocket_url
            escape = "~"
            close_wait = 0.5
            wscls = WebSocketClient(host_url=target_url,
                                    escape=escape,
                                    close_wait=close_wait)
            wscls.connect()
            self.target = wscls
        else:
            raise exception.InvalidWebsocketUrl()

        # Start proxying
        try:
            self.do_proxy(self.target.ws)
        except Exception:
            if self.target.ws:
                self.target.ws.close()
                # BUGFIX: the message had a stray '%' prefix left over
                # from an old format string.
                self.vmsg(_("Websocket client or target closed"))
            raise
Exemple #46
0
def get_fixed_subnet_id(context, subnet):
    """Return the subnet's ID, resolving a subnet name when necessary."""
    # Empty values and UUIDs pass through untouched; only names need a
    # lookup.
    if not subnet or uuidutils.is_uuid_like(subnet):
        return subnet
    return get_subnet(context, subnet, source='name', target='id')
Exemple #47
0
def validate_id(id):
    """Return *id* if it is UUID-like; raise exception.Invalid otherwise."""
    if uuidutils.is_uuid_like(id):
        return id
    msg = _("Security group id should be uuid")
    raise exception.Invalid(msg)
Exemple #48
0
    def _validate_server_network_duplication(self, updated_res,
                                             net_res_id_to_update, net_id):
        """Fail when attaching *net_id* would duplicate a server network.

        Walks every OS::Nova::Server resource in *updated_res*, collects
        the network UUIDs it is already attached to (directly, through
        ports, template parameters or other template resources), and
        raises PlanResourcesUpdateError when *net_id* is among them on a
        server that also references the resource being updated
        (*net_res_id_to_update*).
        """

        LOG.debug("Validate whether network exists on a server.")

        for res in updated_res.values():

            if res.type != "OS::Nova::Server":
                continue

            networks = res.properties.get('networks')
            if not networks:
                continue

            # Network UUIDs this server is already attached to.
            exist_nets = []
            # Set to True when the server references the resource under
            # update, which is what makes the duplication check apply.
            need_validate = False

            def _get_param(res, param_id):
                # Resolve a template parameter name to its default value.
                if isinstance(param_id, six.string_types):
                    return res.parameters.get(param_id, {}).get('default')

            def _get_net_id(uuid_or_network):
                # Record the network UUID behind a literal UUID, a
                # {'get_param': ...} or a {'get_resource': ...} reference
                # (appends to exist_nets as a side effect). Returns True
                # only when it refers to the resource under update.
                net = uuid_or_network
                if uuidutils.is_uuid_like(net):
                    exist_nets.append(net)
                elif isinstance(net, dict) and len(net) == 1:
                    if net.get('get_param'):
                        net_param = _get_param(res, net['get_param'])
                        if net_param and uuidutils.is_uuid_like(net_param):
                            exist_nets.append(net_param)
                    elif net.get('get_resource'):
                        net_res_id = net['get_resource']
                        if net_res_id == net_res_id_to_update:
                            return True
                        elif isinstance(net_res_id, six.string_types) \
                            and updated_res.get(net_res_id):
                            exist_nets.append(updated_res[net_res_id].id)

            for net in networks:
                port_res_id = net.get('port', {}).get('get_resource')
                net_uuid = net.get('uuid', {})
                network = net.get('network', {})

                if port_res_id:
                    # Network referenced indirectly via a port resource.
                    port_res = updated_res.get(port_res_id)

                    if not port_res:
                        continue

                    network_id = port_res.properties.get('network_id')

                    if uuidutils.is_uuid_like(network_id):
                        exist_nets.append(network_id)
                    elif isinstance(network_id, dict) and \
                                    len(network_id) == 1:

                        if network_id.get('get_param'):
                            net_param = _get_param(port_res,
                                                   network_id['get_param'])
                            if uuidutils.is_uuid_like(net_param):
                                exist_nets.append(net_param)
                        elif network_id.get('get_resource'):
                            net_res_id = network_id['get_resource']
                            if net_res_id == net_res_id_to_update:
                                need_validate = True
                            else:
                                net_res = updated_res.get(net_res_id)
                                if net_res:
                                    exist_nets.append(net_res.id)

                if net_uuid:
                    if _get_net_id(net_uuid) is True:
                        need_validate = True

                if network:
                    if _get_net_id(network) is True:
                        need_validate = True

            if need_validate and net_id in exist_nets:
                msg = ("Duplicate networks <%s> found on server <%s>." %
                       (net_id, res.name))
                LOG.error(msg)
                raise exception.PlanResourcesUpdateError(message=msg)
Exemple #49
0
def list_resource_providers(req):
    """GET a list of resource providers.

    On success return a 200 and an application/json body representing
    a collection of resource providers.
    """
    context = req.environ['placement.context']
    want_version = req.environ[microversion.MICROVERSION_ENVIRON]

    # Pick the query-parameter schema matching the newest microversion
    # the client accepts (checked newest-first).
    schema = rp_schema.GET_RPS_SCHEMA_1_0
    if want_version.matches((1, 18)):
        schema = rp_schema.GET_RPS_SCHEMA_1_18
    elif want_version.matches((1, 14)):
        schema = rp_schema.GET_RPS_SCHEMA_1_14
    elif want_version.matches((1, 4)):
        schema = rp_schema.GET_RPS_SCHEMA_1_4
    elif want_version.matches((1, 3)):
        schema = rp_schema.GET_RPS_SCHEMA_1_3

    util.validate_query_params(req, schema)

    # Translate supported query parameters into DB-level filters.
    filters = {}
    qpkeys = ('uuid', 'name', 'member_of', 'in_tree', 'resources', 'required')
    for attr in qpkeys:
        if attr in req.GET:
            value = req.GET[attr]
            # special case member_of to always make its value a
            # list, either by accepting the single value, or if it
            # starts with 'in:' splitting on ','.
            # NOTE(cdent): This will all change when we start using
            # JSONSchema validation of query params.
            if attr == 'member_of':
                if value.startswith('in:'):
                    value = value[3:].split(',')
                else:
                    value = [value]
                # Make sure the values are actually UUIDs.
                for aggr_uuid in value:
                    if not uuidutils.is_uuid_like(aggr_uuid):
                        raise webob.exc.HTTPBadRequest(
                            _('Invalid uuid value: %(uuid)s') %
                            {'uuid': aggr_uuid})
            elif attr == 'resources':
                value = util.normalize_resources_qs_param(value)
            elif attr == 'required':
                value = util.normalize_traits_qs_param(value)
            filters[attr] = value
    try:
        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
            context, filters)
    except exception.ResourceClassNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid resource class in resources parameter: %(error)s') %
            {'error': exc})
    except exception.TraitNotFound as exc:
        raise webob.exc.HTTPBadRequest(
            _('Invalid trait(s) in "required" parameter: %(error)s') %
            {'error': exc})

    response = req.response
    output, last_modified = _serialize_providers(
        req.environ, resource_providers, want_version)
    response.body = encodeutils.to_utf8(jsonutils.dumps(output))
    response.content_type = 'application/json'
    # Cache headers only exist from microversion 1.15 onwards.
    if want_version.matches((1, 15)):
        response.last_modified = last_modified
        response.cache_control = 'no-cache'
    return response
Exemple #50
0
    def report(self, tasks=None, out=None, open_it=False, out_format="html"):
        """Generate report file for specified task.

        :param tasks: list, UUIDs of tasks or paths to files with task
                      results (a single value is also accepted)
        :param out: str, output file name
        :param open_it: bool, whether to open output file in web browser
        :param out_format: output format (junit, html or html_static)
        :returns: 1 on invalid input or format, otherwise None
        """

        # Accept a single task as well as a list of tasks.
        tasks = isinstance(tasks, list) and tasks or [tasks]

        results = []
        message = []
        # Counts occurrences of each scenario name so that repeated
        # scenarios get distinct "pos" values in the report.
        processed_names = {}
        for task_file_or_uuid in tasks:
            if os.path.exists(os.path.expanduser(task_file_or_uuid)):
                # A path on disk: load stored task results and validate
                # each against the expected result schema.
                with open(os.path.expanduser(task_file_or_uuid),
                          "r") as inp_js:
                    tasks_results = json.load(inp_js)
                    for result in tasks_results:
                        try:
                            jsonschema.validate(result,
                                                api.Task.TASK_RESULT_SCHEMA)
                        except jsonschema.ValidationError as e:
                            print(
                                _("ERROR: Invalid task result format in %s") %
                                task_file_or_uuid,
                                file=sys.stderr)
                            print(six.text_type(e), file=sys.stderr)
                            return 1

            elif uuidutils.is_uuid_like(task_file_or_uuid):
                # A task UUID: fetch results from the API and reshape
                # them to match the on-disk result format.
                tasks_results = map(
                    lambda x: {
                        "key": x["key"],
                        "sla": x["data"]["sla"],
                        "result": x["data"]["raw"],
                        "load_duration": x["data"]["load_duration"],
                        "full_duration": x["data"]["full_duration"]
                    },
                    api.Task.get(task_file_or_uuid).get_results())
            else:
                print(_("ERROR: Invalid UUID or file name passed: %s") %
                      task_file_or_uuid,
                      file=sys.stderr)
                return 1

            for task_result in tasks_results:
                if task_result["key"]["name"] in processed_names:
                    processed_names[task_result["key"]["name"]] += 1
                    task_result["key"]["pos"] = processed_names[
                        task_result["key"]["name"]]
                else:
                    processed_names[task_result["key"]["name"]] = 0
                results.append(task_result)

        if out_format.startswith("html"):
            result = plot.plot(results,
                               include_libs=(out_format == "html_static"))
        elif out_format == "junit":
            test_suite = junit.JUnit("Rally test suite")
            for result in results:
                # NOTE(review): "message" is only reassigned when sla is a
                # list, so a previous iteration's message can carry over to
                # a result whose sla is not a list — confirm this is
                # intended.
                if isinstance(result["sla"], list):
                    message = ",".join([
                        sla["detail"] for sla in result["sla"]
                        if not sla["success"]
                    ])
                if message:
                    outcome = junit.JUnit.FAILURE
                else:
                    outcome = junit.JUnit.SUCCESS
                test_suite.add_test(result["key"]["name"],
                                    result["full_duration"], outcome, message)
            result = test_suite.to_xml()
        else:
            print(_("Invalid output format: %s") % out_format, file=sys.stderr)
            return 1

        if out:
            output_file = os.path.expanduser(out)

            with open(output_file, "w+") as f:
                f.write(result)
            if open_it:
                webbrowser.open_new_tab("file://" + os.path.realpath(out))
        else:
            # No output file requested: dump the report to stdout.
            print(result)
Exemple #51
0
def _validate_uuid(data, valid_values=None):
    """Return an error message if *data* is not UUID-like, else None.

    *valid_values* is accepted for validator-signature compatibility and
    is not used.
    """
    if uuidutils.is_uuid_like(data):
        return None
    msg = _("'%s' is not a valid UUID") % data
    LOG.debug(msg)
    return msg
Exemple #52
0
 def validate_id(self, id):
     """Return *id*, reporting an invalid property when it is not a UUID."""
     is_uuid = uuidutils.is_uuid_like(id)
     if not is_uuid:
         self.raise_invalid_property(_("Security group id should be uuid"))
     # Reached when the id is valid (or raise_invalid_property returns).
     return id
def is_glance_image(image_href):
    """Tell whether *image_href* looks like a glance image reference."""
    # Non-strings can never be image references.
    if not isinstance(image_href, six.string_types):
        return False
    if image_href.startswith('glance://'):
        return True
    return uuidutils.is_uuid_like(image_href)
Exemple #54
0
    def create(self, req, body):
        """Instruct Cinder to manage a storage object.

        Manages an existing backend storage object (e.g. a Linux logical
        volume or a SAN disk) by creating the Cinder objects required to manage
        it, and possibly renaming the backend storage object
        (driver dependent)

        From an API perspective, this operation behaves very much like a
        volume creation operation, except that properties such as image,
        snapshot and volume references don't make sense, because we are taking
        an existing storage object into Cinder management.

        Required HTTP Body:

        {
         'volume':
          {
           'host': <Cinder host on which the existing storage resides>,
           'ref':  <Driver-specific reference to the existing storage object>,
          }
        }

        See the appropriate Cinder drivers' implementations of the
        manage_volume method to find out the accepted format of 'ref'.

        This API call will return with an error if any of the above elements
        are missing from the request, or if the 'host' element refers to a
        cinder host that is not registered.

        The volume will later enter the error state if it is discovered that
        'ref' is bad.

        Optional elements to 'volume' are:
            name               A name for the new volume.
            description        A description for the new volume.
            volume_type        ID or name of a volume type to associate with
                               the new Cinder volume.  Does not necessarily
                               guarantee that the managed volume will have the
                               properties described in the volume_type.  The
                               driver may choose to fail if it identifies that
                               the specified volume_type is not compatible with
                               the backend storage object.
            metadata           Key/value pairs to be associated with the new
                               volume.
            availability_zone  The availability zone to associate with the new
                               volume.
            bootable           If set to True, marks the volume as bootable.
        """
        context = req.environ['cinder.context']
        authorize(context)

        self.assert_valid_body(body, 'volume')

        volume = body['volume']
        self.validate_name_and_description(volume)

        # Check that the required keys are present, return an error if they
        # are not.
        required_keys = set(['ref', 'host'])
        missing_keys = list(required_keys - set(volume.keys()))

        if missing_keys:
            msg = _("The following elements are required: %s") % \
                ', '.join(missing_keys)
            raise exc.HTTPBadRequest(explanation=msg)

        LOG.debug('Manage volume request body: %s', body)

        kwargs = {}
        # volume_type may be given either by name or by UUID.
        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            try:
                if not uuidutils.is_uuid_like(req_volume_type):
                    kwargs['volume_type'] = \
                        volume_types.get_volume_type_by_name(
                            context, req_volume_type)
                else:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, req_volume_type)
            except exception.VolumeTypeNotFound as error:
                raise exc.HTTPNotFound(explanation=error.msg)
        else:
            kwargs['volume_type'] = {}

        # Optional volume attributes; None/False when not supplied.
        kwargs['name'] = volume.get('name', None)
        kwargs['description'] = volume.get('description', None)
        kwargs['metadata'] = volume.get('metadata', None)
        kwargs['availability_zone'] = volume.get('availability_zone', None)
        kwargs['bootable'] = volume.get('bootable', False)
        try:
            new_volume = self.volume_api.manage_existing(
                context, volume['host'], volume['ref'], **kwargs)
        except exception.ServiceNotFound:
            msg = _("Service not found.")
            raise exc.HTTPNotFound(explanation=msg)

        utils.add_visible_admin_metadata(new_volume)

        return self._view_builder.detail(req, new_volume)
Exemple #55
0
 def get_instance_uuid(self, vm_name):
     """Return the instance UUID recorded in the VM's notes, if any."""
     notes = self._get_instance_notes(vm_name)
     if notes and uuidutils.is_uuid_like(notes[0]):
         return notes[0]
Exemple #56
0
def _validate_uuid_format(instance):
    """Return True when *instance* is a UUID-like string, else False."""
    return uuidutils.is_uuid_like(instance)
Example #57
0
    def patch(self, connector_uuid, patch):
        """Update an existing volume connector.

        :param connector_uuid: UUID of a volume connector.
        :param patch: a json PATCH document to apply to this volume connector.

        :returns: API-serializable volume connector object.

        :raises: OperationNotPermitted if accessed with specifying a
                 parent node.
        :raises: PatchError if a given patch can not be applied.
        :raises: VolumeConnectorNotFound if no volume connector exists with
                 the specified UUID.
        :raises: InvalidParameterValue if the volume connector's UUID is being
                 changed
        :raises: NodeLocked if node is locked by another conductor
        :raises: NodeNotFound if the node associated with the connector does
                 not exist
        :raises: VolumeConnectorTypeAndIdAlreadyExists if another connector
                 already exists with the same values for type and connector_id
                 fields
        :raises: InvalidUUID if invalid node UUID is passed in the patch.
        :raises: InvalidStateRequested If a node associated with the
                 volume connector is not powered off.
        """
        # RBAC check first: reject unauthorized callers before doing any work.
        context = pecan.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:volume:update', cdict, cdict)

        # Updates are only permitted on the top-level connectors resource,
        # not when reached through a parent-node sub-controller.
        if self.parent_node_ident:
            raise exception.OperationNotPermitted()

        # Validate every '/node_uuid' replacement value up front, so a bad
        # UUID fails fast before the connector is loaded from the database.
        values = api_utils.get_patch_values(patch, '/node_uuid')
        for value in values:
            if not uuidutils.is_uuid_like(value):
                message = _("Expected a UUID for node_uuid, but received "
                            "%(uuid)s.") % {
                                'uuid': six.text_type(value)
                            }
                raise exception.InvalidUUID(message=message)

        rpc_connector = objects.VolumeConnector.get_by_uuid(
            context, connector_uuid)
        connector_dict = rpc_connector.as_dict()
        # NOTE(smoriya):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        connector_dict['node_uuid'] = connector_dict.pop('node_id', None)
        # Apply the JSON patch to the API-shaped dict and rebuild the
        # API object from the result; PatchError surfaces from here.
        connector = VolumeConnector(
            **api_utils.apply_jsonpatch(connector_dict, patch))

        # Update only the fields that have changed.
        for field in objects.VolumeConnector.fields:
            try:
                patch_val = getattr(connector, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            # Normalize wsme's "unset" sentinel to None so it compares
            # sanely against the stored value.
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_connector[field] != patch_val:
                rpc_connector[field] = patch_val

        # Resolve the owning node so notifications can carry its UUID.
        rpc_node = objects.Node.get_by_id(context, rpc_connector.node_id)
        notify.emit_start_notification(context,
                                       rpc_connector,
                                       'update',
                                       node_uuid=rpc_node.uuid)
        # Emits an error notification automatically if the RPC update fails.
        with notify.handle_error_notification(context,
                                              rpc_connector,
                                              'update',
                                              node_uuid=rpc_node.uuid):
            topic = pecan.request.rpcapi.get_topic_for(rpc_node)
            new_connector = pecan.request.rpcapi.update_volume_connector(
                context, rpc_connector, topic)

        api_connector = VolumeConnector.convert_with_links(new_connector)
        notify.emit_end_notification(context,
                                     new_connector,
                                     'update',
                                     node_uuid=rpc_node.uuid)
        return api_connector
Example #58
0
def user_get(request, user_id, admin=True):
    """Fetch a keystone user by UUID and upgrade it to the v3 user format.

    Raises keystone NotFound for ids that are not UUID-like, without
    hitting the keystone service at all.
    """
    if not uuidutils.is_uuid_like(user_id):
        raise keystone_exceptions.NotFound()

    client = keystoneclient(request, admin=admin)
    return VERSIONS.upgrade_v2_user(client.users.get(user_id))
Example #59
0
    def create(self, req, body):
        """Creates a new volume.

        Builds the kwargs for volume_api.create from the request body
        (volume type, metadata, snapshot/source volume, size, multiattach,
        optional image, availability zone) and returns the detail view of
        the created volume.
        """
        if not self.is_valid_body(body, 'volume'):
            raise exc.HTTPUnprocessableEntity()

        LOG.debug('Create volume request body: %s', body)
        context = req.environ['cinder.context']
        volume = body['volume']

        kwargs = {}

        req_volume_type = volume.get('volume_type', None)
        if req_volume_type:
            # Not found exception will be handled at the wsgi level
            kwargs['volume_type'] = (volume_types.get_by_name_or_id(
                context, req_volume_type))

        kwargs['metadata'] = volume.get('metadata', None)

        snapshot_id = volume.get('snapshot_id')
        if snapshot_id is not None:
            # Reject malformed ids early with a 400 rather than a lookup miss.
            if not uuidutils.is_uuid_like(snapshot_id):
                msg = _("Snapshot ID must be in UUID form.")
                raise exc.HTTPBadRequest(explanation=msg)
            # Not found exception will be handled at the wsgi level
            kwargs['snapshot'] = self.volume_api.get_snapshot(
                context, snapshot_id)
        else:
            kwargs['snapshot'] = None

        # NOTE(review): unlike snapshot_id above, source_volid is not
        # validated as UUID-like before the lookup — confirm intentional.
        source_volid = volume.get('source_volid')
        if source_volid is not None:
            # Not found exception will be handled at the wsgi level
            kwargs['source_volume'] = self.volume_api.get_volume(
                context, source_volid)
        else:
            kwargs['source_volume'] = None

        # When no explicit size is given, inherit it from the snapshot or
        # the source volume (snapshot takes precedence).
        size = volume.get('size', None)
        if size is None and kwargs['snapshot'] is not None:
            size = kwargs['snapshot']['volume_size']
        elif size is None and kwargs['source_volume'] is not None:
            size = kwargs['source_volume']['size']

        LOG.info(_LI("Create volume of %s GB"), size)
        multiattach = volume.get('multiattach', False)
        kwargs['multiattach'] = multiattach

        # Only honor the image reference when the os-image-create extension
        # is loaded; 'imageRef' actually carries an image id/href.
        image_href = None
        image_uuid = None
        if self.ext_mgr.is_loaded('os-image-create'):
            # NOTE(jdg): misleading name "imageRef" as it's an image-id
            image_href = volume.get('imageRef')
            if image_href is not None:
                image_uuid = self._image_uuid_from_href(image_href)
                kwargs['image_id'] = image_uuid

        kwargs['availability_zone'] = volume.get('availability_zone', None)

        new_volume = self.volume_api.create(context, size,
                                            volume.get('display_name'),
                                            volume.get('display_description'),
                                            **kwargs)

        retval = _translate_volume_detail_view(context, new_volume, image_uuid)

        return {'volume': retval}
Example #60
0
    def get_all(self, node=None, node_uuid=None, address=None, marker=None,
                limit=None, sort_key='id', sort_dir='asc', fields=None,
                portgroup=None):
        """Retrieve a list of ports.

        Note that the 'node_uuid' interface is deprecated in favour
        of the 'node' interface

        :param node: UUID or name of a node, to get only ports for that node.
        :param node_uuid: UUID of a node, to get only ports for that node.
        :param address: MAC address of a port, to get the port which has
                        this MAC address.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single
                      result. This value cannot be larger than the value of
                      max_limit in the [api] section of the ironic
                      configuration, or only max_limit resources will be
                      returned.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        :param fields: Optional, a list with a specified set of fields
            of the resource to be returned.
        :param portgroup: UUID or name of a portgroup, to get only ports
            for that portgroup.
        :raises: NotAcceptable, HTTPNotFound
        """
        # Authorization happens before any request validation.
        policy_cdict = pecan.request.context.to_policy_values()
        policy.authorize('baremetal:port:get', policy_cdict, policy_cdict)

        # Validate requested fields and sort key against what this API
        # version is allowed to expose.
        api_utils.check_allow_specify_fields(fields)
        self._check_allowed_port_fields(fields)
        self._check_allowed_port_fields([sort_key])
        if portgroup and not api_utils.allow_portgroups_subcontrollers():
            raise exception.NotAcceptable()

        fields = _DEFAULT_RETURN_FIELDS if fields is None else fields

        if node and not node_uuid:
            # We're invoking this interface using positional notation, or
            # explicitly using 'node'.  Logical node names are only valid
            # when the negotiated API version supports them.
            if not (api_utils.allow_node_logical_names()
                    or uuidutils.is_uuid_like(node)):
                raise exception.NotAcceptable()

        return self._get_ports_collection(node_uuid or node, address,
                                          portgroup, marker, limit,
                                          sort_key, sort_dir, fields=fields)