Example #1
def wait_for_host_agent(client, host_id, target_state='up'):
    """Wait for neutron agent to become target state

    :param client: A Neutron client object.
    :param host_id: Agent host_id
    :param target_state: 'up' to wait for the up status,
        'down' to wait for the down status.
    :returns: True if the agent state matches the requested
        target_state.
    :raises: exception.Invalid if 'target_state' is not valid.
    :raises: exception.NetworkError if host status didn't match the required
        status after max retry attempts.
    """
    if target_state not in ['up', 'down']:
        raise exception.Invalid(
            'Invalid requested agent state to validate, accepted values: '
            'up, down. Requested state: %(target_state)s' % {
                'target_state': target_state})

    LOG.debug('Validating host %(host_id)s agent is %(status)s',
              {'host_id': host_id,
               'status': target_state})
    is_alive = _validate_agent(client, host=host_id)
    LOG.debug('Agent on host %(host_id)s is %(status)s',
              {'host_id': host_id,
               'status': 'up' if is_alive else 'down'})
    if ((target_state == 'up' and is_alive)
            or (target_state == 'down' and not is_alive)):
        return True
    raise exception.NetworkError(
        'Agent on host %(host)s failed to reach state %(state)s' % {
            'host': host_id, 'state': target_state})
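A minimal stand-alone sketch of the same up/down check, with the Neutron client replaced by a hypothetical is_alive callable (all names here are illustrative, not ironic's API):

class NetworkError(Exception):
    """Stand-in for ironic's exception.NetworkError."""


def wait_for_state(is_alive, target_state='up'):
    # Same validation pattern as above: reject unknown states, then compare
    # the observed liveness against the requested target state.
    if target_state not in ('up', 'down'):
        raise ValueError('accepted values: up, down; got %s' % target_state)
    alive = is_alive()
    if (target_state == 'up') == alive:
        return True
    raise NetworkError('agent failed to reach state %s' % target_state)


print(wait_for_state(lambda: True))            # True
print(wait_for_state(lambda: False, 'down'))   # True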
Example #2
 def _get_partition(self, data):
     try:
         return (struct.unpack_from('>I', hashlib.md5(data).digest())[0]
                 >> self.partition_shift)
     except TypeError:
         raise exception.Invalid(
                 _("Invalid data supplied to HashRing.get_hosts."))
Example #3
    def __init__(self, hosts, replicas=None):
        """Create a new hash ring across the specified hosts.

        :param hosts: an iterable of hosts which will be mapped.
        :param replicas: number of hosts to map to each hash partition,
                         or len(hosts), whichever is lesser.
                         Default: CONF.hash_distribution_replicas

        """
        if replicas is None:
            replicas = CONF.hash_distribution_replicas

        try:
            self.hosts = set(hosts)
            self.replicas = replicas if replicas <= len(hosts) else len(hosts)
        except TypeError:
            raise exception.Invalid(
                _("Invalid hosts supplied when building HashRing."))

        self._host_hashes = {}
        for host in hosts:
            key = str(host).encode('utf8')
            key_hash = hashlib.md5(key)
            for p in range(2**CONF.hash_partition_exponent):
                key_hash.update(key)
                hashed_key = self._hash2int(key_hash)
                self._host_hashes[hashed_key] = host
        # Gather the (possibly colliding) resulting hashes into a bisectable
        # list.
        self._partitions = sorted(self._host_hashes.keys())
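A self-contained sketch of the ring construction and lookup this constructor sets up; the point count and the digest-to-int conversion are simplified assumptions, not ironic's exact _hash2int:

import bisect
import hashlib


def build_ring(hosts, points_per_host=32):
    # Each host contributes many md5-derived points on the ring; the sorted
    # key list is what gets bisected at lookup time.
    host_hashes = {}
    for host in hosts:
        key = str(host).encode('utf8')
        key_hash = hashlib.md5(key)
        for _ in range(points_per_host):
            key_hash.update(key)
            host_hashes[int(key_hash.hexdigest(), 16)] = host
    return sorted(host_hashes), host_hashes


def lookup(partitions, host_hashes, data):
    # Map the data onto the ring and take the next point, wrapping around
    # to the first partition at the end of the list.
    hashed = int(hashlib.md5(data).hexdigest(), 16)
    position = bisect.bisect(partitions, hashed)
    position = position if position < len(partitions) else 0
    return host_hashes[partitions[position]]


partitions, host_hashes = build_ring(['host1', 'host2', 'host3'])
print(lookup(partitions, host_hashes, b'node-uuid'))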
Example #4
 def validate(value):
     try:
         json.dumps(value)
     except TypeError:
         raise exception.Invalid(_('%s is not JSON serializable') % value)
     else:
         return value
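The check leans on json.dumps() raising TypeError for non-serializable values; a quick stand-alone illustration (ValueError stands in for exception.Invalid):

import json


def validate(value):
    try:
        json.dumps(value)
    except TypeError:
        # ironic raises exception.Invalid here; ValueError is a stand-in.
        raise ValueError('%s is not JSON serializable' % value)
    return value


print(validate({'capabilities': {'boot_mode': 'uefi'}}))  # passes through
try:
    validate({'bad': {1, 2, 3}})                          # sets are not JSON
except ValueError as e:
    print(e)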
Example #5
    def get_all(self, type=None, detail=None):
        """Retrieve a list of drivers."""
        # FIXME(deva): formatting of the auto-generated REST API docs
        #              will break from a single-line doc string.
        #              This is a result of a bug in sphinxcontrib-pecanwsme
        # https://github.com/dreamhost/sphinxcontrib-pecanwsme/issues/8
        cdict = pecan.request.context.to_policy_values()
        policy.authorize('baremetal:driver:get', cdict, cdict)

        api_utils.check_allow_driver_detail(detail)
        api_utils.check_allow_filter_driver_type(type)
        if type not in (None, 'classic', 'dynamic'):
            raise exception.Invalid(
                _('"type" filter must be one of "classic" or "dynamic", '
                  'if specified.'))

        driver_list = {}
        hw_type_dict = {}
        if type is None or type == 'classic':
            driver_list = pecan.request.dbapi.get_active_driver_dict()
        if type is None or type == 'dynamic':
            hw_type_dict = pecan.request.dbapi.get_active_hardware_type_dict()
        return DriverList.convert_with_links(driver_list,
                                             hw_type_dict,
                                             detail=detail)
Example #6
    def _validate_network_port_event(value):
        """Validate network port event fields.

        :param value: An event dict
        :returns: value
        :raises: Invalid if the network port event is not in the proper format
        """

        validators = {
            'port_id': UuidType.validate,
            'mac_address': MacAddressType.validate,
            'status': wtypes.text,
            'device_id': UuidType.validate,
            'binding:host_id': UuidType.validate,
            'binding:vnic_type': wtypes.text
        }

        keys = set(value)
        net_keys = set(validators)
        net_mandatory_fields = {'port_id', 'mac_address', 'status'}

        # Check all keys are valid for a network port event
        invalid = keys.difference(EventType.mandatory_fields.union(net_keys))
        if invalid:
            raise exception.Invalid(
                _('%s are invalid keys') % ', '.join(invalid))

        # Check all mandatory fields for the network port event are present
        missing = net_mandatory_fields.difference(keys)
        if missing:
            raise exception.Invalid(
                _('Missing mandatory keys: %s') % ', '.join(missing))

        # Check all values are of expected type
        for key in net_keys:
            if key in value:
                try:
                    validators[key](value[key])
                except Exception as e:
                    msg = (_('Event validation failure for %(key)s. '
                             '%(message)s') % {
                                 'key': key,
                                 'message': e
                             })
                    raise exception.Invalid(msg)

        return value
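The key checks above reduce to set arithmetic over the payload keys; a stand-alone sketch with a hypothetical allowed/mandatory split:

def check_keys(value, allowed, mandatory):
    keys = set(value)
    invalid = keys - allowed
    if invalid:
        raise ValueError('%s are invalid keys' % ', '.join(sorted(invalid)))
    missing = mandatory - keys
    if missing:
        raise ValueError('Missing mandatory keys: %s' % ', '.join(sorted(missing)))
    return value


event = {'event': 'network.bind_port',
         'port_id': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
         'mac_address': '52:54:00:cf:2d:31',
         'status': 'ACTIVE'}
check_keys(event,
           allowed={'event', 'port_id', 'mac_address', 'status', 'device_id',
                    'binding:host_id', 'binding:vnic_type'},
           mandatory={'port_id', 'mac_address', 'status'})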
Example #7
    def post(self, node_ident, callback_url, agent_version=None,
             agent_token=None):
        """Process a heartbeat from the deploy ramdisk.

        :param node_ident: the UUID or logical name of a node.
        :param callback_url: the URL to reach back to the ramdisk.
        :param agent_version: The version of the agent that is heartbeating.
            ``None`` indicates that the heartbeating agent predates the
            introduction of agent_version, so v3.0.0 (the last release
            before agent_version was introduced) will be assumed.
        :param agent_token: randomly generated validation token.
        :raises: NodeNotFound if node with provided UUID or name was not found.
        :raises: InvalidUuidOrName if node_ident is not valid name or UUID.
        :raises: NoValidHost if RPC topic for node could not be retrieved.
        :raises: NotFound if requested API version does not allow this
            endpoint.
        """
        if not api_utils.allow_ramdisk_endpoints():
            raise exception.NotFound()

        if agent_version and not api_utils.allow_agent_version_in_heartbeat():
            raise exception.InvalidParameterValue(
                _('Field "agent_version" not recognised'))

        cdict = api.request.context.to_policy_values()
        policy.authorize('baremetal:node:ipa_heartbeat', cdict, cdict)

        rpc_node = api_utils.get_rpc_node_with_suffix(node_ident)
        dii = rpc_node['driver_internal_info']
        agent_url = dii.get('agent_url')
        # If we have an agent_url on file and we get something different,
        # we should fail because this is unexpected behavior of the agent.
        if agent_url is not None and agent_url != callback_url:
            LOG.error('Received heartbeat for node %(node)s with '
                      'callback URL %(url)s. This is not expected, '
                      'and the heartbeat will not be processed.',
                      {'node': rpc_node.uuid, 'url': callback_url})
            raise exception.Invalid(
                _('Detected change in ramdisk provided '
                  '"callback_url"'))
        # NOTE(TheJulia): If tokens are required, lets go ahead and fail the
        # heartbeat very early on.
        token_required = CONF.require_agent_token
        if token_required and agent_token is None:
            LOG.error('Agent heartbeat received for node %(node)s '
                      'without an agent token.', {'node': node_ident})
            raise exception.InvalidParameterValue(
                _('Agent token is required for heartbeat processing.'))

        try:
            topic = api.request.rpcapi.get_topic_for(rpc_node)
        except exception.NoValidHost as e:
            e.code = http_client.BAD_REQUEST
            raise

        api.request.rpcapi.heartbeat(
            api.request.context, rpc_node.uuid, callback_url,
            agent_version, agent_token, topic=topic)
Example #8
def _translate_plain_exception(exc_value):
    if isinstance(exc_value, (glance_exc.Forbidden, glance_exc.Unauthorized)):
        return exception.NotAuthorized(exc_value)
    if isinstance(exc_value, glance_exc.NotFound):
        return exception.NotFound(exc_value)
    if isinstance(exc_value, glance_exc.BadRequest):
        return exception.Invalid(exc_value)
    return exc_value
Example #9
def _translate_image_exception(image_id, exc_value):
    if isinstance(exc_value, (exception.Forbidden, exception.Unauthorized)):
        return exception.ImageNotAuthorized(image_id=image_id)
    if isinstance(exc_value, exception.NotFound):
        return exception.ImageNotFound(image_id=image_id)
    if isinstance(exc_value, exception.BadRequest):
        return exception.Invalid(exc_value)
    return exc_value
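Both translators follow the same pattern: map client-library exceptions onto ironic's own so callers deal with a single hierarchy. A self-contained sketch with stand-in exception classes:

class ClientNotFound(Exception):
    """Stand-in for glance_exc.NotFound."""


class NotFound(Exception):
    """Stand-in for ironic's exception.NotFound."""


def translate(exc_value):
    # Return a translated exception when the type is recognised,
    # otherwise hand the original back unchanged.
    if isinstance(exc_value, ClientNotFound):
        return NotFound(exc_value)
    return exc_value


try:
    raise ClientNotFound('image 1be26c0b not found')
except ClientNotFound as exc:
    translated = translate(exc)
    print(type(translated).__name__, translated)   # NotFound ...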
Example #10
 def _validate_patch(self, patch):
     allowed_fields = ['name', 'extra']
     for p in patch:
         path = p['path'].split('/')[1]
         if path not in allowed_fields:
             msg = _("Cannot update %s in an allocation. Only 'name' and "
                     "'extra' are allowed to be updated.")
             raise exception.Invalid(msg % p['path'])
Example #11
 def _get_partition(self, data):
     try:
         hashed_key = struct.unpack_from('>I',
                                         hashlib.md5(data).digest())[0]
         position = bisect.bisect(self._partitions, hashed_key)
         return position if position < len(self._partitions) else 0
     except TypeError:
         raise exception.Invalid(
             _("Invalid data supplied to HashRing.get_hosts."))
Example #12
 def _get_partition(self, data):
     try:
         key_hash = hashlib.md5(data)
         hashed_key = self._hash2int(key_hash)
         position = bisect.bisect(self._partitions, hashed_key)
         return position if position < len(self._partitions) else 0
     except TypeError:
         raise exception.Invalid(
             _("Invalid data supplied to HashRing.get_hosts."))
Example #13
 def test_validate_fail_glance_conn_problem(self, mock_glance):
     exceptions = (exception.GlanceConnectionFailed('connection fail'),
                   exception.ImageNotAuthorized('not authorized'),
                   exception.Invalid('invalid'))
     mock_glance.side_effect = exceptions
     for exc in exceptions:
         with task_manager.acquire(self.context, self.node.uuid,
                                   shared=True) as task:
             self.assertRaises(exception.InvalidParameterValue,
                               task.driver.deploy.validate, task)
Example #14
    def gather_chassis_details(self):

        chassis_details = []
        url_member = "/rest/v1/Chassis"
        details = self.call_protocol(url_member, self.host)
        try:
            chassis_list = details['links']['Member']
            for member in chassis_list:
                url_member = member['href']
                chassis_link_details = self.call_protocol(
                    url_member, self.host)

            # Step 1 - Get the cartridges access layer
            cartridges_url_member = (
                chassis_link_details['links']['Cartridges']['href'])
            cartridge_details = self.call_protocol(cartridges_url_member,
                                                   self.host)
            cartridge_members = cartridge_details['links']['Member']
            # Step 2 - Get individual cartridge details
            for cart_member in cartridge_members:
                cartridge_url_member = cart_member['href']
                node_details = self.call_protocol(cartridge_url_member,
                                                  self.host)
                # Step 3 - Get into the Node access layer for that Cartridge
                if 'ComputerSystems' in node_details['links']:
                    node_members = node_details['links']['ComputerSystems']
                    for node_member in node_members:
                        node_url_member = node_member['href']
                        node_inner_details = self.call_protocol(
                            node_url_member, self.host)
                        # Step 4 - Collect cores, memory and arch for each node
                        single_node = dict()
                        single_node['cartridge_slot'] = (
                            cartridge_members.index(cart_member))
                        single_node['node_slot'] = (
                            node_members.index(node_member))
                        single_node['cpus'] = (
                            details['Processors']['NumberOfCores'])
                        single_node['memory_mb'] = (
                            MoonshotDiscovery.convertGBtoMB(
                                node_inner_details['Memory']
                                ['TotalSystemMemoryGB']))
                        single_node['cpu_arch'] = (
                            MoonshotDiscovery.MOONSHOT_CPU_ARCH)
                        single_node['mac_addr'] = (
                            node_inner_details['HostCorrelation']
                            ['HostMACAddress'][0])
                        single_node['mac_addr2'] = (
                            node_inner_details['HostCorrelation']
                            ['HostMACAddress'][1])
                    chassis_details.append(single_node)
        except KeyError as e:
            raise exception.Invalid(e)

        return chassis_details
Example #15
 def _get_partition(self, data):
     try:
         if six.PY3 and data is not None:
             data = data.encode('utf-8')
         key_hash = hashlib.md5(data)
         hashed_key = self._hash2int(key_hash)
         position = bisect.bisect(self._partitions, hashed_key)
         return position if position < len(self._partitions) else 0
     except TypeError:
         raise exception.Invalid(
             _("Invalid data supplied to HashRing.get_hosts."))
Example #16
    def validate(value):
        super(VifType, VifType).validate(value)
        keys = set(value)
        # Check all mandatory fields are present
        missing = VifType.mandatory_fields - keys
        if missing:
            msg = _('Missing mandatory keys: %s') % ', '.join(list(missing))
            raise exception.Invalid(msg)
        UuidOrNameType.validate(value['id'])

        return value
Example #17
def _get_node_next_steps(task, step_type, skip_current_step=True):
    """Get the task's node's next steps.

    This determines what the next (remaining) steps are, and
    returns the index into the steps list that corresponds to the
    next step. The remaining steps are determined as follows:

    * If no steps have been started yet, all the steps
      must be executed
    * If skip_current_step is False, the remaining steps start
      with the current step. Otherwise, the remaining steps
      start with the step after the current one.

    All the steps are in node.driver_internal_info['<step_type>_steps'].
    node.<step_type>_step is the current step that was just executed
    (or None, {} if no steps have been executed yet).
    node.driver_internal_info['<step_type>_step_index'] is the index
    into the steps list (or None/absent if no steps have been executed
    yet) and corresponds to node.<step_type>_step.

    :param task: A TaskManager object
    :param step_type: The type of steps to process: 'clean' or 'deploy'.
    :param skip_current_step: True to skip the current step; False to
                              include it.
    :returns: index of the next step; None if there are none to execute.

    """
    valid_types = set(['clean', 'deploy'])
    if step_type not in valid_types:
        # NOTE(rloo): No need to i18n this, since this would be a
        # developer error; it isn't user-facing.
        raise exception.Invalid(
            'step_type must be one of %(valid)s, not %(step)s' % {
                'valid': valid_types,
                'step': step_type
            })
    node = task.node
    if not getattr(node, '%s_step' % step_type):
        # first time through, all steps need to be done. Return the
        # index of the first step in the list.
        return 0

    ind = node.driver_internal_info.get('%s_step_index' % step_type)
    if ind is None:
        return None

    if skip_current_step:
        ind += 1
    if ind >= len(node.driver_internal_info['%s_steps' % step_type]):
        # no steps left to do
        ind = None
    return ind
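The index arithmetic described in the docstring, reduced to a stand-alone sketch (step names are hypothetical and the two "nothing started" cases are collapsed into one):

def next_step_index(steps, current_index, skip_current_step=True):
    if current_index is None:
        # nothing executed yet: start from the first step
        return 0 if steps else None
    ind = current_index + 1 if skip_current_step else current_index
    return ind if ind < len(steps) else None   # None when no steps remain


steps = ['erase_devices', 'update_firmware', 'reboot']
print(next_step_index(steps, None))   # 0
print(next_step_index(steps, 0))      # 1
print(next_step_index(steps, 2))      # None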
Example #18
    def validate(value):
        """Validate the input

        :param value: An event dict
        :returns: value
        :raises: Invalid if the event is not in the proper format
        """

        wtypes.DictType(wtypes.text, wtypes.text).validate(value)
        keys = set(value)

        # Check all mandatory fields are present
        missing = EventType.mandatory_fields.difference(keys)
        if missing:
            raise exception.Invalid(_('Missing mandatory keys: %s') % missing)

        # Check event is a supported event
        if value['event'] not in EventType.event_validators:
            raise exception.Invalid(
                _('%s is not a valid event.') % value['event'])

        return EventType.event_validators[value['event']](value)
Example #19
    def patch(self, allocation_ident, patch):
        """Update an existing allocation.

        :param allocation_ident: UUID or logical name of an allocation.
        :param patch: a json PATCH document to apply to this allocation.
        """
        if not api_utils.allow_allocation_update():
            raise webob_exc.HTTPMethodNotAllowed(
                _("The API version does not allow updating allocations"))
        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:allocation:update', cdict, cdict)
        self._validate_patch(patch)
        names = api_utils.get_patch_values(patch, '/name')
        for name in names:
            if name and not api_utils.is_valid_logical_name(name):
                msg = _("Cannot update allocation with invalid name "
                        "'%(name)s'") % {
                            'name': name
                        }
                raise exception.Invalid(msg)
        rpc_allocation = api_utils.get_rpc_allocation_with_suffix(
            allocation_ident)
        allocation_dict = rpc_allocation.as_dict()
        allocation = Allocation(
            **api_utils.apply_jsonpatch(allocation_dict, patch))
        # Update only the fields that have changed
        for field in objects.Allocation.fields:
            try:
                patch_val = getattr(allocation, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_allocation[field] != patch_val:
                rpc_allocation[field] = patch_val

        notify.emit_start_notification(context, rpc_allocation, 'update')
        with notify.handle_error_notification(context, rpc_allocation,
                                              'update'):
            rpc_allocation.save()
        notify.emit_end_notification(context, rpc_allocation, 'update')
        return Allocation.convert_with_links(rpc_allocation)
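api_utils.apply_jsonpatch is assumed here to wrap standard RFC 6902 patching; the underlying step looks roughly like this with the jsonpatch library:

import jsonpatch

allocation = {'name': 'alloc-1', 'extra': {}}
patch = [{'op': 'replace', 'path': '/name', 'value': 'alloc-2'},
         {'op': 'add', 'path': '/extra/note', 'value': 'updated'}]
# apply_patch returns a new, patched document
print(jsonpatch.apply_patch(allocation, patch))
# {'name': 'alloc-2', 'extra': {'note': 'updated'}}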
Example #20
    def get_all(self, type=None, detail=None):
        """Retrieve a list of drivers."""
        # FIXME(tenbrae): formatting of the auto-generated REST API docs
        #              will break from a single-line doc string.
        #              This is a result of a bug in sphinxcontrib-pecanwsme
        # https://github.com/dreamhost/sphinxcontrib-pecanwsme/issues/8
        api_utils.check_policy('baremetal:driver:get')
        api_utils.check_allow_driver_detail(detail)
        api_utils.check_allow_filter_driver_type(type)
        if type not in (None, 'classic', 'dynamic'):
            raise exception.Invalid(_(
                '"type" filter must be one of "classic" or "dynamic", '
                'if specified.'))

        if type is None or type == 'dynamic':
            hw_type_dict = api.request.dbapi.get_active_hardware_type_dict()
        else:
            # NOTE(dtantsur): we don't support classic drivers starting with
            # the Rocky release.
            hw_type_dict = {}
        return list_convert_with_links(hw_type_dict, detail=detail)
Example #21
    def patch(self, allocation_ident, patch):
        """Update an existing allocation.

        :param allocation_ident: UUID or logical name of an allocation.
        :param patch: a json PATCH document to apply to this allocation.
        """
        if not api_utils.allow_allocation_update():
            raise webob_exc.HTTPMethodNotAllowed(
                _("The API version does not allow updating allocations"))

        context = api.request.context
        rpc_allocation = api_utils.check_allocation_policy_and_retrieve(
            'baremetal:allocation:update', allocation_ident)
        self._validate_patch(patch)
        names = api_utils.get_patch_values(patch, '/name')
        for name in names:
            if name and not api_utils.is_valid_logical_name(name):
                msg = _("Cannot update allocation with invalid name "
                        "'%(name)s'") % {
                            'name': name
                        }
                raise exception.Invalid(msg)
        allocation_dict = rpc_allocation.as_dict()
        allocation_dict = api_utils.apply_jsonpatch(rpc_allocation.as_dict(),
                                                    patch)
        api_utils.patched_validate_with_schema(allocation_dict,
                                               ALLOCATION_SCHEMA,
                                               ALLOCATION_VALIDATOR)

        api_utils.patch_update_changed_fields(allocation_dict,
                                              rpc_allocation,
                                              fields=objects.Allocation.fields,
                                              schema=ALLOCATION_SCHEMA)

        notify.emit_start_notification(context, rpc_allocation, 'update')
        with notify.handle_error_notification(context, rpc_allocation,
                                              'update'):
            rpc_allocation.save()
        notify.emit_end_notification(context, rpc_allocation, 'update')
        return convert_with_links(rpc_allocation)
Example #22
    def chassis_connect(self):
        """
            HP Moonshot is a micro-server that has an active
            RIS interface in the Chassis to query for node
            properties and details.

            The connection protocol here is REST based querying
            :return: token for further access
        """
        url_member = "/rest/v1/Sessions"
        url = "https://" + self.host + url_member
        payload = {"UserName": self.username, "Password": self.password}
        headers = {'content-type': 'application/json'}
        response = requests.post(url,
                                 data=json.dumps(payload),
                                 headers=headers,
                                 verify=False)
        details = response.headers
        try:
            self.auth_token = details['x-auth-token']
        except KeyError as e:
            raise exception.Invalid(e)
Example #23
    def __init__(self, hosts, replicas=None):
        """Create a new hash ring across the specified hosts.

        :param hosts: an iterable of hosts which will be mapped.
        :param replicas: number of hosts to map to each hash partition,
                         or len(hosts), whichever is lesser.
                         Default: CONF.hash_distribution_replicas

        """
        if replicas is None:
            replicas = CONF.hash_distribution_replicas

        try:
            self.hosts = list(hosts)
            self.replicas = replicas if replicas <= len(hosts) else len(hosts)
        except TypeError:
            raise exception.Invalid(
                _("Invalid hosts supplied when building HashRing."))

        self.partition_shift = 32 - CONF.hash_partition_exponent
        self.part2host = array.array('H')
        for p in range(2**CONF.hash_partition_exponent):
            self.part2host.append(p % len(hosts))
Example #24
 def validate(value):
     try:
         return strutils.bool_from_string(value, strict=True)
     except ValueError as e:
         # raise Invalid to return 400 (BadRequest) in the API
         raise exception.Invalid(e)
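strutils.bool_from_string with strict=True accepts only the recognised true/false spellings and raises ValueError otherwise, for example:

from oslo_utils import strutils

print(strutils.bool_from_string('yes', strict=True))   # True
print(strutils.bool_from_string('0', strict=True))     # False
try:
    strutils.bool_from_string('maybe', strict=True)
except ValueError as e:
    print('rejected: %s' % e)   # surfaced as exception.Invalid above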
Example #25
    def validate(value):
        """Validate and convert the input to a LocalLinkConnectionType.

        :param value: A dictionary of values to validate, switch_id is a MAC
            address or an OpenFlow based datapath_id, switch_info is an
            optional field. Required Smart NIC fields are port_id and hostname.

        For example::

         {
            'switch_id': mac_or_datapath_id(),
            'port_id': 'Ethernet3/1',
            'switch_info': 'switch1'
         }

        Or for Smart NIC::

         {
            'port_id': 'rep0-0',
            'hostname': 'host1-bf'
         }

        :returns: A dictionary.
        :raises: Invalid if some of the keys in the dictionary being validated
            are unknown, invalid, or some required ones are missing.
        """
        wtypes.DictType(wtypes.text, wtypes.text).validate(value)

        keys = set(value)

        # This is to work around an issue when an API object is initialized
        # from an RPC object, in which dictionary fields that are set to None
        # become empty dictionaries
        if not keys:
            return value

        invalid = keys - LocalLinkConnectionType.valid_fields
        if invalid:
            raise exception.Invalid(_('%s are invalid keys') % (invalid))

        # Check any mandatory fields sets are present
        for mandatory_set in LocalLinkConnectionType.mandatory_fields_list:
            if mandatory_set <= keys:
                break
        else:
            msg = _('Missing mandatory keys. Required keys are '
                    '%(required_fields)s. Or in case of Smart NIC '
                    '%(smart_nic_required_fields)s. '
                    'Submitted keys are %(keys)s .') % {
                        'required_fields':
                        LocalLinkConnectionType.local_link_mandatory_fields,
                        'smart_nic_required_fields':
                        LocalLinkConnectionType.smart_nic_mandatory_fields,
                        'keys': keys
                    }
            raise exception.Invalid(msg)

        # Check switch_id is either a valid mac address or
        # OpenFlow datapath_id and normalize it.
        try:
            value['switch_id'] = utils.validate_and_normalize_mac(
                value['switch_id'])
        except exception.InvalidMAC:
            try:
                value['switch_id'] = utils.validate_and_normalize_datapath_id(
                    value['switch_id'])
            except exception.InvalidDatapathID:
                raise exception.InvalidSwitchID(switch_id=value['switch_id'])
        except KeyError:
            # In Smart NIC case 'switch_id' is optional.
            pass

        return value
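The for/else block above accepts the value once at least one mandatory field set is fully present; a stand-alone sketch of that check, with the field sets taken from the docstring's two cases:

mandatory_fields_list = [
    {'switch_id', 'port_id'},   # regular local-link case
    {'port_id', 'hostname'},    # Smart NIC case
]


def has_mandatory_set(value):
    keys = set(value)
    for mandatory_set in mandatory_fields_list:
        if mandatory_set <= keys:   # every required key is present
            return True
    return False


print(has_mandatory_set({'port_id': 'rep0-0', 'hostname': 'host1-bf'}))   # True
print(has_mandatory_set({'switch_info': 'switch1'}))                      # False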
Example #26
    def post(self, allocation):
        """Create a new allocation.

        :param allocation: an allocation within the request body.
        """
        context = api.request.context
        cdict = context.to_policy_values()
        allocation = self._authorize_create_allocation(allocation)

        if (allocation.get('name')
                and not api_utils.is_valid_logical_name(allocation['name'])):
            msg = _("Cannot create allocation with invalid name "
                    "'%(name)s'") % {
                        'name': allocation['name']
                    }
            raise exception.Invalid(msg)

        # TODO(TheJulia): We need to likely look at refactoring post
        # processing for allocations as pep8 says it is a complexity of 19,
        # although it is not actually that horrible since it is phased out
        # just modifying/assembling the allocation. Given that, it seems
        # not great to try for a full method rewrite at the same time as
        # RBAC work, so the complexity limit is being raised. :(
        if (CONF.oslo_policy.enforce_new_defaults
                and cdict.get('system_scope') != 'all'):
            # if not a system scope originated request, we need to check/apply
            # an owner - But we can only do this with when new defaults are
            # enabled.
            project_id = cdict.get('project_id')
            req_alloc_owner = allocation.get('owner')
            if req_alloc_owner:
                if not api_utils.check_policy_true(
                        'baremetal:allocation:create_restricted'):
                    if req_alloc_owner != project_id:
                        msg = _("Cannot create allocation with an owner "
                                "Project ID value %(req_owner)s not matching "
                                "the requestor Project ID %(project)s. "
                                "Policy baremetal:allocation:create_restricted"
                                " is required for this capability.") % {
                                    'req_owner': req_alloc_owner,
                                    'project': project_id
                                }
                        raise exception.NotAuthorized(msg)
                # NOTE(TheJulia): IF not restricted, i.e. else above,
                # their supplied allocation owner is okay, they are allowed
                # to provide an override by policy.
            else:
                # An allocation owner was not supplied, we need to save one.
                allocation['owner'] = project_id
        node = None
        if allocation.get('node'):
            if api_utils.allow_allocation_backfill():
                try:
                    node = api_utils.get_rpc_node(allocation['node'])
                    api_utils.check_owner_policy(
                        'node',
                        'baremetal:node:get',
                        node.owner,
                        node.lessee,
                        conceal_node=allocation['node'])
                except exception.NodeNotFound as exc:
                    exc.code = http_client.BAD_REQUEST
                    raise
            else:
                msg = _("Cannot set node when creating an allocation "
                        "in this API version")
                raise exception.Invalid(msg)

        if not allocation.get('resource_class'):
            if node:
                allocation['resource_class'] = node.resource_class
            else:
                msg = _("The resource_class field is mandatory when not "
                        "backfilling")
                raise exception.Invalid(msg)

        if allocation.get('candidate_nodes'):
            # Convert nodes from names to UUIDs and check their validity
            try:
                owner = None
                if not api_utils.check_policy_true(
                        'baremetal:allocation:create_restricted'):
                    owner = cdict.get('project_id')
                # Filter the candidate search by the requestor project ID,
                # if any. As a result, processes authenticating with system
                # scope will not be impacted, whereas project-scoped requests
                # will need additional authorization.
                converted = api.request.dbapi.check_node_list(
                    allocation['candidate_nodes'], project=owner)
            except exception.NodeNotFound as exc:
                exc.code = http_client.BAD_REQUEST
                raise
            else:
                # Make sure we keep the ordering of candidate nodes.
                allocation['candidate_nodes'] = [
                    converted[ident] for ident in allocation['candidate_nodes']
                ]

        # NOTE(yuriyz): UUID is mandatory for notifications payload
        if not allocation.get('uuid'):
            if node and node.instance_uuid:
                # When backfilling without UUID requested, assume that the
                # target instance_uuid is the desired UUID
                allocation['uuid'] = node.instance_uuid
            else:
                allocation['uuid'] = uuidutils.generate_uuid()

        new_allocation = objects.Allocation(context, **allocation)
        if node:
            new_allocation.node_id = node.id
            topic = api.request.rpcapi.get_topic_for(node)
        else:
            topic = api.request.rpcapi.get_random_topic()

        notify.emit_start_notification(context, new_allocation, 'create')
        with notify.handle_error_notification(context, new_allocation,
                                              'create'):
            new_allocation = api.request.rpcapi.create_allocation(
                context, new_allocation, topic)
        notify.emit_end_notification(context, new_allocation, 'create')

        # Set the HTTP Location Header
        api.response.location = link.build_url('allocations',
                                               new_allocation.uuid)
        return convert_with_links(new_allocation)
Example #27
    def post(self, allocation):
        """Create a new allocation.

        :param allocation: an allocation within the request body.
        """
        context = api.request.context
        allocation = self._authorize_create_allocation(allocation)

        if (allocation.name
                and not api_utils.is_valid_logical_name(allocation.name)):
            msg = _("Cannot create allocation with invalid name "
                    "'%(name)s'") % {
                        'name': allocation.name
                    }
            raise exception.Invalid(msg)

        if allocation.traits:
            for trait in allocation.traits:
                api_utils.validate_trait(trait)

        node = None
        if allocation.node is not atypes.Unset:
            if api_utils.allow_allocation_backfill():
                try:
                    node = api_utils.get_rpc_node(allocation.node)
                except exception.NodeNotFound as exc:
                    exc.code = http_client.BAD_REQUEST
                    raise
            else:
                msg = _("Cannot set node when creating an allocation "
                        "in this API version")
                raise exception.Invalid(msg)

        if not allocation.resource_class:
            if node:
                allocation.resource_class = node.resource_class
            else:
                msg = _("The resource_class field is mandatory when not "
                        "backfilling")
                raise exception.Invalid(msg)

        if allocation.candidate_nodes:
            # Convert nodes from names to UUIDs and check their validity
            try:
                converted = api.request.dbapi.check_node_list(
                    allocation.candidate_nodes)
            except exception.NodeNotFound as exc:
                exc.code = http_client.BAD_REQUEST
                raise
            else:
                # Make sure we keep the ordering of candidate nodes.
                allocation.candidate_nodes = [
                    converted[ident] for ident in allocation.candidate_nodes
                ]

        all_dict = allocation.as_dict()

        # NOTE(yuriyz): UUID is mandatory for notifications payload
        if not all_dict.get('uuid'):
            if node and node.instance_uuid:
                # When backfilling without UUID requested, assume that the
                # target instance_uuid is the desired UUID
                all_dict['uuid'] = node.instance_uuid
            else:
                all_dict['uuid'] = uuidutils.generate_uuid()

        new_allocation = objects.Allocation(context, **all_dict)
        if node:
            new_allocation.node_id = node.id
            topic = api.request.rpcapi.get_topic_for(node)
        else:
            topic = api.request.rpcapi.get_random_topic()

        notify.emit_start_notification(context, new_allocation, 'create')
        with notify.handle_error_notification(context, new_allocation,
                                              'create'):
            new_allocation = api.request.rpcapi.create_allocation(
                context, new_allocation, topic)
        notify.emit_end_notification(context, new_allocation, 'create')

        # Set the HTTP Location Header
        api.response.location = link.build_url('allocations',
                                               new_allocation.uuid)
        return Allocation.convert_with_links(new_allocation)
Example #28
    def post(self, port):
        """Create a new port.

        :param port: a port within the request body.
        :raises: NotAcceptable, HTTPNotFound, Conflict
        """
        if self.parent_node_ident or self.parent_portgroup_ident:
            raise exception.OperationNotPermitted()

        context = api.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:port:create', cdict, cdict)

        pdict = port.as_dict()
        self._check_allowed_port_fields(pdict)

        if (port.is_smartnic
                and not types.locallinkconnectiontype.validate_for_smart_nic(
                    port.local_link_connection)):
            raise exception.Invalid("Smart NIC port must have port_id "
                                    "and hostname in local_link_connection")

        create_remotely = api.request.rpcapi.can_send_create_port()
        if (not create_remotely and pdict.get('portgroup_uuid')):
            # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the
            # conductor service to facilitate validation of the physical
            # network field of ports in portgroups. During a rolling upgrade,
            # the RPCAPI will reject the create_port method, so we need to
            # create the port locally. If the port is a member of a portgroup,
            # we are unable to perform the validation and must reject the
            # request.
            raise exception.NotAcceptable()

        vif = api_utils.handle_post_port_like_extra_vif(pdict)

        if (pdict.get('portgroup_uuid') and (pdict.get('pxe_enabled') or vif)):
            rpc_pg = objects.Portgroup.get_by_uuid(context,
                                                   pdict['portgroup_uuid'])
            if not rpc_pg.standalone_ports_supported:
                msg = _("Port group %s doesn't support standalone ports. "
                        "This port cannot be created as a member of that "
                        "port group because either 'extra/vif_port_id' "
                        "was specified or 'pxe_enabled' was set to True.")
                raise exception.Conflict(msg % pdict['portgroup_uuid'])

        # NOTE(yuriyz): UUID is mandatory for notifications payload
        if not pdict.get('uuid'):
            pdict['uuid'] = uuidutils.generate_uuid()

        rpc_port = objects.Port(context, **pdict)
        rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)

        notify_extra = {
            'node_uuid': port.node_uuid,
            'portgroup_uuid': port.portgroup_uuid
        }
        notify.emit_start_notification(context, rpc_port, 'create',
                                       **notify_extra)
        with notify.handle_error_notification(context, rpc_port, 'create',
                                              **notify_extra):
            # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the
            # conductor service to facilitate validation of the physical
            # network field of ports in portgroups. During a rolling upgrade,
            # the RPCAPI will reject the create_port method, so we need to
            # create the port locally.
            if create_remotely:
                topic = api.request.rpcapi.get_topic_for(rpc_node)
                new_port = api.request.rpcapi.create_port(
                    context, rpc_port, topic)
            else:
                rpc_port.create()
                new_port = rpc_port
        notify.emit_end_notification(context, new_port, 'create',
                                     **notify_extra)
        # Set the HTTP Location Header
        api.response.location = link.build_url('ports', new_port.uuid)
        return Port.convert_with_links(new_port)
Example #29
    def post(self, port):
        """Create a new port.

        :param port: a port within the request body.
        :raises: NotAcceptable, HTTPNotFound, Conflict
        """
        if self.parent_node_ident or self.parent_portgroup_ident:
            raise exception.OperationNotPermitted()

        context = api.request.context
        api_utils.check_policy('baremetal:port:create')

        # NOTE(lucasagomes): Create the node_id attribute on-the-fly
        #                    to satisfy the api -> rpc object
        #                    conversion.
        node = api_utils.replace_node_uuid_with_id(port)

        self._check_allowed_port_fields(port)

        portgroup = None
        if port.get('portgroup_uuid'):
            try:
                portgroup = objects.Portgroup.get(api.request.context,
                                                  port.pop('portgroup_uuid'))
                if portgroup.node_id != node.id:
                    raise exception.BadRequest(_('Port can not be added to a '
                                                 'portgroup belonging to a '
                                                 'different node.'))
                # NOTE(lucasagomes): Create the portgroup_id attribute
                #                    on-the-fly to satisfy the api ->
                #                    rpc object conversion.
                port['portgroup_id'] = portgroup.id
            except exception.PortgroupNotFound as e:
                # Change error code because 404 (NotFound) is an inappropriate
                # response for a POST request to create a Port
                e.code = http_client.BAD_REQUEST  # BadRequest
                raise e

        if port.get('is_smartnic'):
            try:
                api_utils.LOCAL_LINK_SMART_NIC_VALIDATOR(
                    'local_link_connection',
                    port.get('local_link_connection'))
            except exception.Invalid:
                raise exception.Invalid(
                    "Smart NIC port must have port_id "
                    "and hostname in local_link_connection")

        physical_network = port.get('physical_network')
        if physical_network is not None and not physical_network:
            raise exception.Invalid('A non-empty value is required when '
                                    'setting physical_network')

        vif = api_utils.handle_post_port_like_extra_vif(port)

        if (portgroup and (port.get('pxe_enabled') or vif)):
            if not portgroup.standalone_ports_supported:
                msg = _("Port group %s doesn't support standalone ports. "
                        "This port cannot be created as a member of that "
                        "port group because either 'extra/vif_port_id' "
                        "was specified or 'pxe_enabled' was set to True.")
                raise exception.Conflict(
                    msg % portgroup.uuid)

        # NOTE(yuriyz): UUID is mandatory for notifications payload
        if not port.get('uuid'):
            port['uuid'] = uuidutils.generate_uuid()

        rpc_port = objects.Port(context, **port)

        notify_extra = {
            'node_uuid': node.uuid,
            'portgroup_uuid': portgroup and portgroup.uuid or None
        }
        notify.emit_start_notification(context, rpc_port, 'create',
                                       **notify_extra)
        with notify.handle_error_notification(context, rpc_port, 'create',
                                              **notify_extra):
            topic = api.request.rpcapi.get_topic_for(node)
            new_port = api.request.rpcapi.create_port(context, rpc_port,
                                                      topic)
        notify.emit_end_notification(context, new_port, 'create',
                                     **notify_extra)
        # Set the HTTP Location Header
        api.response.location = link.build_url('ports', new_port.uuid)
        return convert_with_links(new_port)
Example #30
    def patch(self, port_uuid, patch):
        """Update an existing port.

        :param port_uuid: UUID of a port.
        :param patch: a json PATCH document to apply to this port.
        :raises: NotAcceptable, HTTPNotFound
        """
        if self.parent_node_ident or self.parent_portgroup_ident:
            raise exception.OperationNotPermitted()

        api_utils.patch_validate_allowed_fields(patch, PATCH_ALLOWED_FIELDS)

        context = api.request.context
        fields_to_check = set()
        for field in (self.advanced_net_fields
                      + ['portgroup_uuid', 'physical_network',
                         'is_smartnic', 'local_link_connection/network_type']):
            field_path = '/%s' % field
            if (api_utils.get_patch_values(patch, field_path)
                    or api_utils.is_path_removed(patch, field_path)):
                fields_to_check.add(field)
        self._check_allowed_port_fields(fields_to_check)

        rpc_port, rpc_node = api_utils.check_port_policy_and_retrieve(
            'baremetal:port:update', port_uuid)

        port_dict = rpc_port.as_dict()
        # NOTE(lucasagomes):
        # 1) Remove node_id because it's an internal value and
        #    not present in the API object
        # 2) Add node_uuid
        port_dict.pop('node_id', None)
        port_dict['node_uuid'] = rpc_node.uuid
        # NOTE(vsaienko):
        # 1) Remove portgroup_id because it's an internal value and
        #    not present in the API object
        # 2) Add portgroup_uuid
        portgroup = None
        if port_dict.get('portgroup_id'):
            portgroup = objects.Portgroup.get_by_id(
                context, port_dict.pop('portgroup_id'))
        port_dict['portgroup_uuid'] = portgroup and portgroup.uuid or None

        port_dict = api_utils.apply_jsonpatch(port_dict, patch)

        api_utils.handle_patch_port_like_extra_vif(
            rpc_port, port_dict['internal_info'], patch)

        try:
            if api_utils.is_path_updated(patch, '/portgroup_uuid'):
                if port_dict.get('portgroup_uuid'):
                    portgroup = objects.Portgroup.get_by_uuid(
                        context, port_dict['portgroup_uuid'])
                else:
                    portgroup = None
        except exception.PortGroupNotFound as e:
            # Change error code because 404 (NotFound) is an inappropriate
            # response for a PATCH request to change a Port
            e.code = http_client.BAD_REQUEST  # BadRequest
            raise

        try:
            if port_dict['node_uuid'] != rpc_node.uuid:
                rpc_node = objects.Node.get(
                    api.request.context, port_dict['node_uuid'])
        except exception.NodeNotFound as e:
            # Change error code because 404 (NotFound) is an inappropriate
            # response for a PATCH request to change a Port
            e.code = http_client.BAD_REQUEST  # BadRequest
            raise

        api_utils.patched_validate_with_schema(
            port_dict, PORT_PATCH_SCHEMA, PORT_PATCH_VALIDATOR)

        api_utils.patch_update_changed_fields(
            port_dict, rpc_port, fields=objects.Port.fields,
            schema=PORT_PATCH_SCHEMA,
            id_map={
                'node_id': rpc_node.id,
                'portgroup_id': portgroup and portgroup.id or None
            }
        )

        if (rpc_node.provision_state == ir_states.INSPECTING
                and api_utils.allow_inspect_wait_state()):
            msg = _('Cannot update port "%(port)s" on "%(node)s" while it is '
                    'in state "%(state)s".') % {'port': rpc_port.uuid,
                                                'node': rpc_node.uuid,
                                                'state': ir_states.INSPECTING}
            raise exception.ClientSideError(msg,
                                            status_code=http_client.CONFLICT)

        if (api_utils.is_path_updated(patch, '/physical_network')
            and rpc_port['physical_network'] is not None
                and not rpc_port['physical_network']):
            raise exception.Invalid('A non-empty value is required when '
                                    'setting physical_network')

        notify_extra = {'node_uuid': rpc_node.uuid,
                        'portgroup_uuid': portgroup and portgroup.uuid or None}
        notify.emit_start_notification(context, rpc_port, 'update',
                                       **notify_extra)
        with notify.handle_error_notification(context, rpc_port, 'update',
                                              **notify_extra):
            topic = api.request.rpcapi.get_topic_for(rpc_node)
            new_port = api.request.rpcapi.update_port(context, rpc_port,
                                                      topic)

        api_port = convert_with_links(new_port)
        notify.emit_end_notification(context, new_port, 'update',
                                     **notify_extra)

        return api_port