Example 1
    def reserve_nodes(self, tag, nodes):
        # Ensure consistent sort order so we don't run into deadlocks.
        nodes.sort()

        result = []
        session = get_session()
        with session.begin():
            # TODO(deva): Optimize this by trying to reserve all the nodes
            #             at once, and fall back to reserving one at a time
            #             only if needed to determine the cause of an error.
            for node in nodes:
                query = model_query(models.Node, session=session)
                query = add_identity_filter(query, node)

                # Be optimistic and assume we usually get a reservation.
                count = query.filter_by(reservation=None).update(
                    {'reservation': tag})

                if count != 1:
                    try:
                        query.one()
                    except NoResultFound:
                        raise exception.NodeNotFound(node=node)
                    else:
                        raise exception.NodeLocked(node=node)
                ref = query.one()
                result.append(ref)

        return result
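
Note: the reservation helpers on this page all share one optimistic compare-and-set pattern: issue an UPDATE guarded by a reservation filter, and only re-query to classify the failure when the row count is not 1. Below is a minimal, self-contained sketch of that pattern in plain SQLAlchemy; the Node model and the exception types are illustrative stand-ins, not Ironic's actual API.

import sqlalchemy as sa
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm.exc import NoResultFound

Base = declarative_base()


class Node(Base):
    __tablename__ = 'nodes'
    id = sa.Column(sa.Integer, primary_key=True)
    uuid = sa.Column(sa.String(36), unique=True)
    reservation = sa.Column(sa.String(255), nullable=True)


def try_reserve(session, tag, node_uuid):
    query = session.query(Node).filter_by(uuid=node_uuid)
    # Atomic compare-and-set: claim the row only if it is unreserved.
    count = query.filter_by(reservation=None).update(
        {'reservation': tag}, synchronize_session=False)
    if count != 1:
        # The UPDATE matched nothing: the node is either missing or
        # already reserved. Re-query to tell the two cases apart.
        try:
            node = query.one()
        except NoResultFound:
            raise LookupError('node %s not found' % node_uuid)
        raise RuntimeError('node %s is locked by %s'
                           % (node_uuid, node.reservation))
    return query.one()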
Example 2
    def destroy_node(self, node):
        session = get_session()
        with session.begin():
            query = model_query(models.Node, session=session)
            query = add_identity_filter(query, node)

            try:
                node_ref = query.one()
            except NoResultFound:
                raise exception.NodeNotFound(node=node)
            if node_ref['reservation'] is not None:
                raise exception.NodeLocked(node=node)

            # Get the node ID if a UUID was supplied. The ID is
            # required for deleting all ports attached to the node.
            if uuidutils.is_uuid_like(node):
                node_id = node_ref['id']
            else:
                node_id = node

            port_query = model_query(models.Port, session=session)
            port_query = add_port_filter_by_node(port_query, node_id)
            port_query.delete()

            query.delete()
Example 3
    def test_node_locked(self, mock_check, mock_acquire):
        mgr = mock.Mock(spec=['iter_nodes'])
        mgr.iter_nodes.return_value = [('1', 'd1'), ('2', 'd2')]
        # Every acquisition raises NodeLocked, so the check callback
        # must never run, but both nodes should still be attempted.
        mock_acquire.side_effect = exception.NodeLocked("boom")
        discoverd.DiscoverdInspect()._periodic_check_result(
            mgr, mock.sentinel.context)
        self.assertFalse(mock_check.called)
        self.assertEqual(2, mock_acquire.call_count)
Example 4
    def test_delete_port_node_locked(self, mock_dpt):
        # Reserve the node so the port delete runs against a locked node.
        self.node.reserve(self.context, 'fake', self.node.uuid)
        mock_dpt.side_effect = exception.NodeLocked(node='fake-node',
                                                    host='fake-host')
        ret = self.delete('/ports/%s' % self.port.uuid, expect_errors=True)
        self.assertEqual(http_client.CONFLICT, ret.status_code)
        self.assertTrue(ret.json['error_message'])
        self.assertTrue(mock_dpt.called)
Example 5
def _check_port_change_forbidden(port, session):
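    # Changing a port is forbidden while its owning node is locked
    # (i.e. the node row carries a reservation).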
    node_id = port['node_id']
    if node_id is not None:
        query = model_query(models.Node, session=session)
        query = query.filter_by(id=node_id)
        node_ref = query.one()
        if node_ref['reservation'] is not None:
            raise exception.NodeLocked(node=node_id)
Example 6
    def test_node_locked(self, mock_check, mock_acquire):
        iter_nodes_ret = [('1', 'd1'), ('2', 'd2')]
        # Raise NodeLocked once for every node yielded by iter_nodes.
        mock_acquire.side_effect = iter([exception.NodeLocked("boom")] *
                                        len(iter_nodes_ret))
        mgr = mock.MagicMock(spec=['iter_nodes'])
        mgr.iter_nodes.return_value = iter_nodes_ret
        inspector.Inspector()._periodic_check_result(mgr,
                                                     mock.sentinel.context)
        self.assertFalse(mock_check.called)
        self.assertEqual(2, mock_acquire.call_count)
Example 7
    def test_excl_lock_reserve_exception(self, get_ports_mock, get_driver_mock,
                                         reserve_mock, release_mock,
                                         node_get_mock):
        reserve_mock.side_effect = exception.NodeLocked(node='foo', host='foo')

        self.assertRaises(exception.NodeLocked, task_manager.TaskManager,
                          self.context, 'fake-node-id')

        reserve_mock.assert_called_once_with(self.host, 'fake-node-id')
        self.assertFalse(get_ports_mock.called)
        self.assertFalse(get_driver_mock.called)
        self.assertFalse(release_mock.called)
        self.assertFalse(node_get_mock.called)
Example 8
    def test_excl_lock_exception_then_lock(self, get_ports_mock,
                                           get_driver_mock, reserve_mock,
                                           release_mock, node_get_mock):
        retry_attempts = 3
        self.config(node_locked_retry_attempts=retry_attempts,
                    group='conductor')

        # Fail on the first lock attempt, succeed on the second.
        reserve_mock.side_effect = [
            exception.NodeLocked(node='foo', host='foo'), self.node
        ]

        with task_manager.TaskManager(self.context, 'fake-node-id') as task:
            self.assertFalse(task.shared)

        reserve_mock.assert_called_with(self.context, self.host,
                                        'fake-node-id')
        self.assertEqual(2, reserve_mock.call_count)
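
Example 8 above and Examples 12-13 below exercise TaskManager's behavior of retrying the reservation on NodeLocked up to node_locked_retry_attempts times. A hedged, standalone sketch of such a retry loop follows; the helper name and its parameters are illustrative (Ironic's actual implementation drives retries from conductor config, not explicit arguments), and it assumes the same exception module imported by the snippets above.

import time


def reserve_with_retries(reserve, tag, node_id, attempts=3, interval=1.0):
    # Retry the reservation on NodeLocked, sleeping between attempts,
    # and re-raise after the final failed attempt.
    for attempt in range(attempts):
        try:
            return reserve(tag, node_id)
        except exception.NodeLocked:
            if attempt == attempts - 1:
                raise
            time.sleep(interval)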
Example 9
    def release_node(self, tag, node_id):
        with _session_for_write():
            query = model_query(models.Node)
            query = add_identity_filter(query, node_id)
            # be optimistic and assume we usually release a reservation
            count = query.filter_by(reservation=tag).update(
                {'reservation': None}, synchronize_session=False)
            try:
                if count != 1:
                    node = query.one()
                    if node['reservation'] is None:
                        raise exception.NodeNotLocked(node=node.uuid)
                    else:
                        raise exception.NodeLocked(node=node.uuid,
                                                   host=node['reservation'])
            except NoResultFound:
                raise exception.NodeNotFound(node=node_id)
Example 10
    def reserve_node(self, tag, node_id):
        with _session_for_write():
            query = _get_node_query_with_tags()
            query = add_identity_filter(query, node_id)
            # be optimistic and assume we usually create a reservation
            count = query.filter_by(reservation=None).update(
                {'reservation': tag}, synchronize_session=False)
            try:
                node = query.one()
                if count != 1:
                    # Nothing updated and node exists. Must already be
                    # locked.
                    raise exception.NodeLocked(node=node.uuid,
                                               host=node['reservation'])
                return node
            except NoResultFound:
                raise exception.NodeNotFound(node=node_id)
Example 11
    def test_delete_volume_connector_node_locked(self, mock_notify, mock_dvc):
        # Reserve the node so the delete runs against a locked node.
        self.node.reserve(self.context, 'fake', self.node.uuid)
        mock_dvc.side_effect = exception.NodeLocked(node='fake-node',
                                                    host='fake-host')
        ret = self.delete('/volume/connectors/%s' % self.connector.uuid,
                          expect_errors=True, headers=self.headers)
        self.assertEqual(http_client.CONFLICT, ret.status_code)
        self.assertTrue(ret.json['error_message'])
        self.assertTrue(mock_dvc.called)
        # A START notification is emitted, then ERROR once the delete fails.
        mock_notify.assert_has_calls(
            [mock.call(mock.ANY, mock.ANY, 'delete',
                       obj_fields.NotificationLevel.INFO,
                       obj_fields.NotificationStatus.START,
                       node_uuid=self.node.uuid),
             mock.call(mock.ANY, mock.ANY, 'delete',
                       obj_fields.NotificationLevel.ERROR,
                       obj_fields.NotificationStatus.ERROR,
                       node_uuid=self.node.uuid)])
Example 12
    def test_excl_lock_reserve_exception(self, get_ports_mock, get_driver_mock,
                                         reserve_mock, release_mock,
                                         node_get_mock):
        retry_attempts = 3
        self.config(node_locked_retry_attempts=retry_attempts,
                    group='conductor')
        reserve_mock.side_effect = exception.NodeLocked(node='foo', host='foo')

        self.assertRaises(exception.NodeLocked, task_manager.TaskManager,
                          self.context, 'fake-node-id')

        reserve_mock.assert_called_with(self.context, self.host,
                                        'fake-node-id')
        self.assertEqual(retry_attempts, reserve_mock.call_count)
        self.assertFalse(get_ports_mock.called)
        self.assertFalse(get_driver_mock.called)
        self.assertFalse(release_mock.called)
        self.assertFalse(node_get_mock.called)
Example 13
    def test_excl_lock_upgrade_exception_no_retries(
            self, get_voltgt_mock, get_volconn_mock, get_portgroups_mock,
            get_ports_mock, build_driver_mock,
            reserve_mock, release_mock, node_get_mock):
        retry_attempts = 3
        self.config(node_locked_retry_attempts=retry_attempts,
                    group='conductor')

        node_get_mock.return_value = self.node
        # Fail on the first lock attempt; a second (successful) value is
        # queued but never consumed, because retry=False stops after one try.
        reserve_mock.side_effect = [exception.NodeLocked(node='foo',
                                                         host='foo'),
                                    self.node]

        task = task_manager.TaskManager(self.context, 'fake-node-id',
                                        shared=True)
        self.assertRaises(exception.NodeLocked,
                          task.upgrade_lock, retry=False)

        reserve_mock.assert_called_once_with(self.context, self.host,
                                             'fake-node-id')
Example 14
    def release_nodes(self, tag, nodes):
        session = get_session()
        with session.begin():
            # TODO(deva): Optimize this by trying to release all the nodes
            #             at once, and fall back to releasing one at a time
            #             only if needed to determine the cause of an error.
            for node in nodes:
                query = model_query(models.Node, session=session)
                query = add_identity_filter(query, node)

                # be optimistic and assume we usually release a reservation
                count = query.filter_by(reservation=tag).update(
                    {'reservation': None})

                if count != 1:
                    try:
                        ref = query.one()
                    except NoResultFound:
                        raise exception.NodeNotFound(node=node)
                    else:
                        if ref['reservation'] is not None:
                            raise exception.NodeLocked(node=node)
Example 15
    def provision(self, node_ident, target, configdrive=None):
        """Asynchronous trigger the provisioning of the node.

        This will set the target provision state of the node, and a
        background task will begin which actually applies the state
        change. This call will return a 202 (Accepted) indicating the
        request was accepted and is in progress; the client should
        continue to GET the status of this node to observe the status
        of the requested action.

        :param node_ident: UUID or logical name of a node.
        :param target: The desired provision state of the node.
        :param configdrive: Optional. A gzipped and base64 encoded
            configdrive. Only valid when setting provision state
            to "active".
        :raises: NodeLocked (HTTP 409) if the node is currently locked.
        :raises: ClientSideError (HTTP 409) if the node is already being
                 provisioned.
        :raises: InvalidStateRequested (HTTP 400) if the requested transition
                 is not possible from the current state.
        :raises: NotAcceptable (HTTP 406) if the API version specified does
                 not allow the requested state transition.
        """
        check_allow_management_verbs(target)
        rpc_node = api_utils.get_rpc_node(node_ident)
        topic = pecan.request.rpcapi.get_topic_for(rpc_node)

        # Normally, we let the task manager recognize and deal with
        # NodeLocked exceptions. However, that isn't done until the RPC calls
        # below. In order to maintain backward compatibility with our API HTTP
        # response codes, we have this check here to deal with cases where
        # a node is already being operated on (DEPLOYING or such) and we
        # want to continue returning 409. Without it, we'd return 400.
        if rpc_node.reservation:
            raise exception.NodeLocked(node=rpc_node.uuid,
                                       host=rpc_node.reservation)

        if (target in (ir_states.ACTIVE, ir_states.REBUILD)
                and rpc_node.maintenance):
            raise exception.NodeInMaintenance(op=_('provisioning'),
                                              node=rpc_node.uuid)

        m = ir_states.machine.copy()
        m.initialize(rpc_node.provision_state)
        if not m.is_valid_event(ir_states.VERBS.get(target, target)):
            raise exception.InvalidStateRequested(
                action=target, node=rpc_node.uuid,
                state=rpc_node.provision_state)

        if configdrive and target != ir_states.ACTIVE:
            msg = (_('Adding a config drive is only supported when setting '
                     'provision state to %s') % ir_states.ACTIVE)
            raise wsme.exc.ClientSideError(msg, status_code=400)

        # Note that there is a race condition. The node state(s) could change
        # by the time the RPC call is made and the TaskManager gets a lock.
        if target == ir_states.ACTIVE:
            pecan.request.rpcapi.do_node_deploy(pecan.request.context,
                                                rpc_node.uuid, False,
                                                configdrive, topic)
        elif target == ir_states.REBUILD:
            pecan.request.rpcapi.do_node_deploy(pecan.request.context,
                                                rpc_node.uuid, True,
                                                None, topic)
        elif target == ir_states.DELETED:
            pecan.request.rpcapi.do_node_tear_down(
                pecan.request.context, rpc_node.uuid, topic)
        elif target == ir_states.VERBS['inspect']:
            pecan.request.rpcapi.inspect_hardware(
                pecan.request.context, rpc_node.uuid, topic=topic)
        elif target in (
                ir_states.VERBS['manage'], ir_states.VERBS['provide']):
            pecan.request.rpcapi.do_provisioning_action(
                pecan.request.context, rpc_node.uuid, target, topic)
        else:
            msg = (_('The requested action "%(action)s" could not be '
                     'understood.') % {'action': target})
            raise exception.InvalidStateRequested(message=msg)

        # Set the HTTP Location Header
        url_args = '/'.join([node_ident, 'states'])
        pecan.response.location = link.build_url('nodes', url_args)
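
Example 15 maps a held reservation directly to HTTP 409, and the API tests above assert http_client.CONFLICT for the same reason. In Ironic this mapping comes from a 'code' attribute carried on the exception class, which the API layer reads when building the response. The sketch below is a simplified, self-contained illustration of that convention, not Ironic's actual exception hierarchy.

from http import client as http_client


class ApiError(Exception):
    # Default status for unclassified errors.
    code = http_client.INTERNAL_SERVER_ERROR
    _msg_fmt = 'An error occurred.'

    def __init__(self, **kwargs):
        super().__init__(self._msg_fmt % kwargs)


class NodeLocked(ApiError):
    # The API layer reads 'code' off the exception to pick the HTTP
    # status, so a locked node surfaces as 409 Conflict.
    code = http_client.CONFLICT
    _msg_fmt = 'Node %(node)s is locked by host %(host)s.'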
Example 16
def _check_node_already_locked(query, query_by):
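    # Comparing against a variable rather than the literal None avoids
    # lint complaints about '!= None' while SQLAlchemy still renders
    # the filter as 'reservation IS NOT NULL'.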
    no_reserv = None
    locked_ref = query.filter(models.Node.reservation != no_reserv).first()
    if locked_ref:
        raise exception.NodeLocked(node=locked_ref[query_by],
                                   host=locked_ref['reservation'])