def test_timeout(self, time_mock, get_lock_mock):
    # Add a finished node to confirm we don't try to timeout it
    time_mock.return_value = self.started_at
    session = db.get_session()
    finished_at = self.started_at + datetime.timedelta(seconds=60)
    with session.begin():
        db.Node(uuid=self.uuid + '1', started_at=self.started_at,
                state=istate.States.waiting,
                finished_at=finished_at).save(session)
    CONF.set_override('timeout', 99)
    time_mock.return_value = (self.started_at +
                              datetime.timedelta(seconds=100))

    self.assertEqual([self.uuid], node_cache.clean_up())

    res = [(row.state, row.finished_at, row.error) for row in
           db.model_query(db.Node).all()]
    self.assertEqual(
        [(istate.States.error,
          self.started_at + datetime.timedelta(seconds=100),
          'Introspection timeout'),
         (istate.States.waiting,
          self.started_at + datetime.timedelta(seconds=60),
          None)],
        res)
    self.assertEqual([], db.model_query(db.Attribute).all())
    self.assertEqual([], db.model_query(db.Option).all())
    get_lock_mock.assert_called_once_with(self.uuid)
    get_lock_mock.return_value.acquire.assert_called_once_with()
def test_add_node(self):
    # Ensure previous node information is cleared
    session = db.get_session()
    with session.begin():
        db.Node(uuid=self.node.uuid).save(session)
        db.Node(uuid='uuid2').save(session)
        db.Attribute(name='mac', value='11:22:11:22:11:22',
                     uuid=self.uuid).save(session)

    res = node_cache.add_node(self.node.uuid, mac=self.macs,
                              bmc_address='1.2.3.4', foo=None)
    self.assertEqual(self.uuid, res.uuid)
    self.assertTrue(time.time() - 60 < res.started_at < time.time() + 60)
    self.assertFalse(res._locked)

    res = (db.model_query(db.Node.uuid, db.Node.started_at)
           .order_by(db.Node.uuid).all())
    self.assertEqual(['1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e', 'uuid2'],
                     [t.uuid for t in res])
    self.assertTrue(time.time() - 60 < res[0].started_at <
                    time.time() + 60)

    res = (db.model_query(db.Attribute.name, db.Attribute.value,
                          db.Attribute.uuid).
           order_by(db.Attribute.name, db.Attribute.value).all())
    self.assertEqual([('bmc_address', '1.2.3.4', self.uuid),
                      ('mac', self.macs[0], self.uuid),
                      ('mac', self.macs[1], self.uuid)],
                     [(row.name, row.value, row.uuid) for row in res])
def test_add_node(self):
    # Ensure previous node information is cleared
    uuid2 = uuidutils.generate_uuid()
    session = db.get_session()
    with session.begin():
        db.Node(uuid=self.node.uuid).save(session)
        db.Node(uuid=uuid2).save(session)
        db.Attribute(name='mac', value='11:22:11:22:11:22',
                     uuid=self.uuid).save(session)

    node = node_cache.add_node(self.node.uuid, mac=self.macs,
                               bmc_address='1.2.3.4', foo=None)
    self.assertEqual(self.uuid, node.uuid)
    self.assertTrue(time.time() - 60 < node.started_at < time.time() + 60)
    self.assertFalse(node._locked)

    res = set(db.model_query(db.Node.uuid, db.Node.started_at).all())
    expected = {(node.uuid, node.started_at), (uuid2, None)}
    self.assertEqual(expected, res)

    res = (db.model_query(db.Attribute.name, db.Attribute.value,
                          db.Attribute.uuid).
           order_by(db.Attribute.name, db.Attribute.value).all())
    self.assertEqual([('bmc_address', '1.2.3.4', self.uuid),
                      ('mac', self.macs[0], self.uuid),
                      ('mac', self.macs[1], self.uuid)],
                     [(row.name, row.value, row.uuid) for row in res])
def test_delete(self):
    rules.delete(self.uuid)

    self.assertEqual([(self.uuid2,)], db.model_query(db.Rule.uuid).all())
    self.assertFalse(db.model_query(db.RuleCondition)
                     .filter_by(rule=self.uuid).all())
    self.assertFalse(db.model_query(db.RuleAction)
                     .filter_by(rule=self.uuid).all())
def test_error(self):
    self.node_info.finished(error='boom')

    self.assertEqual((datetime.datetime(1, 1, 1), 'boom'),
                     tuple(db.model_query(db.Node.finished_at,
                                          db.Node.error).first()))
    self.assertEqual([], db.model_query(db.Attribute).all())
    self.assertEqual([], db.model_query(db.Option).all())
def set_option(self, name, value):
    """Set an option for a node."""
    encoded = json.dumps(value)
    self.options[name] = value
    with db.ensure_transaction() as session:
        db.model_query(db.Option, session=session).filter_by(
            uuid=self.uuid, name=name).delete()
        db.Option(uuid=self.uuid, name=name, value=encoded).save(session)
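# Hedged usage sketch (not part of the original module): set_option() above
# JSON-encodes the value and replaces any previous row for the same option
# name, and the cached `options` property then reflects the new value. The
# option name 'example' and the dict value are illustrative only.
def _example_set_option(node_info):
    node_info.set_option('example', {'enabled': True, 'count': 42})
    assert node_info.options['example'] == {'enabled': True, 'count': 42}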
def _delete_node(uuid, session=None):
    """Delete information about a node.

    :param uuid: Ironic node UUID
    :param session: optional existing database session
    """
    with db.ensure_transaction(session) as session:
        for model in (db.Attribute, db.Option, db.Node):
            db.model_query(model,
                           session=session).filter_by(uuid=uuid).delete()
def test_old_status(self):
    CONF.set_override('node_status_keep_time', 42)
    session = db.get_session()
    with session.begin():
        db.model_query(db.Node).update(
            {'finished_at': time.time() - 100})

    self.assertEqual([], node_cache.clean_up())

    self.assertEqual([], db.model_query(db.Node).all())
def find_node(**attributes):
    """Find node in cache.

    :param attributes: attributes known about this node (like macs, BMC etc);
                       an ironic client instance may also be passed under
                       'ironic'
    :returns: structure NodeInfo with attributes ``uuid`` and ``started_at``
    :raises: Error if node is not found
    """
    ironic = attributes.pop('ironic', None)
    # NOTE(dtantsur): sorting is not required, but gives us predictability
    found = set()
    for (name, value) in sorted(attributes.items()):
        if not value:
            LOG.debug('Empty value for attribute %s', name)
            continue
        if not isinstance(value, list):
            value = [value]

        LOG.debug('Trying to use %s of value %s for node look up'
                  % (name, value))
        value_list = []
        for v in value:
            value_list.append('name="%s" AND value="%s"' % (name, v))
        stmt = ('select distinct uuid from attributes where ' +
                ' OR '.join(value_list))
        rows = (db.model_query(db.Attribute.uuid).from_statement(
            text(stmt)).all())
        if rows:
            found.update(item.uuid for item in rows)

    if not found:
        raise utils.NotFoundInCacheError(_(
            'Could not find a node for attributes %s') % attributes)
    elif len(found) > 1:
        raise utils.Error(_(
            'Multiple matching nodes found for attributes '
            '%(attr)s: %(found)s')
            % {'attr': attributes, 'found': list(found)}, code=404)

    uuid = found.pop()
    row = (db.model_query(db.Node.started_at, db.Node.finished_at).
           filter_by(uuid=uuid).first())

    if not row:
        raise utils.Error(_(
            'Could not find node %s in introspection cache, '
            'probably it\'s not on introspection now') % uuid, code=404)

    if row.finished_at:
        raise utils.Error(_(
            'Introspection for node %(node)s already finished on '
            '%(finish)s') % {'node': uuid, 'finish': row.finished_at})

    return NodeInfo(uuid=uuid, started_at=row.started_at, ironic=ironic)
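# Hedged usage sketch (not part of the original module): callers pass whatever
# discovery attributes they have as keyword arguments; find_node() raises
# NotFoundInCacheError when nothing matches and Error when the match is
# ambiguous or already finished. The attribute values below are examples.
def _example_find_node():
    node_info = find_node(bmc_address='1.2.3.4',
                          mac=['11:22:11:22:11:22'])
    return node_info.uuid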
def test_ok(self, time_mock):
    time_mock.return_value = 1000

    self.assertFalse(node_cache.clean_up())

    res = [tuple(row) for row in db.model_query(
        db.Node.finished_at, db.Node.error).all()]
    self.assertEqual([(None, None)], res)
    self.assertEqual(len(self.macs), db.model_query(db.Attribute).count())
    self.assertEqual(1, db.model_query(db.Option).count())
def _delete_node(uuid, session=None):
    """Delete information about a node.

    :param uuid: Ironic node UUID
    :param session: optional existing database session
    """
    with db.ensure_transaction(session) as session:
        db.model_query(db.Attribute,
                       session=session).filter_by(node_uuid=uuid).delete()
        for model in (db.Option, db.IntrospectionData, db.Node):
            db.model_query(model,
                           session=session).filter_by(uuid=uuid).delete()
def test_old_status(self):
    CONF.set_override('node_status_keep_time', 42)
    session = db.get_session()
    with session.begin():
        db.model_query(db.Node).update(
            {'finished_at': (datetime.datetime.utcnow() -
                             datetime.timedelta(seconds=100))})

    self.assertEqual([], node_cache.clean_up())

    self.assertEqual([], db.model_query(db.Node).all())
def test_no_timeout(self):
    CONF.set_override('timeout', 0)

    self.assertFalse(node_cache.clean_up())

    res = [tuple(row) for row in db.model_query(
        db.Node.finished_at, db.Node.error).all()]
    self.assertEqual([(None, None)], res)
    self.assertEqual(len(self.macs), db.model_query(db.Attribute).count())
    self.assertEqual(1, db.model_query(db.Option).count())
def test_success(self):
    self.node_info.finished()

    session = db.get_session()
    with session.begin():
        self.assertEqual((42.0, None),
                         tuple(db.model_query(db.Node.finished_at,
                                              db.Node.error).first()))
        self.assertEqual([], db.model_query(db.Attribute,
                                            session=session).all())
        self.assertEqual([], db.model_query(db.Option,
                                            session=session).all())
def delete(uuid):
    """Delete a rule by its UUID."""
    with db.ensure_transaction() as session:
        db.model_query(db.RuleAction,
                       session=session).filter_by(rule=uuid).delete()
        db.model_query(db.RuleCondition,
                       session=session).filter_by(rule=uuid).delete()
        count = (db.model_query(db.Rule,
                                session=session).filter_by(uuid=uuid).delete())
        if not count:
            raise utils.Error(_('Rule %s was not found') % uuid, code=404)

    LOG.info(_LI('Introspection rule %s was deleted'), uuid)
def test_success(self):
    self.node_info.finished()

    session = db.get_session()
    with session.begin():
        self.assertEqual((datetime.datetime(1, 1, 1), None),
                         tuple(db.model_query(db.Node.finished_at,
                                              db.Node.error).first()))
        self.assertEqual([], db.model_query(db.Attribute,
                                            session=session).all())
        self.assertEqual([], db.model_query(db.Option,
                                            session=session).all())
def test_ok(self, time_mock, get_lock_mock):
    time_mock.return_value = 1000

    self.assertFalse(node_cache.clean_up())

    res = [tuple(row) for row in db.model_query(
        db.Node.finished_at, db.Node.error).all()]
    self.assertEqual([(None, None)], res)
    self.assertEqual(len(self.macs), db.model_query(db.Attribute).count())
    self.assertEqual(1, db.model_query(db.Option).count())
    self.assertFalse(get_lock_mock.called)
def clean_up():
    """Clean up the cache.

    * Finish introspection for timed out nodes.
    * Drop outdated node status information.

    :return: list of timed out node UUIDs
    """
    if CONF.node_status_keep_time > 0:
        status_keep_threshold = (
            timeutils.utcnow() -
            datetime.timedelta(seconds=CONF.node_status_keep_time))
        with db.ensure_transaction() as session:
            db.model_query(db.Node, session=session).filter(
                db.Node.finished_at.isnot(None),
                db.Node.finished_at < status_keep_threshold).delete()

    timeout = CONF.timeout
    if timeout <= 0:
        return []

    threshold = timeutils.utcnow() - datetime.timedelta(seconds=timeout)
    uuids = [row.uuid for row in
             db.model_query(db.Node.uuid).filter(
                 db.Node.started_at < threshold,
                 db.Node.finished_at.is_(None)).all()]
    if not uuids:
        return []

    LOG.error('Introspection for nodes %s has timed out', uuids)
    for u in uuids:
        node_info = get_node(u, locked=True)
        try:
            if node_info.finished_at or node_info.started_at > threshold:
                continue
            if node_info.state != istate.States.waiting:
                LOG.error('Something went wrong, timeout occurred '
                          'while introspection in "%s" state',
                          node_info.state, node_info=node_info)
            node_info.finished(istate.Events.timeout,
                               error='Introspection timeout')
        finally:
            node_info.release_lock()

    return uuids
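# Hedged usage sketch (not part of the original module): clean_up() is meant
# to be called periodically; it returns the UUIDs of nodes whose introspection
# was just marked as timed out so the caller can log or react to them.
def _example_periodic_clean_up():
    timed_out = clean_up()
    if timed_out:
        LOG.warning('Introspection timed out for nodes: %s', timed_out)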
def get_node(node_id, ironic=None, locked=False):
    """Get node from cache.

    :param node_id: node UUID or name.
    :param ironic: optional ironic client instance
    :param locked: if True, get a lock on node before fetching its data
    :returns: structure NodeInfo.
    """
    if uuidutils.is_uuid_like(node_id):
        node = None
        uuid = node_id
    else:
        node = ir_utils.get_node(node_id, ironic=ironic)
        uuid = node.uuid

    if locked:
        lock = _get_lock(uuid)
        lock.acquire()
    else:
        lock = None

    try:
        row = db.model_query(db.Node).filter_by(uuid=uuid).first()
        if row is None:
            raise utils.Error(_('Could not find node %s in cache') % uuid,
                              code=404)
        return NodeInfo.from_row(row, ironic=ironic, lock=lock, node=node)
    except Exception:
        with excutils.save_and_reraise_exception():
            if lock is not None:
                lock.release()
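# Hedged usage sketch (not part of the original module): when fetching a node
# with locked=True, the caller is responsible for releasing the lock, mirroring
# the try/finally pattern used by clean_up() above.
def _example_locked_get_node(uuid):
    node_info = get_node(uuid, locked=True)
    try:
        return node_info.state
    finally:
        node_info.release_lock()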
def get_all():
    """List all rules."""
    query = db.model_query(db.Rule).order_by(db.Rule.created_at)
    return [IntrospectionRule(uuid=rule.uuid,
                              actions=rule.actions,
                              conditions=rule.conditions,
                              description=rule.description)
            for rule in query]
def test_inconsistency(self):
    session = db.get_session()
    with session.begin():
        db.model_query(db.Node).filter_by(uuid=self.uuid).delete()
    self.assertRaises(utils.Error, node_cache.find_node,
                      bmc_address='1.2.3.4')
def test_already_finished(self):
    session = db.get_session()
    with session.begin():
        db.model_query(db.Node).filter_by(uuid=self.uuid).update(
            {'finished_at': 42.0})
    self.assertRaises(utils.Error, node_cache.find_node,
                      bmc_address='1.2.3.4')
def active_macs():
    """List all MACs that are on introspection right now."""
    return {x.value for x in db.model_query(db.Attribute.value).filter_by(
        name=MACS_ATTRIBUTE)}
def test_already_finished(self):
    session = db.get_session()
    with session.begin():
        db.model_query(db.Node).filter_by(uuid=self.uuid).update(
            {'finished_at': datetime.datetime.utcnow()})
    self.assertRaises(utils.Error, node_cache.find_node,
                      bmc_address='1.2.3.4')
def options(self):
    """Node introspection options as a dict."""
    if self._options is None:
        rows = db.model_query(db.Option).filter_by(uuid=self.uuid)
        self._options = {row.name: json.loads(row.value)
                         for row in rows}
    return self._options
def _row(self, session=None):
    """Get a row from the database with self.uuid and self.version_id"""
    try:
        # race condition if version_id changed outside of this node_info
        return db.model_query(db.Node, session=session).filter_by(
            uuid=self.uuid, version_id=self.version_id).one()
    except (orm_errors.NoResultFound, orm_errors.StaleDataError):
        raise utils.NodeStateRaceCondition(node_info=self)
def attributes(self):
    """Node look up attributes as a dict."""
    if self._attributes is None:
        self._attributes = {}
        rows = db.model_query(db.Attribute).filter_by(node_uuid=self.uuid)
        for row in rows:
            self._attributes.setdefault(row.name, []).append(row.value)
    return self._attributes
def clean_up():
    """Clean up the cache.

    * Finish introspection for timed out nodes.
    * Drop outdated node status information.

    :return: list of timed out node UUIDs
    """
    status_keep_threshold = (time.time() -
                             CONF.node_status_keep_time)

    with db.ensure_transaction() as session:
        db.model_query(db.Node, session=session).filter(
            db.Node.finished_at.isnot(None),
            db.Node.finished_at < status_keep_threshold).delete()

        timeout = CONF.timeout
        if timeout <= 0:
            return []

        threshold = time.time() - timeout
        uuids = [row.uuid for row in
                 db.model_query(db.Node.uuid, session=session).filter(
                     db.Node.started_at < threshold,
                     db.Node.finished_at.is_(None)).all()]
        if not uuids:
            return []

        LOG.error(_LE('Introspection for nodes %s has timed out'), uuids)
        for u in uuids:
            node_info = get_node(u, locked=True)
            try:
                if node_info.finished_at or node_info.started_at > threshold:
                    continue
                db.model_query(db.Node, session=session).filter_by(
                    uuid=u).update({'finished_at': time.time(),
                                    'error': 'Introspection timeout'})
                db.model_query(db.Attribute, session=session).filter_by(
                    uuid=u).delete()
                db.model_query(db.Option, session=session).filter_by(
                    uuid=u).delete()
            finally:
                node_info.release_lock()

    return uuids
def finished(self, error=None):
    """Record status for this node.

    Also deletes look up attributes from the cache.

    :param error: error message
    """
    self.release_lock()
    self.finished_at = timeutils.utcnow()
    self.error = error

    with db.ensure_transaction() as session:
        self._commit(finished_at=self.finished_at, error=self.error)
        db.model_query(db.Attribute, session=session).filter_by(
            node_uuid=self.uuid).delete()
        db.model_query(db.Option, session=session).filter_by(
            uuid=self.uuid).delete()
def test_model_query(self, mock_reader):
    mock_session = mock_reader.return_value
    fake_query = mock_session.query.return_value

    query = db.model_query('db.Node')

    mock_reader.assert_called_once_with()
    mock_session.query.assert_called_once_with('db.Node')
    self.assertEqual(fake_query, query)
def version_id(self):
    """Get the version id"""
    if self._version_id is None:
        row = db.model_query(db.Node).get(self.uuid)
        if row is None:
            raise utils.NotFoundInCacheError(_('Node not found in the '
                                               'cache'), node_info=self)
        self._version_id = row.version_id
    return self._version_id
def test_set_race(self):
    with db.ensure_transaction() as session:
        row = db.model_query(db.Node, session=session).get(
            self.node_info.uuid)
        row.update({'version_id': uuidutils.generate_uuid()})
        row.save(session)

    six.assertRaisesRegex(self, utils.NodeStateRaceCondition,
                          'Node state mismatch',
                          self.node_info._set_state,
                          istate.States.finished)
def test_commit(self):
    current_time = timeutils.utcnow()
    self.node_info.started_at = self.node_info.finished_at = current_time
    self.node_info.error = "Boo!"
    self.node_info.commit()

    row = db.model_query(db.Node).get(self.node_info.uuid)
    self.assertEqual(self.node_info.started_at, row.started_at)
    self.assertEqual(self.node_info.finished_at, row.finished_at)
    self.assertEqual(self.node_info.error, row.error)
def get(uuid):
    """Get a rule by its UUID."""
    try:
        rule = db.model_query(db.Rule).filter_by(uuid=uuid).one()
    except orm.exc.NoResultFound:
        raise utils.Error(_('Rule %s was not found') % uuid, code=404)

    return IntrospectionRule(uuid=rule.uuid,
                             actions=rule.actions,
                             conditions=rule.conditions,
                             description=rule.description)
def test_timeout(self, time_mock):
    # Add a finished node to confirm we don't try to timeout it
    time_mock.return_value = self.started_at
    session = db.get_session()
    with session.begin():
        db.Node(uuid=self.uuid + '1', started_at=self.started_at,
                finished_at=self.started_at + 60).save(session)
    CONF.set_override('timeout', 99)
    time_mock.return_value = self.started_at + 100

    self.assertEqual([self.uuid], node_cache.clean_up())

    res = [(row.finished_at, row.error) for row in
           db.model_query(db.Node).all()]
    self.assertEqual([(self.started_at + 100, 'Introspection timeout'),
                      (self.started_at + 60, None)],
                     res)
    self.assertEqual([], db.model_query(db.Attribute).all())
    self.assertEqual([], db.model_query(db.Option).all())
def test__delete_node(self):
    session = db.get_session()
    with session.begin():
        db.Node(uuid=self.node.uuid).save(session)
        db.Attribute(name='mac', value='11:22:11:22:11:22',
                     uuid=self.uuid).save(session)
        data = {'s': 'value', 'b': True, 'i': 42}
        encoded = json.dumps(data)
        db.Option(uuid=self.uuid, name='name', value=encoded).save(session)

    node_cache._delete_node(self.uuid)
    session = db.get_session()
    row_node = db.model_query(db.Node).filter_by(uuid=self.uuid).first()
    self.assertIsNone(row_node)
    row_attribute = db.model_query(db.Attribute).filter_by(
        uuid=self.uuid).first()
    self.assertIsNone(row_attribute)
    row_option = db.model_query(db.Option).filter_by(
        uuid=self.uuid).first()
    self.assertIsNone(row_option)
def finished(self, event, error=None):
    """Record status for this node and process a terminal transition.

    Also deletes look up attributes from the cache.

    :param event: the event to process
    :param error: error message
    """
    self.release_lock()
    self.finished_at = timeutils.utcnow()
    self.error = error

    with db.ensure_transaction() as session:
        self.fsm_event(event)
        self._commit(finished_at=self.finished_at, error=self.error)
        db.model_query(db.Attribute, session=session).filter_by(
            node_uuid=self.uuid).delete()
        db.model_query(db.Option, session=session).filter_by(
            uuid=self.uuid).delete()
def test_introspection_statuses(self):
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)

    # NOTE(zhenguo): only test finished=False here, as we don't know
    # other nodes status in this thread.
    statuses = self.call_get_statuses().get('introspection')
    self.assertIn(self._fake_status(finished=False), statuses)

    # check we've got 1 status with a limit of 1
    statuses = self.call_get_statuses(limit=1).get('introspection')
    self.assertEqual(1, len(statuses))

    all_statuses = self.call_get_statuses().get('introspection')
    marker_statuses = self.call_get_statuses(
        marker=self.uuid, limit=1).get('introspection')
    marker_index = all_statuses.index(self.call_get_status(self.uuid))
    # marker is the last row on previous page
    self.assertEqual(all_statuses[marker_index + 1:marker_index + 2],
                     marker_statuses)

    self.call_continue(self.data)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)

    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=True, state=istate.States.finished)

    # fetch all statuses and db nodes to assert pagination
    statuses = self.call_get_statuses().get('introspection')
    nodes = db.model_query(db.Node).order_by(
        db.Node.started_at.desc()).all()

    # assert ordering
    self.assertEqual([node.uuid for node in nodes],
                     [status_.get('uuid') for status_ in statuses])

    # assert pagination
    half = len(nodes) // 2
    marker = nodes[half].uuid
    statuses = self.call_get_statuses(marker=marker).get('introspection')
    self.assertEqual([node.uuid for node in nodes[half + 1:]],
                     [status_.get('uuid') for status_ in statuses])

    # assert status links work
    self.assertEqual(
        [self.call_get_status(status_.get('uuid'))
         for status_ in statuses],
        [self.call('GET',
                   urllib.parse.urlparse(
                       status_.get('links')[0].get('href')).path).json()
         for status_ in statuses])
def get_node(uuid, ironic=None):
    """Get node from cache by its UUID.

    :param uuid: node UUID.
    :param ironic: optional ironic client instance
    :returns: structure NodeInfo.
    """
    row = db.model_query(db.Node).filter_by(uuid=uuid).first()
    if row is None:
        raise utils.Error(_('Could not find node %s in cache') % uuid,
                          code=404)
    return NodeInfo.from_row(row, ironic=ironic)
def delete_all():
    """Delete all rules."""
    with db.ensure_transaction() as session:
        db.model_query(db.RuleAction, session=session).delete()
        db.model_query(db.RuleCondition, session=session).delete()
        db.model_query(db.Rule, session=session).delete()

    LOG.info(_LI('All introspection rules were deleted'))
def test_add_node(self):
    # Ensure previous node information is cleared
    uuid2 = uuidutils.generate_uuid()
    session = db.get_session()
    with session.begin():
        db.Node(uuid=self.node.uuid,
                state=istate.States.starting).save(session)
        db.Node(uuid=uuid2,
                state=istate.States.starting).save(session)
        db.Attribute(uuid=uuidutils.generate_uuid(), name='mac',
                     value='11:22:11:22:11:22',
                     node_uuid=self.uuid).save(session)

    node = node_cache.add_node(self.node.uuid,
                               istate.States.starting,
                               mac=self.macs, bmc_address='1.2.3.4',
                               foo=None)
    self.assertEqual(self.uuid, node.uuid)
    self.assertTrue(
        (datetime.datetime.utcnow() - datetime.timedelta(seconds=60)
         < node.started_at <
         datetime.datetime.utcnow() + datetime.timedelta(seconds=60)))
    self.assertFalse(node._locked)

    res = set(db.model_query(db.Node.uuid, db.Node.started_at).all())
    expected = {(node.uuid, node.started_at), (uuid2, None)}
    self.assertEqual(expected, res)

    res = (db.model_query(db.Attribute.name, db.Attribute.value,
                          db.Attribute.node_uuid).
           order_by(db.Attribute.name, db.Attribute.value).all())
    self.assertEqual([('bmc_address', '1.2.3.4', self.uuid),
                      ('mac', self.macs[0], self.uuid),
                      ('mac', self.macs[1], self.uuid),
                      ('mac', self.macs[2], self.uuid)],
                     [(row.name, row.value, row.node_uuid)
                      for row in res])
def get_node_list(ironic=None, marker=None, limit=None):
    """Get node list from the cache.

    The list of the nodes is ordered based on the (started_at, uuid)
    attribute pair, newer items first.

    :param ironic: optional ironic client instance
    :param marker: pagination marker (an UUID or None)
    :param limit: pagination limit; None for default CONF.api_max_limit
    :returns: a list of NodeInfo instances.
    """
    if marker is not None:
        # uuid marker -> row marker for pagination
        marker = db.model_query(db.Node).get(marker)
        if marker is None:
            raise utils.Error(_('Node not found for marker: %s') % marker,
                              code=404)

    rows = db.model_query(db.Node)
    # ordered based on (started_at, uuid); newer first
    rows = db_utils.paginate_query(rows, db.Node, limit,
                                   ('started_at', 'uuid'),
                                   marker=marker, sort_dir='desc')
    return [NodeInfo.from_row(row, ironic=ironic) for row in rows]
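# Hedged usage sketch (not part of the original module): paging through the
# cache with get_node_list(); the UUID of the last item on one page becomes
# the marker for the next, as the pagination test above also exercises.
def _example_paginate_nodes(page_size=10):
    page = get_node_list(limit=page_size)
    while page:
        for node_info in page:
            LOG.debug('Cached node %s started at %s',
                      node_info.uuid, node_info.started_at)
        page = get_node_list(marker=page[-1].uuid, limit=page_size)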
def test_add_attribute(self):
    session = db.get_session()
    with session.begin():
        db.Node(uuid=self.node.uuid).save(session)
    node_info = node_cache.NodeInfo(uuid=self.uuid, started_at=42)
    node_info.add_attribute('key', 'value')
    res = db.model_query(db.Attribute.name, db.Attribute.value,
                         db.Attribute.uuid, session=session)
    res = res.order_by(db.Attribute.name, db.Attribute.value).all()
    self.assertEqual([('key', 'value', self.uuid)],
                     [tuple(row) for row in res])
    self.assertRaises(utils.Error, node_info.add_attribute,
                      'key', 'value')
    # check that .attributes got invalidated and reloaded
    self.assertEqual({'key': ['value']}, node_info.attributes)