def test_ensure_transaction_session(self, mock_writer):
    """A caller-supplied session is reused; no new writer is created."""
    fake_session = mock.MagicMock()
    with db.ensure_transaction(session=fake_session) as returned:
        # the writer factory must not be consulted at all
        self.assertFalse(mock_writer.called)
        fake_session.begin.assert_called_once_with(subtransactions=True)
        self.assertEqual(fake_session, returned)
def test_ensure_transaction_new_session(self, mock_writer):
    """Without an explicit session, a fresh writer session is created."""
    created_session = mock_writer.return_value
    with db.ensure_transaction() as returned:
        mock_writer.assert_called_once_with()
        created_session.begin.assert_called_once_with(subtransactions=True)
        self.assertEqual(created_session, returned)
def start_introspection(uuid, **kwargs):
    """Start the introspection of a node.

    If a node_info record exists in the DB, a start transition is used rather
    than dropping the record in order to check for the start transition
    validity in particular node state.

    :param uuid: Ironic node UUID
    :param kwargs: passed on to add_node()
    :raises: NodeStateInvalidEvent in case the start transition is invalid in
             the current node state
    :raises: NodeStateRaceCondition if a mismatch was detected between the
             node_info cache and the DB
    :returns: NodeInfo
    """
    with db.ensure_transaction():
        node_info = NodeInfo(uuid)

        # check that the start transition is possible
        try:
            node_info.fsm_event(istate.Events.start)
        except utils.NotFoundInCacheError:
            # node not found while in the fsm_event handler
            LOG.debug('Node missing in the cache; adding it now',
                      node_info=node_info)
            state = istate.States.starting
        else:
            # a record exists and accepted the start event; carry over the
            # state the transition produced
            state = node_info.state

        # add_node() re-creates the record under this same transaction,
        # seeded with the state computed above
        return add_node(uuid, state, **kwargs)
def add_node(uuid, **attributes):
    """Store information about a node under introspection.

    All existing information about this node is dropped.
    Empty values are skipped.

    :param uuid: Ironic node UUID
    :param attributes: attributes known about this node (like macs, BMC etc);
                       also ironic client instance may be passed under
                       'ironic'
    :returns: NodeInfo
    """
    started_at = time.time()
    with db.ensure_transaction() as session:
        # wipe any stale record before inserting the fresh one
        _delete_node(uuid)
        db.Node(uuid=uuid, started_at=started_at).save(session)

        node_info = NodeInfo(uuid=uuid, started_at=started_at,
                             ironic=attributes.pop('ironic', None))
        # persist only non-empty lookup attributes
        for attr_name, attr_value in attributes.items():
            if attr_value:
                node_info.add_attribute(attr_name, attr_value,
                                        session=session)

    return node_info
def add_attribute(self, name, value, session=None):
    """Store look up attribute for a node in the database.

    :param name: attribute name
    :param value: attribute value or list of possible values
    :param session: optional existing database session
    :raises: Error if attributes values are already in database
    """
    # normalize to a list without mutating the caller's argument
    values = value if isinstance(value, list) else [value]

    with db.ensure_transaction(session) as session:
        try:
            for item in values:
                db.Attribute(name=name, value=item,
                             uuid=self.uuid).save(session)
        except db_exc.DBDuplicateEntry as exc:
            LOG.error(_LE('Database integrity error %s during '
                          'adding attributes'), exc, node_info=self)
            raise utils.Error(_(
                'Some or all of %(name)s\'s %(value)s are already '
                'on introspection') % {'name': name, 'value': values},
                node_info=self)
    # Invalidate attributes so they're loaded on next usage
    self._attributes = None
def add_node(uuid, state, manage_boot=True, **attributes):
    """Store information about a node under introspection.

    All existing information about this node is dropped.
    Empty values are skipped.

    :param uuid: Ironic node UUID
    :param state: The initial state of the node
    :param manage_boot: whether to manage boot for this node
    :param attributes: attributes known about this node (like macs, BMC etc);
                       also ironic client instance may be passed under
                       'ironic'
    :returns: NodeInfo
    """
    started_at = timeutils.utcnow()
    with db.ensure_transaction() as session:
        # drop any leftovers from a previous introspection run
        _delete_node(uuid)
        version_id = uuidutils.generate_uuid()
        db.Node(uuid=uuid, state=state, version_id=version_id,
                started_at=started_at,
                manage_boot=manage_boot).save(session)

        node_info = NodeInfo(uuid=uuid, state=state, started_at=started_at,
                             version_id=version_id, manage_boot=manage_boot,
                             ironic=attributes.pop('ironic', None))
        # record only non-empty lookup attributes
        for attr_name, attr_value in attributes.items():
            if attr_value:
                node_info.add_attribute(attr_name, attr_value,
                                        session=session)

    return node_info
def _commit(self, **fields):
    """Commit the fields into the DB."""
    LOG.debug('Committing fields: %s', fields, node_info=self)

    with db.ensure_transaction() as session:
        # bump the version first so concurrent writers are detected
        self._set_version_id(uuidutils.generate_uuid(), session)
        self._row(session).update(fields)
def add_attribute(self, name, value, session=None):
    """Store look up attribute for a node in the database.

    :param name: attribute name
    :param value: attribute value or list of possible values
    :param session: optional existing database session
    :raises: Error if attributes values are already in database
    """
    if not isinstance(value, list):
        value = [value]

    with db.ensure_transaction(session) as session:
        try:
            for single in value:
                db.Attribute(name=name, value=single,
                             uuid=self.uuid).save(session)
        except db_exc.DBDuplicateEntry as exc:
            LOG.error(_LE('Database integrity error %s during '
                          'adding attributes'), exc, node_info=self)
            # the duplicate means these lookup values are already claimed
            msg = _('Some or all of %(name)s\'s %(value)s are already '
                    'on introspection') % {'name': name, 'value': value}
            raise utils.Error(msg, node_info=self)
    # Invalidate attributes so they're loaded on next usage
    self._attributes = None
def create(conditions_json, actions_json, uuid=None,
           description=None, scope=None):
    """Create a new rule in database.

    :param conditions_json: list of dicts with the following keys:
                            * op - operator
                            * field - JSON path to field to compare
                            Other keys are stored as is.
    :param actions_json: list of dicts with the following keys:
                         * action - action type
                         Other keys are stored as is.
    :param uuid: rule UUID, will be generated if empty
    :param description: human-readable rule description
    :param scope: if scope on node and rule matches, rule applies; if its
                  empty, rule applies to all nodes.
    :returns: new IntrospectionRule object
    :raises: utils.Error on failure
    """
    uuid = uuid or uuidutils.generate_uuid()
    LOG.debug('Creating rule %(uuid)s with description "%(descr)s", '
              'conditions %(conditions)s, scope "%(scope)s"'
              ' and actions %(actions)s',
              {'uuid': uuid, 'descr': description, 'scope': scope,
               'conditions': conditions_json, 'actions': actions_json})

    # validate the input before touching the database
    conditions = _validate_conditions(conditions_json)
    actions = _validate_actions(actions_json)

    try:
        with db.ensure_transaction() as session:
            rule = db.Rule(uuid=uuid, description=description,
                           disabled=False, created_at=timeutils.utcnow(),
                           scope=scope)

            for field, op, multiple, invert, params in conditions:
                rule.conditions.append(db.RuleCondition(op=op,
                                                        field=field,
                                                        multiple=multiple,
                                                        invert=invert,
                                                        params=params))

            for action, params in actions:
                rule.actions.append(db.RuleAction(action=action,
                                                  params=params))

            rule.save(session)
    except db_exc.DBDuplicateEntry as exc:
        # a rule with this UUID already exists - surface a 409 conflict
        LOG.error('Database integrity error %s when '
                  'creating a rule', exc)
        raise utils.Error(_('Rule with UUID %s already exists') % uuid,
                          code=409)

    LOG.info('Created rule %(uuid)s with description "%(descr)s" '
             'and scope "%(scope)s"',
             {'uuid': uuid, 'descr': description, 'scope': scope})
    return IntrospectionRule(uuid=uuid,
                             conditions=rule.conditions,
                             actions=rule.actions,
                             description=description,
                             scope=rule.scope)
def delete_all():
    """Delete all rules."""
    with db.ensure_transaction() as session:
        # delete children first to satisfy foreign key constraints
        for model in (db.RuleAction, db.RuleCondition, db.Rule):
            db.model_query(model, session=session).delete()
    LOG.info(_LI('All introspection rules were deleted'))
def set_option(self, name, value):
    """Set an option for a node."""
    serialized = json.dumps(value)
    self.options[name] = value
    with db.ensure_transaction() as session:
        # delete-then-insert replaces any previous value for this option
        query = db.model_query(db.Option, session=session)
        query.filter_by(uuid=self.uuid, name=name).delete()
        db.Option(uuid=self.uuid, name=name,
                  value=serialized).save(session)
def set_option(self, name, value):
    """Set an option for a node."""
    encoded = json.dumps(value)
    # keep the in-memory cache in sync with what we persist
    self.options[name] = value
    with db.ensure_transaction() as session:
        (db.model_query(db.Option, session=session)
         .filter_by(uuid=self.uuid, name=name)
         .delete())
        db.Option(uuid=self.uuid, name=name, value=encoded).save(session)
def test_set_race(self):
    """A stale cached version_id must raise NodeStateRaceCondition."""
    # bump version_id in the DB behind the cached node_info's back
    with db.ensure_transaction() as session:
        node_row = db.model_query(db.Node, session=session).get(
            self.node_info.uuid)
        node_row.update({'version_id': uuidutils.generate_uuid()})
        node_row.save(session)

    six.assertRaisesRegex(self, utils.NodeStateRaceCondition,
                          'Node state mismatch',
                          self.node_info._set_state,
                          istate.States.finished)
def _delete_node(uuid, session=None):
    """Delete information about a node.

    :param uuid: Ironic node UUID
    :param session: optional existing database session
    """
    with db.ensure_transaction(session) as session:
        # child tables first, the node row itself last
        db.model_query(db.Attribute,
                       session=session).filter_by(uuid=uuid).delete()
        db.model_query(db.Option,
                       session=session).filter_by(uuid=uuid).delete()
        db.model_query(db.Node,
                       session=session).filter_by(uuid=uuid).delete()
def test_set_race(self):
    """Changing version_id in the DB makes _set_state detect the race."""
    with db.ensure_transaction() as session:
        stale_row = (db.model_query(db.Node, session=session)
                     .get(self.node_info.uuid))
        # simulate a concurrent writer bumping the version
        stale_row.update({'version_id': uuidutils.generate_uuid()})
        stale_row.save(session)

    six.assertRaisesRegex(self, utils.NodeStateRaceCondition,
                          'Node state mismatch', self.node_info._set_state,
                          istate.States.finished)
def create(conditions_json, actions_json, uuid=None, description=None):
    """Create a new rule in database.

    :param conditions_json: list of dicts with the following keys:
                            * op - operator
                            * field - JSON path to field to compare
                            Other keys are stored as is.
    :param actions_json: list of dicts with the following keys:
                         * action - action type
                         Other keys are stored as is.
    :param uuid: rule UUID, will be generated if empty
    :param description: human-readable rule description
    :returns: new IntrospectionRule object
    :raises: utils.Error on failure
    """
    uuid = uuid or uuidutils.generate_uuid()
    LOG.debug('Creating rule %(uuid)s with description "%(descr)s", '
              'conditions %(conditions)s and actions %(actions)s',
              {'uuid': uuid, 'descr': description,
               'conditions': conditions_json, 'actions': actions_json})

    # validate the input before touching the database
    conditions = _validate_conditions(conditions_json)
    actions = _validate_actions(actions_json)

    try:
        with db.ensure_transaction() as session:
            rule = db.Rule(uuid=uuid, description=description,
                           disabled=False, created_at=timeutils.utcnow())

            for field, op, multiple, invert, params in conditions:
                rule.conditions.append(db.RuleCondition(op=op,
                                                        field=field,
                                                        multiple=multiple,
                                                        invert=invert,
                                                        params=params))

            for action, params in actions:
                rule.actions.append(db.RuleAction(action=action,
                                                  params=params))

            rule.save(session)
    except db_exc.DBDuplicateEntry as exc:
        # a rule with this UUID already exists - surface a 409 conflict
        LOG.error('Database integrity error %s when '
                  'creating a rule', exc)
        raise utils.Error(_('Rule with UUID %s already exists') % uuid,
                          code=409)

    LOG.info('Created rule %(uuid)s with description "%(descr)s"',
             {'uuid': uuid, 'descr': description})
    return IntrospectionRule(uuid=uuid,
                             conditions=rule.conditions,
                             actions=rule.actions,
                             description=description)
def delete(uuid):
    """Delete a rule by its UUID."""
    with db.ensure_transaction() as session:
        # remove dependent rows first to keep referential integrity
        for child in (db.RuleAction, db.RuleCondition):
            db.model_query(child, session=session).filter_by(
                rule=uuid).delete()
        count = db.model_query(db.Rule, session=session).filter_by(
            uuid=uuid).delete()
        if not count:
            # raising inside the transaction rolls the deletes back
            raise utils.Error(_('Rule %s was not found') % uuid, code=404)

    LOG.info(_LI('Introspection rule %s was deleted'), uuid)
def clean_up():
    """Clean up the cache.

    * Finish introspection for timed out nodes.
    * Drop outdated node status information.

    :return: list of timed out node UUID's
    """
    with db.ensure_transaction() as session:
        # NOTE: a non-positive node_status_keep_time means "keep statuses
        # forever"; without this guard the threshold would be >= now and
        # every finished record would be dropped on each cleanup pass
        # (matches the behavior of the timeutils-based clean_up variant).
        if CONF.node_status_keep_time > 0:
            status_keep_threshold = (time.time() -
                                     CONF.node_status_keep_time)
            db.model_query(db.Node, session=session).filter(
                db.Node.finished_at.isnot(None),
                db.Node.finished_at < status_keep_threshold).delete()

        timeout = CONF.timeout
        if timeout <= 0:
            # introspection timeouts are disabled
            return []
        threshold = time.time() - timeout
        uuids = [row.uuid for row in
                 db.model_query(db.Node.uuid, session=session).filter(
                     db.Node.started_at < threshold,
                     db.Node.finished_at.is_(None)).all()]
        if not uuids:
            return []

        LOG.error(_LE('Introspection for nodes %s has timed out'), uuids)
        for u in uuids:
            # lock the node to avoid racing with a concurrent worker
            node_info = get_node(u, locked=True)
            try:
                if node_info.finished_at or node_info.started_at > threshold:
                    # the node finished or restarted while we waited for
                    # the lock - leave it alone
                    continue
                db.model_query(db.Node, session=session).filter_by(
                    uuid=u).update({'finished_at': time.time(),
                                    'error': 'Introspection timeout'})
                db.model_query(db.Attribute, session=session).filter_by(
                    uuid=u).delete()
                db.model_query(db.Option, session=session).filter_by(
                    uuid=u).delete()
            finally:
                node_info.release_lock()

    return uuids
def clean_up():
    """Clean up the cache.

    * Finish introspection for timed out nodes.
    * Drop outdated node status information.

    :return: list of timed out node UUID's
    """
    # a non-positive keep time means node statuses are kept forever
    if CONF.node_status_keep_time > 0:
        status_keep_threshold = (
            timeutils.utcnow() -
            datetime.timedelta(seconds=CONF.node_status_keep_time))
        with db.ensure_transaction() as session:
            db.model_query(db.Node, session=session).filter(
                db.Node.finished_at.isnot(None),
                db.Node.finished_at < status_keep_threshold).delete()

    timeout = CONF.timeout
    if timeout <= 0:
        # introspection timeouts are disabled
        return []
    threshold = timeutils.utcnow() - datetime.timedelta(seconds=timeout)
    uuids = [row.uuid for row in
             db.model_query(db.Node.uuid).filter(
                 db.Node.started_at < threshold,
                 db.Node.finished_at.is_(None)).all()]

    if not uuids:
        return []

    LOG.error('Introspection for nodes %s has timed out', uuids)
    for u in uuids:
        # lock the node to avoid racing with a concurrent worker
        node_info = get_node(u, locked=True)
        try:
            if node_info.finished_at or node_info.started_at > threshold:
                # the node finished or restarted while we waited for the
                # lock - nothing to do
                continue
            if node_info.state != istate.States.waiting:
                LOG.error('Something went wrong, timeout occurred '
                          'while introspection in "%s" state',
                          node_info.state,
                          node_info=node_info)
            # drive the FSM through the timeout event and record the error
            node_info.finished(
                istate.Events.timeout, error='Introspection timeout')
        finally:
            node_info.release_lock()

    return uuids
def add_attribute(self, name, value, session=None):
    """Store look up attribute for a node in the database.

    :param name: attribute name
    :param value: attribute value or list of possible values
    :param session: optional existing database session
    """
    values = [value] if not isinstance(value, list) else value

    with db.ensure_transaction(session) as session:
        for item in values:
            db.Attribute(uuid=uuidutils.generate_uuid(), name=name,
                         value=item, node_uuid=self.uuid).save(session)
    # Invalidate attributes so they're loaded on next usage
    self._attributes = None
def clean_up():
    """Clean up the cache.

    * Finish introspection for timed out nodes.
    * Drop outdated node status information.

    :return: list of timed out node UUID's
    """
    with db.ensure_transaction() as session:
        # NOTE: a non-positive node_status_keep_time means "keep statuses
        # forever"; without this guard the threshold would be >= now and
        # every finished record would be dropped on each cleanup pass
        # (matches the behavior of the timeutils-based clean_up variant).
        if CONF.node_status_keep_time > 0:
            status_keep_threshold = (time.time() -
                                     CONF.node_status_keep_time)
            db.model_query(db.Node, session=session).filter(
                db.Node.finished_at.isnot(None),
                db.Node.finished_at < status_keep_threshold).delete()

        timeout = CONF.timeout
        if timeout <= 0:
            # introspection timeouts are disabled
            return []
        threshold = time.time() - timeout
        uuids = [row.uuid for row in
                 db.model_query(db.Node.uuid, session=session).filter(
                     db.Node.started_at < threshold,
                     db.Node.finished_at.is_(None)).all()]
        if not uuids:
            return []

        LOG.error(_LE('Introspection for nodes %s has timed out'), uuids)
        for u in uuids:
            # lock the node to avoid racing with a concurrent worker
            node_info = get_node(u, locked=True)
            try:
                if node_info.finished_at or node_info.started_at > threshold:
                    # the node finished or restarted while we waited for
                    # the lock - leave it alone
                    continue
                db.model_query(db.Node, session=session).filter_by(
                    uuid=u).update({'finished_at': time.time(),
                                    'error': 'Introspection timeout'})
                db.model_query(db.Attribute, session=session).filter_by(
                    uuid=u).delete()
                db.model_query(db.Option, session=session).filter_by(
                    uuid=u).delete()
            finally:
                node_info.release_lock()

    return uuids
def finished(self, error=None):
    """Record status for this node.

    Also deletes look up attributes from the cache.

    :param error: error message
    """
    self.release_lock()

    self.finished_at = timeutils.utcnow()
    self.error = error

    with db.ensure_transaction() as session:
        self._commit(finished_at=self.finished_at, error=self.error)
        # attributes key on node_uuid, options key on uuid
        attr_query = db.model_query(db.Attribute, session=session)
        attr_query.filter_by(node_uuid=self.uuid).delete()
        opt_query = db.model_query(db.Option, session=session)
        opt_query.filter_by(uuid=self.uuid).delete()
def finished(self, error=None):
    """Record status for this node.

    Also deletes look up attributes from the cache.

    :param error: error message
    """
    self.finished_at = time.time()
    self.error = error

    with db.ensure_transaction() as session:
        # mark the node row as finished first
        node_query = db.model_query(db.Node, session=session)
        node_query.filter_by(uuid=self.uuid).update(
            {'finished_at': self.finished_at, 'error': error})
        # then drop the per-node cached data
        for cached_model in (db.Attribute, db.Option):
            db.model_query(cached_model, session=session).filter_by(
                uuid=self.uuid).delete()
def finished(self, error=None):
    """Record status for this node.

    Also deletes look up attributes from the cache.

    :param error: error message
    """
    self.release_lock()

    self.finished_at = time.time()
    self.error = error

    with db.ensure_transaction() as session:
        update_values = {'finished_at': self.finished_at, 'error': error}
        db.model_query(db.Node, session=session).filter_by(
            uuid=self.uuid).update(update_values)
        # drop cached lookup attributes and options for this node
        db.model_query(db.Attribute, session=session).filter_by(
            uuid=self.uuid).delete()
        db.model_query(db.Option, session=session).filter_by(
            uuid=self.uuid).delete()
def store_introspection_data(node_id, introspection_data, processed=True):
    """Store introspection data for this node.

    :param node_id: node UUID.
    :param introspection_data: A dictionary of introspection data
    :param processed: Specify the type of introspected data, set to False
                      indicates the data is unprocessed.
    """
    with db.ensure_transaction() as session:
        existing = db.model_query(
            db.IntrospectionData, session=session).filter_by(
            uuid=node_id, processed=processed).first()
        if existing is not None:
            # overwrite the stored payload in place
            existing.update({'data': introspection_data})
        else:
            # first write for this (uuid, processed) pair: insert a row
            new_row = db.IntrospectionData()
            new_row.update({'uuid': node_id, 'processed': processed,
                            'data': introspection_data})
            session.add(new_row)
        session.flush()
def finished(self, event, error=None):
    """Record status for this node and process a terminal transition.

    Also deletes look up attributes from the cache.

    :param event: the event to process
    :param error: error message
    """
    self.release_lock()

    self.finished_at = timeutils.utcnow()
    self.error = error

    with db.ensure_transaction() as session:
        # drive the state machine through the terminal event first
        self.fsm_event(event)
        self._commit(finished_at=self.finished_at, error=self.error)
        # attributes key on node_uuid, options key on uuid
        attr_query = db.model_query(db.Attribute, session=session)
        attr_query.filter_by(node_uuid=self.uuid).delete()
        opt_query = db.model_query(db.Option, session=session)
        opt_query.filter_by(uuid=self.uuid).delete()
def test_edge_state_transitions(self): """Assert state transitions work as expected in edge conditions.""" # multiple introspect calls self.call_introspect(self.uuid) self.call_introspect(self.uuid) eventlet.greenthread.sleep(DEFAULT_SLEEP) status = self.call_get_status(self.uuid) self.check_status(status, finished=False, state=istate.States.waiting) # an error -start-> starting state transition is possible self.call_abort_introspect(self.uuid) self.call_introspect(self.uuid) eventlet.greenthread.sleep(DEFAULT_SLEEP) status = self.call_get_status(self.uuid) self.check_status(status, finished=False, state=istate.States.waiting) # double abort works self.call_abort_introspect(self.uuid) status = self.call_get_status(self.uuid) error = status['error'] self.check_status(status, finished=True, state=istate.States.error, error=error) self.call_abort_introspect(self.uuid) status = self.call_get_status(self.uuid) self.check_status(status, finished=True, state=istate.States.error, error=error) # preventing stale data race condition # waiting -> processing is a strict state transition self.call_introspect(self.uuid) eventlet.greenthread.sleep(DEFAULT_SLEEP) row = self.db_row() row.state = istate.States.processing with db.ensure_transaction() as session: row.save(session) self.call_continue(self.data, expect_error=400) status = self.call_get_status(self.uuid) self.check_status(status, finished=True, state=istate.States.error, error=mock.ANY) self.assertIn('no defined transition', status['error']) # multiple reapply calls self.call_introspect(self.uuid) eventlet.greenthread.sleep(DEFAULT_SLEEP) self.call_continue(self.data) eventlet.greenthread.sleep(DEFAULT_SLEEP) self.call_reapply(self.uuid) status = self.call_get_status(self.uuid) self.check_status(status, finished=True, state=istate.States.finished, error=None) self.call_reapply(self.uuid) # assert an finished -reapply-> reapplying -> finished state transition status = self.call_get_status(self.uuid) 
self.check_status(status, finished=True, state=istate.States.finished, error=None)
def test_set(self):
    """Setting the version id is visible via a fresh DB query."""
    with db.ensure_transaction() as session:
        fresh_id = uuidutils.generate_uuid()
        self.node_info._set_version_id(fresh_id, session)
        row = db.model_query(db.Node).get(self.node_info.uuid)
        self.assertEqual(self.node_info.version_id, row.version_id)
def create(conditions_json, actions_json, uuid=None, description=None):
    """Create a new rule in database.

    :param conditions_json: list of dicts with the following keys:
                            * op - operator
                            * field - JSON path to field to compare
                            Other keys are stored as is.
    :param actions_json: list of dicts with the following keys:
                         * action - action type
                         Other keys are stored as is.
    :param uuid: rule UUID, will be generated if empty
    :param description: human-readable rule description
    :returns: new IntrospectionRule object
    :raises: utils.Error on failure
    """
    uuid = uuid or uuidutils.generate_uuid()
    LOG.debug('Creating rule %(uuid)s with description "%(descr)s", '
              'conditions %(conditions)s and actions %(actions)s',
              {'uuid': uuid, 'descr': description,
               'conditions': conditions_json, 'actions': actions_json})

    # structural validation of both inputs against their JSON schemas
    try:
        jsonschema.validate(conditions_json, conditions_schema())
    except jsonschema.ValidationError as exc:
        raise utils.Error(_('Validation failed for conditions: %s') % exc)

    try:
        jsonschema.validate(actions_json, actions_schema())
    except jsonschema.ValidationError as exc:
        raise utils.Error(_('Validation failed for actions: %s') % exc)

    cond_mgr = plugins_base.rule_conditions_manager()
    act_mgr = plugins_base.rule_actions_manager()

    conditions = []
    for cond_json in conditions_json:
        field = cond_json['field']
        # the field must be a parseable JSON path expression
        try:
            jsonpath.parse(field)
        except Exception as exc:
            raise utils.Error(_('Unable to parse field JSON path %(field)s: '
                                '%(error)s') % {'field': field,
                                                'error': exc})

        # every key except the reserved ones is forwarded to the
        # condition plugin as a parameter
        plugin = cond_mgr[cond_json['op']].obj
        params = {k: v for k, v in cond_json.items()
                  if k not in ('op', 'field', 'multiple')}
        try:
            plugin.validate(params)
        except ValueError as exc:
            raise utils.Error(_('Invalid parameters for operator %(op)s: '
                                '%(error)s') %
                              {'op': cond_json['op'], 'error': exc})

        conditions.append((cond_json['field'], cond_json['op'],
                           cond_json.get('multiple', 'any'), params))

    actions = []
    for action_json in actions_json:
        # every key except 'action' is forwarded to the action plugin
        plugin = act_mgr[action_json['action']].obj
        params = {k: v for k, v in action_json.items() if k != 'action'}
        try:
            plugin.validate(params)
        except ValueError as exc:
            raise utils.Error(_('Invalid parameters for action %(act)s: '
                                '%(error)s') %
                              {'act': action_json['action'], 'error': exc})

        actions.append((action_json['action'], params))

    try:
        with db.ensure_transaction() as session:
            rule = db.Rule(uuid=uuid, description=description,
                           disabled=False, created_at=timeutils.utcnow())

            for field, op, multiple, params in conditions:
                rule.conditions.append(db.RuleCondition(op=op, field=field,
                                                        multiple=multiple,
                                                        params=params))

            for action, params in actions:
                rule.actions.append(db.RuleAction(action=action,
                                                  params=params))

            rule.save(session)
    except db_exc.DBDuplicateEntry as exc:
        # a rule with this UUID already exists - surface a 409 conflict
        LOG.error(_LE('Database integrity error %s when '
                      'creating a rule'), exc)
        raise utils.Error(_('Rule with UUID %s already exists') % uuid,
                          code=409)

    LOG.info(_LI('Created rule %(uuid)s with description "%(descr)s"'),
             {'uuid': uuid, 'descr': description})
    return IntrospectionRule(uuid=uuid,
                             conditions=rule.conditions,
                             actions=rule.actions,
                             description=description)
def test_edge_state_transitions(self, get_mock, store_mock):
    """Assert state transitions work as expected in edge conditions."""
    cfg.CONF.set_override('store_data', 'swift', 'processing')

    # ramdisk data copy
    # please mind the data is changed during processing
    ramdisk_data = json.dumps(copy.deepcopy(self.data))
    get_mock.return_value = ramdisk_data

    # multiple introspect calls
    self.call_introspect(self.uuid)
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=False, state=istate.States.waiting)

    # an error -start-> starting state transition is possible
    self.call_abort_introspect(self.uuid)
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=False, state=istate.States.waiting)

    # double abort works
    self.call_abort_introspect(self.uuid)
    status = self.call_get_status(self.uuid)
    error = status['error']
    self.check_status(status, finished=True, state=istate.States.error,
                      error=error)
    self.call_abort_introspect(self.uuid)
    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=True, state=istate.States.error,
                      error=error)

    # preventing stale data race condition
    # waiting -> processing is a strict state transition
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    row = self.db_row()
    row.state = istate.States.processing
    with db.ensure_transaction() as session:
        row.save(session)
    self.call_continue(self.data, expect_error=400)
    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=True, state=istate.States.error,
                      error=mock.ANY)
    self.assertIn('no defined transition', status['error'])

    # multiple reapply calls
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    self.call_continue(self.data)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    self.call_reapply(self.uuid)
    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=True, state=istate.States.finished,
                      error=None)
    self.call_reapply(self.uuid)
    # assert a finished -reapply-> reapplying -> finished state transition
    status = self.call_get_status(self.uuid)
    self.check_status(status, finished=True, state=istate.States.finished,
                      error=None)
def test_edge_state_transitions(self, get_mock, store_mock):
    """Assert state transitions work as expected in edge conditions."""
    cfg.CONF.set_override('store_data', 'swift', 'processing')

    # ramdisk data copy
    # please mind the data is changed during processing
    ramdisk_data = json.dumps(copy.deepcopy(self.data))
    get_mock.return_value = ramdisk_data

    # multiple introspect calls
    self.call_introspect(self.uuid)
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    # TODO(milan): switch to API once the introspection status
    # endpoint exposes the state information
    row = self.db_row()
    self.assertEqual(istate.States.waiting, row.state)

    # an error -start-> starting state transition is possible
    self.call_abort_introspect(self.uuid)
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    row = self.db_row()
    self.assertEqual(istate.States.waiting, row.state)

    # double abort works
    self.call_abort_introspect(self.uuid)
    row = self.db_row()
    version_id = row.version_id
    error = row.error
    self.assertEqual(istate.States.error, row.state)
    self.call_abort_introspect(self.uuid)
    row = self.db_row()
    self.assertEqual(istate.States.error, row.state)
    # assert the error didn't change
    self.assertEqual(error, row.error)
    self.assertEqual(version_id, row.version_id)

    # preventing stale data race condition
    # waiting -> processing is a strict state transition
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    row = self.db_row()
    row.state = istate.States.processing
    with db.ensure_transaction() as session:
        row.save(session)
    self.call_continue(self.data, expect_error=400)
    row = self.db_row()
    self.assertEqual(istate.States.error, row.state)
    self.assertIn('no defined transition', row.error)

    # multiple reapply calls
    self.call_introspect(self.uuid)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    self.call_continue(self.data)
    eventlet.greenthread.sleep(DEFAULT_SLEEP)
    self.call_reapply(self.uuid)
    row = self.db_row()
    version_id = row.version_id
    self.assertEqual(istate.States.finished, row.state)
    self.assertIsNone(row.error)
    self.call_reapply(self.uuid)
    # assert a finished -reapply-> reapplying -> finished state transition
    row = self.db_row()
    self.assertEqual(istate.States.finished, row.state)
    self.assertIsNone(row.error)
    # reapply bumps the version even though the state is unchanged
    self.assertNotEqual(version_id, row.version_id)