def _mark_inquiry_complete(self, inquiry_id, result): """Mark Inquiry as completed This function updates the local LiveAction and Execution with a successful status as well as call the "post_run" function for the Inquirer runner so that the appropriate callback function is executed :param inquiry: The Inquiry for which the response is given :param requester_user: The user providing the response :rtype: bool - True if requester_user is able to respond. False if not. """ # Update inquiry's execution result with a successful status and the validated response liveaction_db = action_utils.update_liveaction_status( status=action_constants.LIVEACTION_STATUS_SUCCEEDED, runner_info=system_info.get_process_info(), result=result, liveaction_id=inquiry_id) executions.update_execution(liveaction_db) # Call Inquiry runner's post_run to trigger callback to workflow runner_container = get_runner_container() action_db = get_action_by_ref(liveaction_db.action) runnertype_db = get_runnertype_by_name(action_db.runner_type['name']) runner = runner_container._get_runner(runnertype_db, action_db, liveaction_db) runner.post_run(status=action_constants.LIVEACTION_STATUS_SUCCEEDED, result=result) return liveaction_db
def respond(inquiry, response, requester=None):
    """Record a response to an inquiry, resume its paused parent workflow, and succeed it.

    :param inquiry: Inquiry (action execution) being responded to.
    :param response: Validated response payload, stored under result["response"].
    :param requester: User providing the response; defaults to the configured system user.
    :rtype: LiveActionDB - the succeeded inquiry liveaction.
    """
    # Set requester to system user if not provided.
    if not requester:
        requester = cfg.CONF.system_user.user

    # Retrieve the liveaction from the database.
    liveaction_db = lv_db_access.LiveAction.get_by_id(
        inquiry.liveaction.get("id"))

    # Resume the parent workflow first. If the action execution for the inquiry is updated first,
    # it triggers handling of the action execution completion which will interact with the paused
    # parent workflow. The resuming logic that is executed here will then race with the completion
    # of the inquiry action execution, which will randomly result in the parent workflow stuck in
    # paused state.
    if liveaction_db.context.get("parent"):
        LOG.debug('Resuming workflow parent(s) for inquiry "%s".' % str(inquiry.id))

        # For action execution under Action Chain workflows, request the entire
        # workflow to resume. Orquesta handles resume differently and so does not require root
        # to resume. Orquesta allows for specific branches to resume while others are paused.
        # When there are no other paused branches, the conductor will resume the rest of the
        # workflow.
        resume_target = (
            action_service.get_parent_liveaction(liveaction_db)
            if workflow_service.is_action_execution_under_workflow_context(liveaction_db)
            else action_service.get_root_liveaction(liveaction_db))

        if resume_target.status in action_constants.LIVEACTION_PAUSE_STATES:
            action_service.request_resume(resume_target, requester)

    # Succeed the liveaction and update result with the inquiry response.
    LOG.debug('Updating response for inquiry "%s".' % str(inquiry.id))
    result = fast_deepcopy_dict(inquiry.result)
    result["response"] = response

    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_SUCCEEDED,
        end_timestamp=date_utils.get_datetime_utc_now(),
        runner_info=sys_info_utils.get_process_info(),
        result=result,
        liveaction_id=str(liveaction_db.id),
    )

    # Sync the liveaction with the corresponding action execution.
    execution_service.update_execution(liveaction_db)

    # Invoke inquiry post run to trigger a callback to parent workflow.
    LOG.debug('Invoking post run for inquiry "%s".' % str(inquiry.id))
    runner_container = container.get_runner_container()
    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(
        action_db.runner_type["name"])
    runner = runner_container._get_runner(runnertype_db, action_db, liveaction_db)
    runner.post_run(status=action_constants.LIVEACTION_STATUS_SUCCEEDED, result=result)

    return liveaction_db
def register_service_in_service_registry(service, capabilities=None, start_heart=True):
    """
    Register provided service in the service registry and start the heartbeat process.

    :param service: Service name which will also be used for a group name (e.g. "api").
    :type service: ``str``

    :param capabilities: Optional metadata associated with the service.
    :type capabilities: ``dict``

    :param start_heart: True to start the coordination heartbeat process.
    :type start_heart: ``bool``
    """
    # Guard against the None default - the dict is populated with hostname / pid below
    # and subscripting None would raise TypeError.
    if capabilities is None:
        capabilities = {}

    # NOTE: It's important that we pass start_heart=True to start the heartbeat process
    coordinator = coordination.get_coordinator(start_heart=start_heart)

    member_id = coordination.get_member_id()

    # 1. Create a group with the name of the service
    if not isinstance(service, six.binary_type):
        group_id = service.encode('utf-8')
    else:
        group_id = service

    try:
        coordinator.create_group(group_id).get()
    except GroupAlreadyExist:
        pass

    # Include common capabilities such as hostname and process ID
    proc_info = system_info.get_process_info()
    capabilities['hostname'] = proc_info['hostname']
    capabilities['pid'] = proc_info['pid']

    # 2. Join the group as a member
    LOG.debug('Joining service registry group "%s" as member_id "%s" with capabilities "%s"' %
              (group_id, member_id, capabilities))
    return coordinator.join_group(group_id, capabilities=capabilities).get()
def coordinator_setup():
    """
    Set up and start the client for the coordination service.

    Example connection URLs:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    url = cfg.CONF.coordination.url
    lock_timeout = cfg.CONF.coordination.lock_timeout

    proc_info = system_info.get_process_info()
    member_id = six.b('%s_%d' % (proc_info['hostname'], proc_info['pid']))

    if not url:
        # No backend URL configured - fall back to the no-op driver.
        # Note: We don't use tooz to obtain a reference since for this to work we would need to
        # register a plugin inside setup.py entry_point and use python setup.py develop for tests
        # to work
        coordinator = NoOpDriver(member_id)
    else:
        coordinator = coordination.get_coordinator(url, member_id,
                                                   lock_timeout=lock_timeout)

    coordinator.start()
    return coordinator
def coordinator_setup():
    """
    Configure and start a coordination client for this process.

    Example connection URLs:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    # The member id identifies this process to the coordination backend.
    proc_info = system_info.get_process_info()
    member_id = '%s_%d' % (proc_info['hostname'], proc_info['pid'])

    url = cfg.CONF.coordination.url
    if url:
        coordinator = coordination.get_coordinator(
            url, member_id, lock_timeout=cfg.CONF.coordination.lock_timeout)
    else:
        # Use a no-op backend
        # Note: We don't use tooz to obtain a reference since for this to work we would need to
        # register a plugin inside setup.py entry_point and use python setup.py develop for tests
        # to work
        coordinator = NoOpDriver(member_id)

    coordinator.start()
    return coordinator
def test_get_group_members(self):
    proc_info = system_info.get_process_info()
    member_id = get_member_id()

    # A group id that was never registered results in a 404
    resp = self.app.get('/v1/service_registry/groups/doesnt-exist/members',
                        expect_errors=True)
    self.assertEqual(resp.status_int, 404)
    self.assertEqual(resp.json['faultstring'],
                     'Group with ID "doesnt-exist" not found.')

    # A registered group reports its single member with capabilities
    resp = self.app.get('/v1/service_registry/groups/mock_service/members')
    self.assertEqual(resp.status_int, 200)

    expected_member = {
        'group_id': 'mock_service',
        'member_id': member_id.decode('utf-8'),
        'capabilities': {
            'key1': 'value1',
            'name': 'mock_service',
            'hostname': proc_info['hostname'],
            'pid': proc_info['pid']
        }
    }
    self.assertEqual(resp.json, {'members': [expected_member]})
def test_get_group_members(self):
    proc_info = system_info.get_process_info()
    member_id = get_member_id()

    # Unknown group -> 404 with a descriptive fault string
    resp = self.app.get('/v1/service_registry/groups/doesnt-exist/members',
                        expect_errors=True)
    self.assertEqual(resp.status_int, 404)
    self.assertEqual(resp.json['faultstring'],
                     'Group with ID "doesnt-exist" not found.')

    # Existing group -> 200 with exactly one member entry
    resp = self.app.get('/v1/service_registry/groups/mock_service/members')
    self.assertEqual(resp.status_int, 200)

    capabilities = {
        'key1': 'value1',
        'name': 'mock_service',
        'hostname': proc_info['hostname'],
        'pid': proc_info['pid']
    }
    member = {
        'group_id': 'mock_service',
        'member_id': member_id.decode('utf-8'),
        'capabilities': capabilities
    }
    self.assertEqual(resp.json, {'members': [member]})
def test_get_group_members(self):
    proc_info = system_info.get_process_info()
    member_id = get_member_id()

    # Case 1: requesting members of a non-existent group yields a 404.
    url = "/v1/service_registry/groups/doesnt-exist/members"
    resp = self.app.get(url, expect_errors=True)
    self.assertEqual(resp.status_int, 404)
    self.assertEqual(resp.json["faultstring"],
                     'Group with ID "doesnt-exist" not found.')

    # Case 2: a registered group yields its single member and capabilities.
    resp = self.app.get("/v1/service_registry/groups/mock_service/members")
    self.assertEqual(resp.status_int, 200)

    expected = {
        "members": [
            {
                "group_id": "mock_service",
                "member_id": member_id.decode("utf-8"),
                "capabilities": {
                    "key1": "value1",
                    "name": "mock_service",
                    "hostname": proc_info["hostname"],
                    "pid": proc_info["pid"],
                },
            }
        ]
    }
    self.assertEqual(resp.json, expected)
def get_member_id():
    """
    Retrieve the member id for the current process.

    The id is composed of the local hostname and the process id
    ("<hostname>_<pid>").

    :rtype: ``bytes``
    """
    proc_info = system_info.get_process_info()
    member_id = six.b("%s_%d" % (proc_info["hostname"], proc_info["pid"]))
    return member_id
def get_member_id():
    """
    Retrieve the member id for the current process.

    The id is composed of the local hostname and the process id
    ("<hostname>_<pid>").

    :rtype: ``bytes``
    """
    proc_info = system_info.get_process_info()
    member_id = six.b('%s_%d' % (proc_info['hostname'], proc_info['pid']))
    return member_id
def _run_action(self, liveaction_db):
    """Run one liveaction through the runner container and record the outcome."""
    # Stamp the liveaction with information about the executing worker process.
    proc_info = system_info.get_process_info()

    # Transition the liveaction to "running" before dispatching it.
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=proc_info,
        liveaction_id=liveaction_db.id)

    # Track the in-flight execution so it can be cleaned up on worker shutdown.
    self._running_liveactions.add(liveaction_db.id)

    execution_db = executions.update_execution(liveaction_db)

    # Launch the action.
    audit_extra = {
        'action_execution_db': execution_db,
        'liveaction_db': liveaction_db
    }
    LOG.audit('Launching action execution.', extra=audit_extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info(
        'Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
        execution_db.id, liveaction_db.id, liveaction_db.status)

    extra = {'liveaction_db': liveaction_db}
    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug('Runner dispatch produced result: %s', result)
        if not result:
            raise ActionRunnerException('Failed to execute action.')
    except:
        _, error, trace = sys.exc_info()
        extra['error'] = str(error)
        LOG.info('Action "%s" failed: %s' % (liveaction_db.action, str(error)), extra=extra)

        # Persist the failure (error message plus truncated traceback) and re-raise.
        liveaction_db = action_utils.update_liveaction_status(
            status=action_constants.LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id,
            result={
                'error': str(error),
                'traceback': ''.join(traceback.format_tb(trace, 20))
            })
        executions.update_execution(liveaction_db)
        raise
    finally:
        # On worker shutdown the id may already have been removed from
        # _running_liveactions as subprocesses are terminated; discard() avoids
        # raising KeyError from this finally block.
        self._running_liveactions.discard(liveaction_db.id)

    return result
def process(self, liveaction):
    """Dispatch the LiveAction to the appropriate action runner.

    LiveActions in statuses other than "scheduled" are ignored. If the
    LiveAction is already canceled and its result is empty, a generic
    cancellation message is recorded on it.

    :param liveaction: Scheduled action execution request.
    :type liveaction: ``st2common.models.db.liveaction.LiveActionDB``

    :rtype: ``dict``
    """
    # Anything not in the "scheduled" state is logged and skipped; canceled
    # liveactions without a result additionally get a generic message recorded.
    if liveaction.status != action_constants.LIVEACTION_STATUS_SCHEDULED:
        LOG.info('%s is not executing %s (id=%s) with "%s" status.',
                 self.__class__.__name__, type(liveaction), liveaction.id,
                 liveaction.status)

        if (liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED and
                not liveaction.result):
            updated_liveaction = action_utils.update_liveaction_status(
                status=liveaction.status,
                result={'message': 'Action execution canceled by user.'},
                liveaction_id=liveaction.id)
            executions.update_execution(updated_liveaction)

        return

    try:
        liveaction_db = action_utils.get_liveaction_by_id(liveaction.id)
    except StackStormDBObjectNotFoundError:
        LOG.exception('Failed to find liveaction %s in the database.', liveaction.id)
        raise

    # Stamp the liveaction with the executing process' info and mark it running.
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=system_info.get_process_info(),
        liveaction_id=liveaction_db.id)

    action_execution_db = executions.update_execution(liveaction_db)

    # Launch action
    extra = {'action_execution_db': action_execution_db, 'liveaction_db': liveaction_db}
    LOG.audit('Launching action execution.', extra=extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info('Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
             action_execution_db.id, liveaction_db.id, liveaction.status)

    return self._run_action(liveaction_db)
def respond(inquiry, response, requester=None):
    """Record a response to an inquiry, resume its paused parent workflow, and succeed it.

    :param inquiry: Inquiry (action execution) being responded to.
    :param response: Validated response payload, stored under result['response'].
    :param requester: User providing the response; defaults to the configured system user.
    :rtype: LiveActionDB - the succeeded inquiry liveaction.
    """
    # Set requester to system user if not provided.
    if not requester:
        requester = cfg.CONF.system_user.user

    # Retrieve the liveaction from the database.
    liveaction_db = lv_db_access.LiveAction.get_by_id(inquiry.liveaction.get('id'))

    # Resume the parent workflow first. If the action execution for the inquiry is updated first,
    # it triggers handling of the action execution completion which will interact with the paused
    # parent workflow. The resuming logic that is executed here will then race with the completion
    # of the inquiry action execution, which will randomly result in the parent workflow stuck in
    # paused state.
    if liveaction_db.context.get('parent'):
        LOG.debug('Resuming workflow parent(s) for inquiry "%s".' % str(inquiry.id))

        # For action execution under Action Chain and Mistral workflows, request the entire
        # workflow to resume. Orquesta handles resume differently and so does not require root
        # to resume. Orquesta allows for specific branches to resume while others are paused.
        # When there are no other paused branches, the conductor will resume the rest of the
        # workflow.
        resume_target = (
            action_service.get_parent_liveaction(liveaction_db)
            if workflow_service.is_action_execution_under_workflow_context(liveaction_db)
            else action_service.get_root_liveaction(liveaction_db)
        )

        if resume_target.status in action_constants.LIVEACTION_PAUSE_STATES:
            action_service.request_resume(resume_target, requester)

    # Succeed the liveaction and update result with the inquiry response.
    LOG.debug('Updating response for inquiry "%s".' % str(inquiry.id))
    result = copy.deepcopy(inquiry.result)
    result['response'] = response

    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_SUCCEEDED,
        end_timestamp=date_utils.get_datetime_utc_now(),
        runner_info=sys_info_utils.get_process_info(),
        result=result,
        liveaction_id=str(liveaction_db.id)
    )

    # Sync the liveaction with the corresponding action execution.
    execution_service.update_execution(liveaction_db)

    # Invoke inquiry post run to trigger a callback to parent workflow.
    LOG.debug('Invoking post run for inquiry "%s".' % str(inquiry.id))
    runner_container = container.get_runner_container()
    action_db = action_utils.get_action_by_ref(liveaction_db.action)
    runnertype_db = action_utils.get_runnertype_by_name(action_db.runner_type['name'])
    runner = runner_container._get_runner(runnertype_db, action_db, liveaction_db)
    runner.post_run(status=action_constants.LIVEACTION_STATUS_SUCCEEDED, result=result)

    return liveaction_db
def execute_action(self, liveaction):
    """Execute the given liveaction unless it was canceled or has already finished."""
    # Note: We only want to execute actions which haven't completed yet
    if liveaction.status == LIVEACTION_STATUS_CANCELED:
        LOG.info('Not executing liveaction %s. User canceled execution.', liveaction.id)
        if not liveaction.result:
            update_liveaction_status(
                status=LIVEACTION_STATUS_CANCELED,
                result={'message': 'Action execution canceled by user.'},
                liveaction_id=liveaction.id)
        return

    if liveaction.status in (LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED):
        LOG.info('Ignoring liveaction %s which has already finished', liveaction.id)
        return

    try:
        liveaction_db = get_liveaction_by_id(liveaction.id)
    except StackStormDBObjectNotFoundError:
        LOG.exception('Failed to find liveaction %s in the database.', liveaction.id)
        raise

    # Stamp the liveaction with the executing process' info and mark it running.
    liveaction_db = update_liveaction_status(
        status=LIVEACTION_STATUS_RUNNING,
        runner_info=system_info.get_process_info(),
        liveaction_id=liveaction_db.id)
    action_execution_db = executions.update_execution(liveaction_db)

    # Launch action
    extra = {'action_execution_db': action_execution_db, 'liveaction_db': liveaction_db}
    LOG.audit('Launching action execution.', extra=extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info('{~}action_execution: %s / {~}live_action: %s',
             action_execution_db.id, liveaction_db.id)

    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug('Runner dispatch produced result: %s', result)
        if not result:
            raise ActionRunnerException('Failed to execute action.')
    except Exception:
        # Mark the liveaction failed, then propagate the error.
        liveaction_db = update_liveaction_status(
            status=LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id)
        raise

    return result
def _run_action(self, liveaction_db):
    """Dispatch a single liveaction to its runner and persist the outcome.

    :param liveaction_db: Liveaction to execute.
    :rtype: ``dict`` - result produced by the runner container dispatch.
    """
    # stamp liveaction with process_info
    runner_info = system_info.get_process_info()

    # Update liveaction status to "running"
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=runner_info,
        liveaction_id=liveaction_db.id)

    # Keep the action execution document in sync with the liveaction.
    action_execution_db = executions.update_execution(liveaction_db)

    # Launch action
    extra = {
        'action_execution_db': action_execution_db,
        'liveaction_db': liveaction_db
    }
    LOG.audit('Launching action execution.', extra=extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info(
        'Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
        action_execution_db.id, liveaction_db.id, liveaction_db.status)

    extra = {'liveaction_db': liveaction_db}

    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug('Runner dispatch produced result: %s', result)
        if not result:
            raise ActionRunnerException('Failed to execute action.')
    # NOTE(review): bare "except" also catches SystemExit / KeyboardInterrupt,
    # so any failure is recorded before being re-raised - confirm intentional.
    except:
        _, ex, tb = sys.exc_info()
        extra['error'] = str(ex)
        LOG.info('Action "%s" failed: %s' % (liveaction_db.action, str(ex)), extra=extra)

        # Persist the failure (error message plus traceback capped at 20 frames)
        # and propagate the exception to the caller.
        liveaction_db = action_utils.update_liveaction_status(
            status=action_constants.LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id,
            result={
                'error': str(ex),
                'traceback': ''.join(traceback.format_tb(tb, 20))
            })
        executions.update_execution(liveaction_db)
        raise

    return result
def _run_action(self, liveaction_db):
    """Dispatch a single liveaction to its runner, tracking it while in flight.

    :param liveaction_db: Liveaction to execute.
    :rtype: ``dict`` - result produced by the runner container dispatch.
    """
    # stamp liveaction with process_info
    runner_info = system_info.get_process_info()

    # Update liveaction status to "running"
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=runner_info,
        liveaction_id=liveaction_db.id)

    # Track the in-flight execution so shutdown handling can find it.
    self._running_liveactions.add(liveaction_db.id)

    action_execution_db = executions.update_execution(liveaction_db)

    # Launch action
    extra = {'action_execution_db': action_execution_db, 'liveaction_db': liveaction_db}
    LOG.audit('Launching action execution.', extra=extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info('Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
             action_execution_db.id, liveaction_db.id, liveaction_db.status)

    extra = {'liveaction_db': liveaction_db}

    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug('Runner dispatch produced result: %s', result)
        # An empty result is only an error for non-workflow actions.
        if not result and not liveaction_db.action_is_workflow:
            raise ActionRunnerException('Failed to execute action.')
    except:
        _, ex, tb = sys.exc_info()
        extra['error'] = str(ex)
        LOG.info('Action "%s" failed: %s' % (liveaction_db.action, str(ex)), extra=extra)

        # Persist the failure (error message plus traceback capped at 20 frames)
        # and propagate the exception to the caller.
        liveaction_db = action_utils.update_liveaction_status(
            status=action_constants.LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id,
            result={'error': str(ex), 'traceback': ''.join(traceback.format_tb(tb, 20))})
        executions.update_execution(liveaction_db)
        raise
    finally:
        # In the case of worker shutdown, the items are removed from _running_liveactions.
        # As the subprocesses for action executions are terminated, this finally block
        # will be executed. Set remove will result in KeyError if item no longer exists.
        # Use set discard to not raise the KeyError.
        self._running_liveactions.discard(liveaction_db.id)

    return result
def _run_action(self, liveaction_db):
    """Dispatch a single liveaction to its runner, tracking it while in flight.

    :param liveaction_db: Liveaction to execute.
    :rtype: ``dict`` - result produced by the runner container dispatch.
    """
    # stamp liveaction with process_info
    runner_info = system_info.get_process_info()

    # Update liveaction status to "running"
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=runner_info,
        liveaction_id=liveaction_db.id
    )

    self._running_liveactions.add(liveaction_db.id)

    action_execution_db = executions.update_execution(liveaction_db)

    # Launch action
    extra = {"action_execution_db": action_execution_db, "liveaction_db": liveaction_db}
    LOG.audit("Launching action execution.", extra=extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info(
        'Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
        action_execution_db.id,
        liveaction_db.id,
        liveaction_db.status,
    )

    extra = {"liveaction_db": liveaction_db}

    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug("Runner dispatch produced result: %s", result)
        if not result:
            raise ActionRunnerException("Failed to execute action.")
    except:
        _, ex, tb = sys.exc_info()
        extra["error"] = str(ex)
        LOG.info('Action "%s" failed: %s' % (liveaction_db.action, str(ex)), extra=extra)

        # Persist the failure (error message plus traceback capped at 20 frames)
        # and propagate the exception to the caller.
        liveaction_db = action_utils.update_liveaction_status(
            status=action_constants.LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id,
            result={"error": str(ex), "traceback": "".join(traceback.format_tb(tb, 20))},
        )
        executions.update_execution(liveaction_db)
        raise
    finally:
        # In the case of worker shutdown, the items are removed from _running_liveactions
        # as the action subprocesses are terminated. set.remove() would then raise
        # KeyError from inside this finally block and mask the real exception, so use
        # discard() which is a no-op when the id is already gone.
        self._running_liveactions.discard(liveaction_db.id)

    return result
def coordinator_setup():
    """
    Set up and start the client for the coordination service.

    Example connection URLs:
        zake://
        file:///tmp
        redis://username:password@host:port
        mysql://username:password@host:port/dbname
    """
    # The member id identifies this process to the coordination backend.
    proc_info = system_info.get_process_info()
    member_id = '%s_%d' % (proc_info['hostname'], proc_info['pid'])

    coordinator = tooz_coord.get_coordinator(
        cfg.CONF.coordination.url,
        member_id,
        lock_timeout=cfg.CONF.coordination.lock_timeout)
    coordinator.start()

    return coordinator
def _run_action(self, liveaction_db):
    """Dispatch one liveaction to its runner and persist success or failure."""
    # Stamp the liveaction with info about the executing worker process and
    # transition it to "running" before dispatching.
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=system_info.get_process_info(),
        liveaction_id=liveaction_db.id)

    execution_db = executions.update_execution(liveaction_db)

    # Launch the action.
    audit_extra = {'action_execution_db': execution_db, 'liveaction_db': liveaction_db}
    LOG.audit('Launching action execution.', extra=audit_extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    LOG.info('Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
             execution_db.id, liveaction_db.id, liveaction_db.status)

    extra = {'liveaction_db': liveaction_db}
    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug('Runner dispatch produced result: %s', result)
        if not result:
            raise ActionRunnerException('Failed to execute action.')
    except:
        _, error, trace = sys.exc_info()
        extra['error'] = str(error)
        LOG.info('Action "%s" failed: %s' % (liveaction_db.action, str(error)), extra=extra)

        # Persist the failure (error message plus truncated traceback) and re-raise.
        liveaction_db = action_utils.update_liveaction_status(
            status=action_constants.LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id,
            result={'error': str(error),
                    'traceback': ''.join(traceback.format_tb(trace, 20))})
        executions.update_execution(liveaction_db)
        raise

    return result
def process(self, liveaction):
    """Dispatches the LiveAction to appropriate action runner.

    LiveAction in statuses other than "scheduled" are ignored. If
    LiveAction is already canceled and result is empty, the LiveAction
    is updated with a generic exception message.

    :param liveaction: Scheduled action execution request.
    :type liveaction: ``st2common.models.db.liveaction.LiveActionDB``

    :rtype: ``dict``
    """
    if liveaction.status == action_constants.LIVEACTION_STATUS_CANCELED:
        LOG.info('%s is not executing %s (id=%s) with "%s" status.',
                 self.__class__.__name__, type(liveaction), liveaction.id,
                 liveaction.status)
        # Record a generic cancellation message if the canceler left no result.
        if not liveaction.result:
            action_utils.update_liveaction_status(
                status=liveaction.status,
                result={'message': 'Action execution canceled by user.'},
                liveaction_id=liveaction.id)
        return

    if liveaction.status != action_constants.LIVEACTION_STATUS_SCHEDULED:
        LOG.info('%s is not executing %s (id=%s) with "%s" status.',
                 self.__class__.__name__, type(liveaction), liveaction.id,
                 liveaction.status)
        return

    try:
        liveaction_db = action_utils.get_liveaction_by_id(liveaction.id)
    except StackStormDBObjectNotFoundError:
        LOG.exception('Failed to find liveaction %s in the database.', liveaction.id)
        raise

    # stamp liveaction with process_info
    runner_info = system_info.get_process_info()

    # Update liveaction status to "running"
    liveaction_db = action_utils.update_liveaction_status(
        status=action_constants.LIVEACTION_STATUS_RUNNING,
        runner_info=runner_info,
        liveaction_id=liveaction_db.id)

    action_execution_db = executions.update_execution(liveaction_db)

    # Launch action
    extra = {'action_execution_db': action_execution_db, 'liveaction_db': liveaction_db}
    LOG.audit('Launching action execution.', extra=extra)

    # the extra field will not be shown in non-audit logs so temporarily log at info.
    # NOTE(review): this logs liveaction.status (the pre-update value), not
    # liveaction_db.status ("running") - confirm that is intentional.
    LOG.info('Dispatched {~}action_execution: %s / {~}live_action: %s with "%s" status.',
             action_execution_db.id, liveaction_db.id, liveaction.status)

    try:
        result = self.container.dispatch(liveaction_db)
        LOG.debug('Runner dispatch produced result: %s', result)
        if not result:
            raise ActionRunnerException('Failed to execute action.')
    except Exception as e:
        extra['error'] = str(e)
        LOG.info('Action "%s" failed: %s' % (liveaction_db.action, str(e)), extra=extra)

        # Mark the liveaction failed before propagating the error.
        liveaction_db = action_utils.update_liveaction_status(
            status=action_constants.LIVEACTION_STATUS_FAILED,
            liveaction_id=liveaction_db.id)
        raise

    return result
def test_process_info(self):
    # get_process_info() should report this process' hostname and pid.
    info = system_info.get_process_info()
    self.assertEqual(info['hostname'], socket.gethostname())
    self.assertEqual(info['pid'], os.getpid())
def test_process_info(self):
    # The reported hostname / pid must match the current process.
    process_info = system_info.get_process_info()
    expected_hostname = socket.gethostname()
    expected_pid = os.getpid()
    self.assertEqual(process_info["hostname"], expected_hostname)
    self.assertEqual(process_info["pid"], expected_pid)