def notify(self, rpc_ctx, ex_id, data, event, timestamp, publishers):
    """Receives calls over RPC to notify on notification server.

    :param rpc_ctx: RPC request context dictionary.
    :param ex_id: Workflow, task, or action execution id.
    :param data: Dictionary to include in the notification message.
    :param event: Event being notified on.
    :param timestamp: Datetime when this event occurred.
    :param publishers: The list of publishers to send the notification.
    """
    LOG.info(
        "Received RPC request 'notify'[ex_id=%s, event=%s, "
        "timestamp=%s, data=%s, publishers=%s]",
        ex_id,
        event,
        timestamp,
        data,
        utils.cut(publishers)
    )

    self.notifier.notify(
        ex_id,
        data,
        event,
        timestamp,
        publishers
    )
def start_workflow(self, rpc_ctx, wf_identifier, wf_namespace, wf_ex_id,
                   wf_input, description, params):
    """Receives calls over RPC to start workflows on engine.

    :param rpc_ctx: RPC request context.
    :param wf_identifier: Workflow definition identifier.
    :param wf_namespace: Workflow namespace.
    :param wf_ex_id: Workflow execution id. If passed, it will be set
        in the new execution object.
    :param wf_input: Workflow input.
    :param description: Workflow execution description.
    :param params: Additional workflow type specific parameters.
    :return: Workflow execution.
    """
    LOG.info(
        "Received RPC request 'start_workflow'[workflow_identifier=%s, "
        "workflow_input=%s, description=%s, params=%s]",
        wf_identifier,
        utils.cut(wf_input),
        description,
        params
    )

    return self.engine.start_workflow(
        wf_identifier,
        wf_namespace,
        wf_ex_id,
        wf_input,
        description,
        **params
    )
def start_action(self, rpc_ctx, action_name, action_input,
                 description, params):
    """Receives calls over RPC to start actions on engine.

    :param rpc_ctx: RPC request context.
    :param action_name: Name of the action.
    :param action_input: Input dictionary for the action.
    :param description: Description of the new action execution.
    :param params: Extra parameters to run the action.
    :return: Action execution.
    """
    LOG.info(
        "Received RPC request 'start_action'[name=%s, input=%s, "
        "description=%s, params=%s]",
        action_name,
        utils.cut(action_input),
        description,
        params
    )

    return self.engine.start_action(
        action_name,
        action_input,
        description,
        **params
    )
def start(self, wf_def, input_dict, desc='', params=None):
    """Start workflow.

    :param wf_def: Workflow definition.
    :param input_dict: Workflow input.
    :param desc: Workflow execution description.
    :param params: Workflow type specific parameters.
    """
    assert not self.wf_ex

    # New workflow execution.
    self.wf_spec = spec_parser.get_workflow_spec_by_definition_id(
        wf_def.id,
        wf_def.updated_at
    )

    wf_trace.info(
        self.wf_ex,
        "Starting workflow [name=%s, input=%s]"
        % (wf_def.name, utils.cut(input_dict))
    )

    # TODO(rakhmerov): This call implicitly changes input_dict! Fix it!
    # After fix we need to move validation after adding risky fields.
    eng_utils.validate_input(wf_def, input_dict, self.wf_spec)

    self._create_execution(wf_def, input_dict, desc, params)

    self.set_state(states.RUNNING)

    wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

    cmds = wf_ctrl.continue_workflow()

    dispatcher.dispatch_workflow_commands(self.wf_ex, cmds)
def start(self, wf_def, wf_ex_id, input_dict, desc='', params=None):
    """Start workflow.

    :param wf_def: Workflow definition.
    :param wf_ex_id: Workflow execution id.
    :param input_dict: Workflow input.
    :param desc: Workflow execution description.
    :param params: Workflow type specific parameters.
    """
    assert not self.wf_ex

    # New workflow execution.
    self.wf_spec = spec_parser.get_workflow_spec_by_definition_id(
        wf_def.id,
        wf_def.updated_at
    )

    wf_trace.info(
        self.wf_ex,
        'Starting workflow [name=%s, input=%s]'
        % (wf_def.name, utils.cut(input_dict))
    )

    self.validate_input(input_dict)

    self._create_execution(
        wf_def,
        wf_ex_id,
        self.prepare_input(input_dict),
        desc,
        params
    )

    self.set_state(states.RUNNING)

    wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

    dispatcher.dispatch_workflow_commands(
        self.wf_ex,
        wf_ctrl.continue_workflow()
    )
def put(self, env):
    """Update an environment.

    :param env: Required. Environment structure to update.
    """
    acl.enforce('environments:update', context.ctx())

    if not env.name:
        raise exceptions.InputException(
            'Name of the environment is not provided.'
        )

    LOG.debug("Update environment [name=%s, env=%s]", env.name, cut(env))

    definition = json.loads(wsme_pecan.pecan.request.body.decode())
    definition.pop('name')

    self._validate_environment(
        definition,
        ['description', 'variables', 'scope']
    )

    db_model = rest_utils.rest_retry_on_db_error(
        db_api.update_environment
    )(env.name, env.to_dict())

    return resources.Environment.from_db_model(db_model)
def run_action(self, rpc_ctx, action_ex_id, action_cls_str,
               action_cls_attrs, params, safe_rerun, execution_context,
               timeout):
    """Receives calls over RPC to run action on executor.

    :param rpc_ctx: RPC request context dictionary.
    :param action_ex_id: Action execution id.
    :param action_cls_str: Action class name.
    :param action_cls_attrs: Action class attributes.
    :param params: Action input parameters.
    :param safe_rerun: Tells if the given action can be safely rerun.
    :param execution_context: A dict of values providing information
        about the current execution.
    :param timeout: A period of time in seconds after which execution
        of the action will be interrupted.
    :return: Action result.
    """
    LOG.debug(
        "Received RPC request 'run_action'[action_ex_id=%s, "
        "action_cls_str=%s, action_cls_attrs=%s, params=%s, "
        "timeout=%s]",
        action_ex_id,
        action_cls_str,
        action_cls_attrs,
        utils.cut(params),
        timeout
    )

    redelivered = rpc_ctx.redelivered or False

    try:
        self._aer.add_action_ex_id(action_ex_id)

        res = self.executor.run_action(
            action_ex_id,
            action_cls_str,
            action_cls_attrs,
            params,
            safe_rerun,
            execution_context,
            redelivered,
            timeout=timeout
        )

        LOG.debug(
            "Sending action result to engine"
            " [action_ex_id=%s, action_cls=%s]",
            action_ex_id,
            action_cls_str
        )

        return res
    finally:
        self._aer.remove_action_ex_id(action_ex_id)
def post(self, env):
    """Create a new environment.

    :param env: Required. Environment structure to create.
    """
    acl.enforce('environments:create', context.ctx())

    LOG.debug("Create environment [env=%s]", cut(env))

    self._validate_environment(
        json.loads(wsme_pecan.pecan.request.body.decode()),
        ['name', 'description', 'variables']
    )

    db_model = db_api.create_environment(env.to_dict())

    return resources.Environment.from_db_model(db_model)
def start(self, wf_def, wf_ex_id, input_dict, desc='', params=None):
    """Start workflow.

    :param wf_def: Workflow definition.
    :param wf_ex_id: Workflow execution id.
    :param input_dict: Workflow input.
    :param desc: Workflow execution description.
    :param params: Workflow type specific parameters.
    """
    assert not self.wf_ex

    # New workflow execution.
    self.wf_spec = spec_parser.get_workflow_spec_by_definition_id(
        wf_def.id,
        wf_def.updated_at
    )

    wf_trace.info(
        self.wf_ex,
        'Starting workflow [name=%s, input=%s]'
        % (wf_def.name, utils.cut(input_dict))
    )

    self.validate_input(input_dict)

    self._create_execution(
        wf_def,
        wf_ex_id,
        self.prepare_input(input_dict),
        desc,
        params
    )

    self.set_state(states.RUNNING)

    # Publish event as soon as state is set to running.
    self.notify(events.WORKFLOW_LAUNCHED)

    wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

    dispatcher.dispatch_workflow_commands(
        self.wf_ex,
        wf_ctrl.continue_workflow()
    )
def notify(self, rpc_ctx, ex_id, data, event, timestamp, publishers):
    """Receives calls over RPC to notify on notification server.

    :param rpc_ctx: RPC request context dictionary.
    :param ex_id: Workflow, task, or action execution id.
    :param data: Dictionary to include in the notification message.
    :param event: Event being notified on.
    :param timestamp: Datetime when this event occurred.
    :param publishers: The list of publishers to send the notification.
    """
    LOG.info(
        "Received RPC request 'notify'[ex_id=%s, event=%s, "
        "timestamp=%s, data=%s, publishers=%s]",
        ex_id, event, timestamp, data, utils.cut(publishers))

    self.notifier.notify(ex_id, data, event, timestamp, publishers)
def run_action(self, rpc_ctx, action_ex_id, action_cls_str,
               action_cls_attrs, params, safe_rerun, execution_context,
               timeout):
    """Receives calls over RPC to run action on executor.

    :param rpc_ctx: RPC request context dictionary.
    :param action_ex_id: Action execution id.
    :param action_cls_str: Action class name.
    :param action_cls_attrs: Action class attributes.
    :param params: Action input parameters.
    :param safe_rerun: Tells if the given action can be safely rerun.
    :param execution_context: A dict of values providing information
        about the current execution.
    :param timeout: A period of time in seconds after which execution
        of the action will be interrupted.
    :return: Action result.
    """
    LOG.debug(
        "Received RPC request 'run_action'[action_ex_id=%s, "
        "action_cls_str=%s, action_cls_attrs=%s, params=%s, "
        "timeout=%s]",
        action_ex_id, action_cls_str, action_cls_attrs,
        utils.cut(params), timeout)

    redelivered = rpc_ctx.redelivered or False

    try:
        self._aer.add_action_ex_id(action_ex_id)

        res = self.executor.run_action(action_ex_id, action_cls_str,
                                       action_cls_attrs, params,
                                       safe_rerun, execution_context,
                                       redelivered, timeout=timeout)

        LOG.debug(
            "Sending action result to engine"
            " [action_ex_id=%s, action_cls=%s]",
            action_ex_id, action_cls_str)

        return res
    finally:
        self._aer.remove_action_ex_id(action_ex_id)
def post(self, env):
    """Create a new environment.

    :param env: Required. Environment structure to create.
    """
    acl.enforce('environments:create', context.ctx())

    LOG.debug("Create environment [env=%s]", cut(env))

    self._validate_environment(
        json.loads(wsme_pecan.pecan.request.body.decode()),
        ['name', 'description', 'variables']
    )

    db_model = rest_utils.rest_retry_on_db_error(
        db_api.create_environment
    )(env.to_dict())

    return resources.Environment.from_db_model(db_model)
def start_workflow(self, rpc_ctx, workflow_identifier, workflow_input,
                   description, params):
    """Receives calls over RPC to start workflows on engine.

    :param rpc_ctx: RPC request context.
    :param workflow_identifier: Workflow definition identifier.
    :param workflow_input: Workflow input.
    :param description: Workflow execution description.
    :param params: Additional workflow type specific parameters.
    :return: Workflow execution.
    """
    LOG.info(
        "Received RPC request 'start_workflow'[rpc_ctx=%s, "
        "workflow_identifier=%s, workflow_input=%s, description=%s, "
        "params=%s]",
        rpc_ctx, workflow_identifier, utils.cut(workflow_input),
        description, params)

    return self.engine.start_workflow(workflow_identifier,
                                      workflow_input, description,
                                      **params)
def put(self, env):
    """Update an environment.

    :param env: Required. Environment structure to update.
    """
    acl.enforce('environments:update', context.ctx())

    if not env.name:
        raise exceptions.InputException(
            'Name of the environment is not provided.')

    LOG.debug("Update environment [name=%s, env=%s]", env.name, cut(env))

    definition = json.loads(wsme_pecan.pecan.request.body.decode())
    definition.pop('name')

    self._validate_environment(definition,
                               ['description', 'variables', 'scope'])

    db_model = db_api.update_environment(env.name, env.to_dict())

    return resources.Environment.from_db_model(db_model)
def run_action(self, rpc_ctx, action_ex_id, action_cls_str,
               action_cls_attrs, params, safe_rerun):
    """Receives calls over RPC to run action on executor.

    :param rpc_ctx: RPC request context dictionary.
    :param action_ex_id: Action execution id.
    :param action_cls_str: Action class name.
    :param action_cls_attrs: Action class attributes.
    :param params: Action input parameters.
    :param safe_rerun: Tells if the given action can be safely rerun.
    :return: Action result.
    """
    LOG.info(
        "Received RPC request 'run_action'[action_ex_id=%s, "
        "action_cls_str=%s, action_cls_attrs=%s, params=%s]",
        action_ex_id, action_cls_str, action_cls_attrs,
        utils.cut(params))

    redelivered = rpc_ctx.redelivered or False

    return self.executor.run_action(action_ex_id, action_cls_str,
                                    action_cls_attrs, params, safe_rerun,
                                    redelivered)
def test_cut_dict_for_state_info(self):
    d = {}

    for i in range(2000):
        d[i] = {'value': 'This is a string that exceeds 35 characters'}

    s = utils.cut(d, 65500)

    # Show the tail of the string in the failure message so the cut
    # point is visible.
    assert len(s) <= 65500, str(len(s)) + ' : ' + s[-30:]
def test_cut_dict_with_large_dict_of_int(self):
    d = {}

    for i in range(65535):
        d[i] = i

    s = utils.cut(d, 65535)

    assert len(s) <= 65535, len(s)
)

for cls in utils.iter_subclasses(Execution):
    event.listen(
        # Catch and trim Execution.state_info to always fit the
        # allocated size. Note that the limit is 65500, which is less
        # than 65535 (2^16 - 1). The reason is that utils.cut() is not
        # exactly accurate when the value is not a string but, for
        # example, a dictionary. If we limited it exactly to 65535,
        # then once in a while it could go slightly beyond the allowed
        # maximum size. It may depend on the order of keys in a string
        # representation and other things that are hidden inside the
        # utils.cut_dict() method.
        cls.state_info,
        'set',
        lambda t, v, o, i: utils.cut(v, 65500),
        retval=True
    )

# Many-to-one for 'ActionExecution' and 'TaskExecution'.

ActionExecution.task_execution_id = sa.Column(
    sa.String(36),
    sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
    nullable=True
)

TaskExecution.action_executions = relationship(
    ActionExecution,
    backref=backref('task_execution', remote_side=[TaskExecution.id]),
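# Illustration of the overshoot described in the comment above: for
# non-string values the trimming is approximate because the container is
# rendered item by item. The helper below is a hypothetical sketch written
# only for this example (it is NOT mistral's utils.cut_dict()); it shows
# how an item-by-item renderer can land a few characters past a strict
# budget, which is why the listener trims to 65500 rather than 65535.
def _approx_cut_dict(d, length):
    res = '{'

    for key, value in d.items():
        item = '%s=%s' % (key, value)

        # Appending the closing ellipsis marker can push the result
        # slightly past 'length'.
        if len(res) + len(item) > length:
            return res + '...}'

        res += item + ', '

    return res + '}'


big = {i: 'x' * 30 for i in range(10000)}

# Typically prints a value close to, and possibly a few characters above,
# the nominal 65535 budget.
print(len(_approx_cut_dict(big, 65535)))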
@property
def executions(self):
    return (
        self.action_executions
        if not self.spec.get('workflow')
        else self.workflow_executions
    )


for cls in utils.iter_subclasses(Execution):
    event.listen(
        # Catch and trim Execution.state_info to always fit the
        # allocated size.
        cls.state_info,
        'set',
        lambda t, v, o, i: utils.cut(v, 65532),
        retval=True
    )

# Many-to-one for 'ActionExecution' and 'TaskExecution'.

ActionExecution.task_execution_id = sa.Column(
    sa.String(36),
    sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
    nullable=True
)

TaskExecution.action_executions = relationship(
    ActionExecution,
    backref=backref('task_execution', remote_side=[TaskExecution.id]),
def _result_msg():
    if state == states.ERROR:
        return "error = %s" % utils.cut(result.error)

    return "result = %s" % utils.cut(result.data)
def test_cut_dict_with_large_dict_of_dict(self):
    d = {}

    for i in range(65535):
        d[i] = {'value': str(i)}

    s = utils.cut(d, 65535)

    assert len(s) <= 65535, len(s)
# Whether the task is fully processed (publishing and calculating
# commands after it). It allows simplifying workflow controller
# implementations significantly.
processed = sa.Column(sa.BOOLEAN, default=False)

# Data Flow properties.
in_context = sa.Column(st.JsonLongDictType())
published = sa.Column(st.JsonLongDictType())


for cls in utils.iter_subclasses(Execution):
    event.listen(
        # Catch and trim Execution.state_info to always fit the
        # allocated size.
        cls.state_info,
        'set',
        lambda t, v, o, i: utils.cut(v, 65532),
        retval=True)


def validate_long_type_length(cls, field_name, value):
    """Makes sure the value does not exceed the maximum size."""
    if value:
        # Get the configured limit.
        size_limit_kb = cfg.CONF.engine.execution_field_size_limit_kb

        # If the size is unlimited.
        if size_limit_kb < 0:
            return

        size_kb = int(sys.getsizeof(str(value)) / 1024)
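# A hedged usage sketch of the size check above. With
# execution_field_size_limit_kb set to, say, 4, a value whose string form
# is larger than 4 KB fails validation. The '_check' helper and the
# ValueError below are illustrative stand-ins, not mistral's actual
# validation entry point or exception type.
import sys


def _check(field_name, value, size_limit_kb):
    # Same size computation as validate_long_type_length() above.
    size_kb = int(sys.getsizeof(str(value)) / 1024)

    if size_limit_kb >= 0 and size_kb > size_limit_kb:
        raise ValueError(
            "Size of '%s' is %d KB, which exceeds the limit of %d KB."
            % (field_name, size_kb, size_limit_kb)
        )


_check('published', {'out': 'x' * 10}, 4)       # passes
# _check('published', {'out': 'x' * 10000}, 4)  # raises ValueError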
def cut_repr(self):
    return 'Result [data=%s, error=%s, cancel=%s]' % (
        utils.cut(self.data),
        utils.cut(self.error),
        str(self.cancel)
    )
# Whether the task is fully processed (publishing and calculating
# commands after it). It allows simplifying workflow controller
# implementations significantly.
processed = sa.Column(sa.BOOLEAN, default=False)

# Data Flow properties.
in_context = sa.Column(st.JsonLongDictType())
published = sa.Column(st.JsonLongDictType())


for cls in utils.iter_subclasses(Execution):
    event.listen(
        # Catch and trim Execution.state_info to always fit the
        # allocated size.
        cls.state_info,
        'set',
        lambda t, v, o, i: utils.cut(v, 1020),
        retval=True)


def validate_long_type_length(cls, field_name, value):
    """Makes sure the value does not exceed the maximum size."""
    if value:
        # Get the configured limit.
        size_limit_kb = cfg.CONF.engine.execution_field_size_limit_kb

        # If the size is unlimited.
        if size_limit_kb < 0:
            return

        size_kb = sys.getsizeof(str(value)) / 1024

        if size_kb > size_limit_kb:
def test_cut_list_with_large_list_of_int(self):
    d = [i for i in range(65535)]

    s = utils.cut(d, 65535)

    self.assertThat(len(s), ttm.Not(ttm.GreaterThan(65535)))
def test_cut_dict_with_large_dict_of_str(self):
    d = {}

    for i in range(65535):
        d[str(i)] = str(i)

    s = utils.cut(d, 65535)

    self.assertThat(len(s), ttm.Not(ttm.GreaterThan(65535)))
def test_cut_dict_with_large_dict_of_dict(self):
    d = {}

    for i in range(65535):
        d[i] = {'value': str(i)}

    s = utils.cut(d, 65535)

    self.assertThat(len(s), ttm.Not(ttm.GreaterThan(65535)))
def test_cut_dict_with_large_dict_of_str(self):
    d = {}

    for i in range(65535):
        d[str(i)] = str(i)

    s = utils.cut(d, 65535)

    assert len(s) <= 65535, len(s)
def test_cut_dict_for_state_info(self):
    d = {}

    for i in range(2000):
        d[i] = {'value': 'This is a string that exceeds 35 characters'}

    s = utils.cut(d, 65500)

    self.assertThat(len(s), ttm.Not(ttm.GreaterThan(65500)))
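# The tests above pin the contract that the state_info listener relies on:
# trimming a large structure with utils.cut(value, 65500) yields a string
# that still fits a 64 KB column even if the cut lands slightly past the
# requested budget. A minimal usage sketch (the 'state_info' payload is
# made up for illustration; it assumes mistral's utils module is in
# scope as 'utils'):
state_info = {'task': 'create_vm', 'error': 'x' * 100000}

trimmed = utils.cut(state_info, 65500)

# A small overshoot past 65500 is tolerated by design; the hard column
# limit is 65535 (2^16 - 1).
assert len(trimmed) <= 65535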
# Whether the task is fully processed (publishing and calculating
# commands after it). It allows simplifying workflow controller
# implementations significantly.
processed = sa.Column(sa.BOOLEAN, default=False)

# Data Flow properties.
in_context = sa.Column(st.JsonLongDictType())
published = sa.Column(st.JsonDictType())


for cls in utils.iter_subclasses(Execution):
    event.listen(
        # Catch and trim Execution.state_info to always fit the
        # allocated size.
        cls.state_info,
        'set',
        lambda t, v, o, i: utils.cut(v, 1020),
        retval=True
    )

# Many-to-one for 'Execution' and 'TaskExecution'.

Execution.task_execution_id = sa.Column(
    sa.String(36),
    sa.ForeignKey(TaskExecution.id),
    nullable=True
)

TaskExecution.executions = relationship(
    Execution,
    backref=backref('task_execution', remote_side=[TaskExecution.id]),
    cascade='all, delete-orphan',
    return d


for cls in utils.iter_subclasses(Execution):
    event.listen(
        # Catch and trim Execution.state_info to always fit the
        # allocated size. Note that the limit is 65500, which is less
        # than 65535 (2^16 - 1). The reason is that utils.cut() is not
        # exactly accurate when the value is not a string but, for
        # example, a dictionary. If we limited it exactly to 65535,
        # then once in a while it could go slightly beyond the allowed
        # maximum size. It may depend on the order of keys in a string
        # representation and other things that are hidden inside the
        # utils.cut_dict() method.
        cls.state_info,
        'set',
        lambda t, v, o, i: utils.cut(v, 65500),
        retval=True
    )

# Many-to-one for 'ActionExecution' and 'TaskExecution'.

ActionExecution.task_execution_id = sa.Column(
    sa.String(36),
    sa.ForeignKey(TaskExecution.id, ondelete='CASCADE'),
    nullable=True
)

TaskExecution.action_executions = relationship(
    ActionExecution,
    backref=backref('task_execution', remote_side=[TaskExecution.id]),