def job_run(self, tmpdir, mock_event_bus):
    action_foo = action.Action('foo', 'command', None)
    action_after_foo = action.Action('after_foo', 'command', None)
    action_bar = action.Action('bar', 'command', None, triggered_by={'trigger'})
    action_graph = actiongraph.ActionGraph(
        action_map={
            'foo': action_foo,
            'after_foo': action_after_foo,
            'bar': action_bar,
        },
        required_actions={'foo': set(), 'after_foo': {'foo'}, 'bar': set()},
        required_triggers={'foo': set(), 'after_foo': set(), 'bar': {'trigger'}},
    )
    mock_job = mock.Mock(
        output_path=filehandler.OutputPath(tmpdir),
        action_graph=action_graph,
        action_runner=actioncommand.NoActionRunnerFactory(),
    )
    job_run = jobrun.JobRun.for_job(
        mock_job,
        run_num=1,
        run_time=datetime.datetime.now(),
        node=mock.Mock(),
        manual=False,
    )
    return job_run
def __init__(self, job_run_id, name, node, bare_command=None,
             parent_context=None, output_path=None, cleanup=False,
             start_time=None, end_time=None, run_state=STATE_SCHEDULED,
             rendered_command=None, exit_status=None, action_runner=None):
    self.job_run_id = job_run_id
    self.action_name = name
    self.node = node
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = exit_status
    self.bare_command = bare_command
    self.rendered_command = rendered_command
    self.action_runner = action_runner or NoActionRunnerFactory
    self.machine = state.StateMachine(
        self.STATE_SCHEDULED, delegate=self, force_state=run_state)
    self.is_cleanup = cleanup
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(self.id)
    self.context = command_context.build_context(self, parent_context)
def __init__(self, name, scheduler, queueing=True, all_nodes=False,
             node_pool=None, enabled=True, action_graph=None,
             run_collection=None, parent_context=None, output_path=None,
             allow_overlap=None, action_runner=None, max_runtime=None):
    super(Job, self).__init__()
    self.name = name
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(name)
    self.event = event.get_recorder(self.name)
    self.context = command_context.build_context(self, parent_context)
    self.event.ok('created')
def __init__(
    self,
    job_name,
    run_num,
    run_time,
    node,
    output_path=None,
    base_context=None,
    action_runs=None,
    action_graph=None,
    manual=None,
):
    super(JobRun, self).__init__()
    self.job_name = maybe_decode(job_name)
    self.run_num = run_num
    self.run_time = run_time
    self.node = node
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(str(self.run_num))
    self.action_runs_proxy = None
    self._action_runs = None
    self.action_graph = action_graph
    self.manual = manual
    if action_runs:
        self.action_runs = action_runs
    self.context = command_context.build_context(self, base_context)
def build(self, job_config): log.debug("Building new job %s", job_config.name) output_path = filehandler.OutputPath(self.output_stream_dir) scheduler = scheduler_from_config(job_config.schedule, self.time_zone) job = Job.from_config(job_config, scheduler, self.context, output_path, self.action_runner) return JobScheduler(job)
def __init__(self, job_name, run_num, run_time, node, output_path=None,
             base_context=None, action_runs=None, action_graph=None,
             manual=None):
    super(JobRun, self).__init__()
    self.job_name = job_name
    self.run_num = run_num
    self.run_time = run_time
    self.node = node
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(self.id)
    self.action_runs_proxy = None
    self._action_runs = None
    self.action_graph = action_graph
    self.manual = manual
    self.event = event.get_recorder(self.full_id)
    self.event.ok('created')
    if action_runs:
        self.action_runs = action_runs
    self.context = command_context.build_context(self, base_context)
def __init__(
    self,
    job_run_id,
    name,
    node,
    command_config,
    parent_context=None,
    output_path=None,
    cleanup=False,
    start_time=None,
    end_time=None,
    run_state=SCHEDULED,
    exit_status=None,
    attempts=None,
    action_runner=None,
    retries_remaining=None,
    retries_delay=None,
    machine=None,
    executor=None,
    trigger_downstreams=None,
    triggered_by=None,
    on_upstream_rerun=None,
    trigger_timeout_timestamp=None,
    original_command=None,
):
    super().__init__()
    self.job_run_id = maybe_decode(job_run_id)
    self.action_name = maybe_decode(name)
    self.node = node
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = exit_status
    self.action_runner = action_runner or NoActionRunnerFactory()
    self.machine = machine or Machine.from_machine(
        ActionRun.STATE_MACHINE, None, run_state
    )
    self.is_cleanup = cleanup
    self.executor = executor
    self.command_config = command_config
    self.original_command = original_command or command_config.command
    self.attempts = attempts or []
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(self.action_name)
    self.context = command_context.build_context(self, parent_context)
    self.retries_remaining = retries_remaining
    self.retries_delay = retries_delay
    self.trigger_downstreams = trigger_downstreams
    self.triggered_by = triggered_by
    self.on_upstream_rerun = on_upstream_rerun
    self.trigger_timeout_timestamp = trigger_timeout_timestamp
    self.trigger_timeout_call = None
    self.action_command = None
    self.in_delay = None
def setup_action_run(self):
    self.output_path = filehandler.OutputPath(tempfile.mkdtemp())
    self.action_runner = mock.create_autospec(
        actioncommand.NoActionRunnerFactory)
    self.command = "do command %(actionname)s"
    self.rendered_command = "do command action_name"
    self.action_run = ActionRun(
        "id",
        "action_name",
        mock.create_autospec(node.Node),
        self.command,
        output_path=self.output_path,
        action_runner=self.action_runner,
    )
def setup_action_run(self):
    self.output_path = filehandler.OutputPath(tempfile.mkdtemp())
    self.action_runner = mock.create_autospec(
        actioncommand.NoActionRunnerFactory,
    )
    self.command = "do command {actionname}"
    self.action_run = SSHActionRun(
        job_run_id="id",
        name="action_name",
        node=mock.create_autospec(node.Node),
        bare_command=self.command,
        output_path=self.output_path,
        action_runner=self.action_runner,
    )
def build(self, job_config): log.debug(f"Building new job {job_config.name}") output_path = filehandler.OutputPath(self.output_stream_dir) time_zone = job_config.time_zone or self.time_zone scheduler = scheduler_from_config(job_config.schedule, time_zone) job = Job.from_config( job_config=job_config, scheduler=scheduler, parent_context=self.context, output_path=output_path, action_runner=self.action_runner, ) return JobScheduler(job)
def setup_runs(self):
    action_names = ['action_name', 'second_name', 'cleanup']
    # mock.Mock(name=...) only sets the mock's repr name, not a .name
    # attribute, so assign .name explicitly after construction.
    action_graph = []
    for name in action_names:
        mock_action = mock.Mock(required_actions=[])
        mock_action.name = name
        action_graph.append(mock_action)
    self.action_graph = actiongraph.ActionGraph(
        action_graph, dict((a.name, a) for a in action_graph))
    self.output_path = filehandler.OutputPath(tempfile.mkdtemp())
    self.command = "do command"
    self.action_runs = [self._build_run(name) for name in action_names]
    self.run_map = dict((a.action_name, a) for a in self.action_runs)
    self.run_map['cleanup'].is_cleanup = True
    self.collection = ActionRunCollection(self.action_graph, self.run_map)
def __init__(self, name, scheduler, queueing=True, all_nodes=False,
             node_pool=None, enabled=True, action_graph=None,
             run_collection=None, parent_context=None, output_path=None,
             allow_overlap=None, action_runner=None, max_runtime=None,
             email_name=None, email=None, command=None, priority=None,
             owner=None, impact=None, expected_runtime=None,
             num_retries=None):
    super(Job, self).__init__()
    self.name = name
    self.email_name = name.split(".")[1].replace('_', ' ')
    self.email = email
    self.command = command
    self.priority = priority
    self.owner = owner
    self.impact = impact
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.expected_runtime = expected_runtime
    self.num_retries = num_retries
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(name)
    self.event = event.get_recorder(self.name)
    self.context = command_context.build_context(self, parent_context)
    self.event.ok('created')
def setup_action_run(self):
    self.output_path = filehandler.OutputPath(tempfile.mkdtemp())
    self.action_runner = actioncommand.NoActionRunnerFactory()
    self.command = "do command {actionname}"
    self.rendered_command = "do command action_name"
    self.action_run = ActionRun(
        job_run_id="ns.id.0",
        name="action_name",
        node=mock.create_autospec(node.Node),
        bare_command=self.command,
        output_path=self.output_path,
        action_runner=self.action_runner,
    )
    # These should be implemented in subclasses, we don't care here
    self.action_run.submit_command = mock.Mock()
    self.action_run.stop = mock.Mock()
    self.action_run.kill = mock.Mock()
def setup_collection(self):
    action_names = ['action_name', 'second_name', 'cleanup']
    action_graph = [
        Turtle(name=name, required_actions=[]) for name in action_names
    ]
    self.second_act = second_act = action_graph.pop(1)
    second_act.required_actions.append(action_graph[0])
    action_map = dict((a.name, a) for a in action_graph)
    action_map['second_name'] = second_act
    self.action_graph = actiongraph.ActionGraph(action_graph, action_map)
    self.output_path = filehandler.OutputPath(tempfile.mkdtemp())
    self.command = "do command"
    self.action_runs = [self._build_run(name) for name in action_names]
    self.run_map = dict((a.action_name, a) for a in self.action_runs)
    self.run_map['cleanup'].is_cleanup = True
    self.collection = ActionRunCollection(self.action_graph, self.run_map)
def __init__(
    self,
    name,
    scheduler,
    queueing=True,
    all_nodes=False,
    monitoring=None,
    node_pool=None,
    enabled=True,
    action_graph=None,
    run_collection=None,
    parent_context=None,
    output_path=None,
    allow_overlap=None,
    action_runner=None,
    max_runtime=None,
    time_zone=None,
    expected_runtime=None,
    run_limit=None,
):
    super(Job, self).__init__()
    self.name = maybe_decode(name)
    self.monitoring = monitoring
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled  # current enabled setting
    self.config_enabled = enabled  # enabled attribute from file
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.time_zone = time_zone
    self.expected_runtime = expected_runtime
    self.output_path = output_path or filehandler.OutputPath()
    # If the name has no period, the "namespace" and the "job-name" are the
    # same string, so both appends below are safe.
    self.output_path.append(name.split('.')[0])   # namespace
    self.output_path.append(name.split('.')[-1])  # job-name
    self.context = command_context.build_context(self, parent_context)
    self.run_limit = run_limit
    log.info(f'{self} created')
def __init__(
    self,
    name,
    scheduler,
    queueing=True,
    all_nodes=False,
    monitoring=None,
    node_pool=None,
    enabled=True,
    action_graph=None,
    run_collection=None,
    parent_context=None,
    output_path=None,
    allow_overlap=None,
    action_runner=None,
    max_runtime=None,
    time_zone=None,
    expected_runtime=None,
    run_limit=None,
):
    super(Job, self).__init__()
    self.name = maybe_decode(name)
    self.monitoring = monitoring
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled  # current enabled setting
    self.config_enabled = enabled  # enabled attribute from file
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.time_zone = time_zone
    self.expected_runtime = expected_runtime
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(name)
    self.context = command_context.build_context(self, parent_context)
    self.run_limit = run_limit
    log.info(f'{self} created')
def _get_serializer(self, path=None):
    path = filehandler.OutputPath(path) if path else self._obj.output_path
    return filehandler.OutputStreamSerializer(path)
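# Hedged usage sketch for the helper above, assuming self._obj wraps a Job or
# JobRun that already carries an output_path. The temporary directory is
# illustrative; only OutputPath and OutputStreamSerializer calls that appear
# in these snippets are used.
import tempfile

path = filehandler.OutputPath(tempfile.mkdtemp())
serializer = filehandler.OutputStreamSerializer(path)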
def __init__( self, job_run_id, name, node, bare_command=None, parent_context=None, output_path=None, cleanup=False, start_time=None, end_time=None, run_state=SCHEDULED, rendered_command=None, exit_status=None, action_runner=None, retries_remaining=None, retries_delay=None, exit_statuses=None, machine=None, executor=None, cpus=None, mem=None, constraints=None, docker_image=None, docker_parameters=None, env=None, extra_volumes=None, mesos_task_id=None, trigger_downstreams=None, triggered_by=None, on_upstream_rerun=None, ): super().__init__() self.job_run_id = maybe_decode(job_run_id) self.action_name = maybe_decode(name) self.node = node self.start_time = start_time self.end_time = end_time self.exit_status = exit_status self.bare_command = maybe_decode(bare_command) self.rendered_command = rendered_command self.action_runner = action_runner or NoActionRunnerFactory() self.machine = machine or Machine.from_machine(ActionRun.STATE_MACHINE, None, run_state) self.is_cleanup = cleanup self.executor = executor self.cpus = cpus self.mem = mem self.constraints = constraints self.docker_image = docker_image self.docker_parameters = docker_parameters self.env = env self.extra_volumes = extra_volumes self.mesos_task_id = mesos_task_id self.output_path = output_path or filehandler.OutputPath() self.output_path.append(self.id) self.context = command_context.build_context(self, parent_context) self.retries_remaining = retries_remaining self.retries_delay = retries_delay self.exit_statuses = exit_statuses self.trigger_downstreams = trigger_downstreams self.triggered_by = triggered_by self.on_upstream_rerun = on_upstream_rerun if self.exit_statuses is None: self.exit_statuses = [] self.action_command = None self.in_delay = None
def teardown_mcp(self):
    filehandler.OutputPath(self.test_dir).delete()
    filehandler.FileHandleManager.reset()
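# A sketch of the setup this teardown presumably mirrors: the sandbox is
# created with tempfile.mkdtemp() (as in the setup_action_run snippets above),
# OutputPath.delete() then removes that tree, and FileHandleManager.reset()
# presumably clears any file handles still cached by the manager. setup_mcp
# is the assumed counterpart name.
def setup_mcp(self):
    self.test_dir = tempfile.mkdtemp()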