def __init__(self, job_run_id, name, node, bare_command=None, parent_context=None, output_path=None, cleanup=False, start_time=None, end_time=None, run_state=STATE_SCHEDULED, rendered_command=None, exit_status=None, action_runner=None):
    """Record identity, command, timing, and state-machine setup for one action run."""
    # Identity and placement.
    self.job_run_id = job_run_id
    self.action_name = name
    self.node = node
    # Timing and outcome.
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = exit_status
    # Raw command template and its rendered form.
    self.bare_command = bare_command
    self.rendered_command = rendered_command
    # NOTE(review): falls back to the factory class itself, not an instance
    # (no call parentheses) — confirm that is intended.
    self.action_runner = action_runner if action_runner else NoActionRunnerFactory
    self.machine = state.StateMachine(
        self.STATE_SCHEDULED,
        delegate=self,
        force_state=run_state,
    )
    self.is_cleanup = cleanup
    # Output lands in a per-run subdirectory named by self.id
    # (presumably a property derived from the fields above — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.context = command_context.build_context(self, parent_context)
def __init__(self, name, scheduler, queueing=True, all_nodes=False, node_pool=None, enabled=True, action_graph=None, run_collection=None, parent_context=None, output_path=None, allow_overlap=None, action_runner=None, max_runtime=None, email_name=None, email=None, command=None, priority=None, owner=None, impact=None, expected_runtime=None, num_retries=None):
    """Set up a Job: scheduler, run collection, notification metadata, and output/event plumbing."""
    super(Job, self).__init__()
    self.name = name
    # BUG FIX: previously the email_name parameter was accepted but then
    # unconditionally overwritten, and name.split(".")[1] raised IndexError
    # whenever the job name had no '.' separator.  Honor an explicit
    # email_name first; otherwise derive it from the name, falling back to
    # the whole name when there is no namespace component.
    if email_name is not None:
        self.email_name = email_name
    else:
        parts = name.split(".")
        derived = parts[1] if len(parts) > 1 else parts[0]
        self.email_name = derived.replace('_', ' ')
    self.email = email
    self.command = command
    self.priority = priority
    self.owner = owner
    self.impact = impact
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    # Scheduling behaviour flags.
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.expected_runtime = expected_runtime
    self.num_retries = num_retries
    # One output subdirectory per job name.
    self.output_path = output_path or filehandler.OutputPath()
    self.output_path.append(name)
    self.event = event.get_recorder(self.name)
    self.context = command_context.build_context(self, parent_context)
    self.event.ok('created')
def __init__(self, name, scheduler, queueing=True, all_nodes=False, node_pool=None, enabled=True, action_graph=None, run_collection=None, parent_context=None, output_path=None, allow_overlap=None, action_runner=None, max_runtime=None):
    """Set up a Job with its scheduler, run collection, and output/event plumbing."""
    super(Job, self).__init__()
    self.name = name
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    # Scheduling behaviour flags.
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    # One output subdirectory per job name.
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(name)
    self.event = event.get_recorder(self.name)
    self.context = command_context.build_context(self, parent_context)
    self.event.ok('created')
def __init__(self, job_name, run_num, run_time, node, output_path=None, base_context=None, action_runs=None, action_graph=None, manual=None):
    """Set up a JobRun: identity, output path, action-run storage, and event recorder."""
    super(JobRun, self).__init__()
    self.job_name = job_name
    self.run_num = run_num
    self.run_time = run_time
    self.node = node
    # Per-run output subdirectory keyed by self.id
    # (presumably a property derived from job_name/run_num — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.action_runs_proxy = None
    self._action_runs = None
    self.action_graph = action_graph
    self.manual = manual
    self.event = event.get_recorder(self.full_id)
    self.event.ok('created')
    # Only route through the action_runs setter when runs were provided.
    if action_runs:
        self.action_runs = action_runs
    self.context = command_context.build_context(self, base_context)
def __init__(
    self,
    job_name,
    run_num,
    run_time,
    node,
    output_path=None,
    base_context=None,
    action_runs=None,
    action_graph=None,
    manual=None,
):
    """Set up a JobRun: identity, per-run-number output path, and action-run storage."""
    super(JobRun, self).__init__()
    self.job_name = maybe_decode(job_name)
    self.run_num = run_num
    self.run_time = run_time
    self.node = node
    # Output is grouped into a subdirectory named after the run number.
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(str(self.run_num))
    self.action_runs_proxy = None
    self._action_runs = None
    self.action_graph = action_graph
    self.manual = manual
    # Only route through the action_runs setter when runs were provided.
    if action_runs:
        self.action_runs = action_runs
    self.context = command_context.build_context(self, base_context)
def __init__(
    self,
    job_name,
    run_num,
    run_time,
    node,
    output_path=None,
    base_context=None,
    action_runs=None,
    action_graph=None,
    manual=None,
):
    """Set up a JobRun: identity, per-run output path, and action-run storage."""
    super(JobRun, self).__init__()
    self.job_name = maybe_decode(job_name)
    self.run_num = run_num
    self.run_time = run_time
    self.node = node
    # Per-run output subdirectory keyed by self.id
    # (presumably a property derived from job_name/run_num — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.action_runs_proxy = None
    self._action_runs = None
    self.action_graph = action_graph
    self.manual = manual
    # Only route through the action_runs setter when runs were provided.
    if action_runs:
        self.action_runs = action_runs
    self.context = command_context.build_context(self, base_context)
def __init__(self, name, scheduler, queueing=True, all_nodes=False, owner='', summary='', notes='', node_pool=None, enabled=True, action_graph=None, run_collection=None, parent_context=None, output_path=None, allow_overlap=None, action_runner=None, max_runtime=None):
    """Set up a Job with ownership metadata, scheduler, and output/event plumbing."""
    super(Job, self).__init__()
    self.name = name
    # Free-form descriptive metadata.
    self.owner = owner
    self.summary = summary
    self.notes = notes
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    # Scheduling behaviour flags.
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    # One output subdirectory per job name.
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(name)
    self.event = event.get_recorder(self.name)
    self.context = command_context.build_context(self, parent_context)
    self.event.ok('created')
def __init__(
    self,
    job_run_id,
    name,
    node,
    command_config,
    parent_context=None,
    output_path=None,
    cleanup=False,
    start_time=None,
    end_time=None,
    run_state=SCHEDULED,
    exit_status=None,
    attempts=None,
    action_runner=None,
    retries_remaining=None,
    retries_delay=None,
    machine=None,
    executor=None,
    trigger_downstreams=None,
    triggered_by=None,
    on_upstream_rerun=None,
    trigger_timeout_timestamp=None,
    original_command=None,
):
    """Set up an ActionRun: identity, command config, attempts, retries, and triggers."""
    super().__init__()
    # Identity and placement.
    self.job_run_id = maybe_decode(job_run_id)
    self.action_name = maybe_decode(name)
    self.node = node
    # Timing and outcome.
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = exit_status
    self.action_runner = action_runner if action_runner else NoActionRunnerFactory()
    # Reuse a supplied state machine, or build one forced to run_state.
    self.machine = (
        machine
        if machine
        else Machine.from_machine(ActionRun.STATE_MACHINE, None, run_state)
    )
    self.is_cleanup = cleanup
    self.executor = executor
    self.command_config = command_config
    # Default the original command to the configured command.
    self.original_command = (
        original_command if original_command else command_config.command
    )
    self.attempts = attempts if attempts else []
    # Output lands in a subdirectory named after the action.
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.action_name)
    self.context = command_context.build_context(self, parent_context)
    # Retry configuration.
    self.retries_remaining = retries_remaining
    self.retries_delay = retries_delay
    # Cross-job trigger wiring.
    self.trigger_downstreams = trigger_downstreams
    self.triggered_by = triggered_by
    self.on_upstream_rerun = on_upstream_rerun
    self.trigger_timeout_timestamp = trigger_timeout_timestamp
    self.trigger_timeout_call = None
    # Populated later while the action is running.
    self.action_command = None
    self.in_delay = None
def __init__(self, config, node, instance_number, parent_context):
    """Set up one service instance: id, state machine, and command context."""
    self.config = config
    self.node = node
    self.instance_number = instance_number
    # Instance id is "<service name>.<instance number>".
    self.id = "%s.%s" % (config.name, self.instance_number)
    # Instances start in the DOWN state.
    self.machine = state.StateMachine(ServiceInstance.STATE_DOWN, delegate=self)
    self.parent_context = parent_context
    self.context = command_context.build_context(self, parent_context)
    self.failures = []
def __init__(self, job_run_id, name, node, bare_command=None, parent_context=None, output_path=None, cleanup=False, start_time=None, end_time=None, run_state=STATE_SCHEDULED, rendered_command=None):
    """Record identity, command, timing, and state-machine setup for one action run."""
    # Identity and placement.
    self.job_run_id = job_run_id
    self.action_name = name
    self.node = node
    # Timing; exit status is unknown until the run finishes.
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = None
    # Raw command template and its rendered form.
    self.bare_command = bare_command
    self.rendered_command = rendered_command
    self.machine = state.StateMachine(
        self.STATE_SCHEDULED,
        delegate=self,
        force_state=run_state,
    )
    self.is_cleanup = cleanup
    # Output lands in a per-run subdirectory named by self.id
    # (presumably a property derived from the fields above — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.context = command_context.build_context(self, parent_context)
def __init__(
    self,
    name,
    scheduler,
    queueing=True,
    all_nodes=False,
    monitoring=None,
    node_pool=None,
    enabled=True,
    action_graph=None,
    run_collection=None,
    parent_context=None,
    output_path=None,
    allow_overlap=None,
    action_runner=None,
    max_runtime=None,
    time_zone=None,
    expected_runtime=None,
    run_limit=None,
):
    """Set up a Job with namespaced output paths, monitoring, and a run limit."""
    super(Job, self).__init__()
    self.name = maybe_decode(name)
    self.monitoring = monitoring
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    # Scheduling behaviour flags.
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled  # current enabled setting
    self.config_enabled = enabled  # enabled attribute from file
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.time_zone = time_zone
    self.expected_runtime = expected_runtime
    self.output_path = output_path if output_path else filehandler.OutputPath()
    # Output is nested as <namespace>/<job-name>.  split() always yields at
    # least one element, so when the name has no '.' both lookups resolve to
    # the same (whole) name and nothing crashes.
    name_parts = name.split('.')
    self.output_path.append(name_parts[0])   # namespace
    self.output_path.append(name_parts[-1])  # job-name
    self.context = command_context.build_context(self, parent_context)
    self.run_limit = run_limit
    log.info(f'{self} created')
def __init__(
    self,
    name,
    scheduler,
    queueing=True,
    all_nodes=False,
    monitoring=None,
    node_pool=None,
    enabled=True,
    action_graph=None,
    run_collection=None,
    parent_context=None,
    output_path=None,
    allow_overlap=None,
    action_runner=None,
    max_runtime=None,
    time_zone=None,
    expected_runtime=None,
    run_limit=None,
):
    """Set up a Job with monitoring, a flat per-name output path, and a run limit."""
    super(Job, self).__init__()
    self.name = maybe_decode(name)
    self.monitoring = monitoring
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    # Scheduling behaviour flags.
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled  # current enabled setting
    self.config_enabled = enabled  # enabled attribute from file
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.time_zone = time_zone
    self.expected_runtime = expected_runtime
    # One output subdirectory per job name.
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(name)
    self.context = command_context.build_context(self, parent_context)
    self.run_limit = run_limit
    log.info(f'{self} created')
def __init__(self, job_name, run_num, run_time, node, output_path=None, base_context=None, action_runs=None, action_graph=None, manual=None):
    """Set up a JobRun: identity, output path, action-run storage, and event recorder."""
    super(JobRun, self).__init__()
    self.job_name = job_name
    self.run_num = run_num
    self.run_time = run_time
    self.node = node
    # Per-run output subdirectory keyed by self.id
    # (presumably a property derived from job_name/run_num — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.action_runs_proxy = None
    self._action_runs = None
    self.action_graph = action_graph
    self.manual = manual
    self.event = event.get_recorder(self.id)
    self.event.ok('created')
    # Only route through the action_runs setter when runs were provided.
    if action_runs:
        self.action_runs = action_runs
    self.context = command_context.build_context(self, base_context)
def __init__(
    self,
    name,
    scheduler,
    queueing=True,
    all_nodes=False,
    monitoring=None,
    node_pool=None,
    enabled=True,
    action_graph=None,
    run_collection=None,
    parent_context=None,
    output_path=None,
    allow_overlap=None,
    action_runner=None,
    max_runtime=None,
    time_zone=None,
    expected_runtime=None
):
    """Set up a Job with monitoring, time zone, and a flat per-name output path."""
    super(Job, self).__init__()
    self.name = maybe_decode(name)
    self.monitoring = monitoring
    self.action_graph = action_graph
    self.scheduler = scheduler
    self.runs = run_collection
    # Scheduling behaviour flags.
    self.queueing = queueing
    self.all_nodes = all_nodes
    self.enabled = enabled  # current enabled setting
    self.config_enabled = enabled  # enabled attribute from file
    self.node_pool = node_pool
    self.allow_overlap = allow_overlap
    self.action_runner = action_runner
    self.max_runtime = max_runtime
    self.time_zone = time_zone
    self.expected_runtime = expected_runtime
    # One output subdirectory per job name.
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(name)
    self.context = command_context.build_context(self, parent_context)
    log.info(f'{self} created')
def __init__(
    self,
    job_run_id,
    name,
    node,
    bare_command=None,
    parent_context=None,
    output_path=None,
    cleanup=False,
    start_time=None,
    end_time=None,
    run_state=SCHEDULED,
    rendered_command=None,
    exit_status=None,
    action_runner=None,
    retries_remaining=None,
    retries_delay=None,
    exit_statuses=None,
    machine=None,
    executor=None,
    cpus=None,
    mem=None,
    constraints=None,
    docker_image=None,
    docker_parameters=None,
    env=None,
    extra_volumes=None,
    mesos_task_id=None,
    trigger_downstreams=None,
    triggered_by=None,
    on_upstream_rerun=None,
):
    """Set up an ActionRun: identity, command, executor resources, retries, triggers."""
    super().__init__()
    # Identity and placement.
    self.job_run_id = maybe_decode(job_run_id)
    self.action_name = maybe_decode(name)
    self.node = node
    # Timing and outcome.
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = exit_status
    # Raw command template and its rendered form.
    self.bare_command = maybe_decode(bare_command)
    self.rendered_command = rendered_command
    self.action_runner = action_runner if action_runner else NoActionRunnerFactory()
    # Reuse a supplied state machine, or build one forced to run_state.
    self.machine = (
        machine
        if machine
        else Machine.from_machine(ActionRun.STATE_MACHINE, None, run_state)
    )
    self.is_cleanup = cleanup
    # Executor / container resource settings.
    self.executor = executor
    self.cpus = cpus
    self.mem = mem
    self.constraints = constraints
    self.docker_image = docker_image
    self.docker_parameters = docker_parameters
    self.env = env
    self.extra_volumes = extra_volumes
    self.mesos_task_id = mesos_task_id
    # Output lands in a per-run subdirectory named by self.id
    # (presumably a property derived from the fields above — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.context = command_context.build_context(self, parent_context)
    # Retry configuration; exit_statuses defaults to a fresh empty list.
    self.retries_remaining = retries_remaining
    self.retries_delay = retries_delay
    self.exit_statuses = exit_statuses if exit_statuses is not None else []
    # Cross-job trigger wiring.
    self.trigger_downstreams = trigger_downstreams
    self.triggered_by = triggered_by
    self.on_upstream_rerun = on_upstream_rerun
    # Populated later while the action is running.
    self.action_command = None
    self.in_delay = None
def __init__(
    self,
    job_run_id,
    name,
    node,
    bare_command=None,
    parent_context=None,
    output_path=None,
    cleanup=False,
    start_time=None,
    end_time=None,
    run_state=SCHEDULED,
    rendered_command=None,
    exit_status=None,
    action_runner=None,
    retries_remaining=None,
    retries_delay=None,
    exit_statuses=None,
    machine=None,
    executor=None,
    cpus=None,
    mem=None,
    disk=None,
    constraints=None,
    docker_image=None,
    docker_parameters=None,
    env=None,
    extra_volumes=None,
    mesos_task_id=None,
    trigger_downstreams=None,
    triggered_by=None,
    on_upstream_rerun=None,
    trigger_timeout_timestamp=None,
):
    """Set up an ActionRun: identity, command, executor resources, retries, triggers."""
    super().__init__()
    # Identity and placement.
    self.job_run_id = maybe_decode(job_run_id)
    self.action_name = maybe_decode(name)
    self.node = node
    # Timing and outcome.
    self.start_time = start_time
    self.end_time = end_time
    self.exit_status = exit_status
    # Raw command template and its rendered form.
    self.bare_command = maybe_decode(bare_command)
    self.rendered_command = rendered_command
    self.action_runner = action_runner if action_runner else NoActionRunnerFactory()
    # Reuse a supplied state machine, or build one forced to run_state.
    self.machine = (
        machine
        if machine
        else Machine.from_machine(ActionRun.STATE_MACHINE, None, run_state)
    )
    self.is_cleanup = cleanup
    # Executor / container resource settings.
    self.executor = executor
    self.cpus = cpus
    self.mem = mem
    self.disk = disk
    self.constraints = constraints
    self.docker_image = docker_image
    self.docker_parameters = docker_parameters
    self.env = env
    self.extra_volumes = extra_volumes
    self.mesos_task_id = mesos_task_id
    # Output lands in a per-run subdirectory named by self.id
    # (presumably a property derived from the fields above — TODO confirm).
    self.output_path = output_path if output_path else filehandler.OutputPath()
    self.output_path.append(self.id)
    self.context = command_context.build_context(self, parent_context)
    # Retry configuration; exit_statuses defaults to a fresh empty list.
    self.retries_remaining = retries_remaining
    self.retries_delay = retries_delay
    self.exit_statuses = exit_statuses if exit_statuses is not None else []
    # Cross-job trigger wiring.
    self.trigger_downstreams = trigger_downstreams
    self.triggered_by = triggered_by
    self.on_upstream_rerun = on_upstream_rerun
    self.trigger_timeout_timestamp = trigger_timeout_timestamp
    self.trigger_timeout_call = None
    # Populated later while the action is running.
    self.action_command = None
    self.in_delay = None