def execute(self, job_id):
    # Don't run a job more than the configured max number of retries
    self.last_job_executed_on = arrow.get()
    job = self.build.get_job(job_id)

    # Execute job
    result = self._execute(job)
    return result
def execute(self, job_id):
    # Don't run a job more than the configured max number of retries
    self.last_job_executed_on = arrow.get()
    job = self.build.get_job(job_id)
    TRANSITION_LOG.info("EXECUTION => Executing {} ({})".format(job_id, job.get_command()))

    # Execute job
    result = self._execute(job)
    return result
def _execute(self, job):
    # Don't allow the same job to execute multiple times
    if job in self.execution_times:
        TRANSITION_LOG.info("EXECUTION => Not executing already running job {} ({})".format(
            job.get_id(), job.get_command()))
        return

    self.execution_times[job] = arrow.get()
    TRANSITION_LOG.info("EXECUTION => Executing {} ({})".format(job.get_id(), job.get_command()))

    if callable(self.executor):
        return self.executor(job)
    else:
        return self.executor.execute(job)
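# The branch above supports two executor shapes: a bare callable, or an object
# exposing an execute(job) method. A minimal sketch of each form follows; both
# names below are hypothetical and only reuse the ExecutionResult type seen
# elsewhere in this module.
def noop_executor(job):
    # Callable form: invoked directly as self.executor(job)
    return ExecutionResult(is_async=False, status=True)

class NoopExecutor(object):
    # Object form: invoked as self.executor.execute(job)
    def execute(self, job):
        return ExecutionResult(is_async=False, status=True)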
def _submit_from_json(execution_manager, json_body):
    payload = json.loads(json_body)
    LOG.debug("Submitting job {}".format(payload))

    # Clean up the payload: parse the build context timestamps into arrow objects
    build_context = payload.get('build_context', {})
    for k in ('start_time', 'end_time'):
        if k in build_context:
            LOG.debug("converting {}".format(k))
            build_context[k] = arrow.get(build_context[k])
    LOG.debug("build_context is {}".format(build_context))

    execution_manager.submit(**payload)
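# Example of the JSON body _submit_from_json expects: build_context carries
# ISO-8601 timestamps that get converted to arrow objects before submit() is
# called. The keys other than build_context/start_time/end_time shown here are
# hypothetical payload fields, not part of the source.
example_body = json.dumps({
    "job_definition_id": "example_job",
    "build_context": {
        "start_time": "2015-01-01T00:00:00+00:00",
        "end_time": "2015-01-02T00:00:00+00:00",
    },
})
# _submit_from_json(execution_manager, example_body)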
def _check_for_timeouts(self):
    while self.running:
        PROCESSING_LOG.debug("TIMEOUTS => Checking for timeouts")

        # Collect any jobs that have been running longer than the configured timeout
        timed_out_jobs = []
        now = arrow.get()
        for job, timestamp in self.execution_times.items():
            if (now - timestamp).total_seconds() > self.job_timeout:
                timed_out_jobs.append(job)

        # Stop tracking each timed-out job and finish it with a failed result
        for job in timed_out_jobs:
            self.execution_times.pop(job)
            self.executor.finish_job(job, ExecutionResult(is_async=False, status=False))

        _interruptable_sleep(10)
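# _interruptable_sleep is used above but not defined in this excerpt. A minimal
# sketch, assuming a module-level threading.Event (the event name below is an
# assumption): waiting on an event instead of time.sleep() lets a shutdown
# request wake the timeout loop early.
import threading

_SLEEP_INTERRUPT = threading.Event()

def _interruptable_sleep(seconds):
    # Blocks for up to `seconds`, returning early if the event is set.
    _SLEEP_INTERRUPT.wait(timeout=seconds)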
def do_execute(self, job):
    build_graph = self.get_build_graph()
    command = job.get_command()
    job.set_should_run(False)
    job.set_stale(False)
    print "Simulation:", command

    # Collect every target this job produces, skipping alternates
    target_relationships = build_graph.get_target_relationships(job.get_id())
    produced_targets = {}
    for target_type, target_group in target_relationships.iteritems():
        if target_type == "alternates":
            continue
        produced_targets.update(target_group)

    # Mark each produced target as built and invalidate the jobs that depend on it
    for target_id in produced_targets:
        target = build_graph.get_target(target_id)
        target.exists = True
        target.mtime = arrow.get().timestamp
        print "Simulation: Built target {}".format(target.get_id())

        for dependent_job_id in build_graph.get_dependent_ids(target_id):
            dependent_job = build_graph.get_job(dependent_job_id)
            dependent_job.invalidate()
            # dependent_job.set_should_run(True)

    return ExecutionResult(is_async=False, status=True, stdout='', stderr='')
def _execute(self, job):
    self.execution_times[job] = arrow.get()

    if callable(self.executor):
        return self.executor(job)
    else:
        return self.executor.execute(job)