def __init__(self, user, token=None):
    """Bind *user* to a token, minting a fresh one when no truthy token is given.

    The record expires 24 hours from creation.
    """
    # Falsy token (None or empty) triggers generation of a new unique id.
    self.token = token or unique_id()
    self.user = user
    self.expiration_time = now() + timedelta(hours=24)
def __init__(self, user, token=None):
    """Initialize the record for *user* with a 24-hour expiration window.

    A unique token is generated unless a truthy *token* is supplied.
    """
    if not token:
        token = unique_id()
    self.token = token
    self.user = user
    self.expiration_time = now() + timedelta(hours=24)
def queue_job(self, job_wrapper):
    """Verify the job was routed to the destination its test scenario expects.

    Reads the ``test_name`` job resource parameter, asserts the job
    destination's ``dest_name`` matches the value expected for that scenario,
    fails the job locally if it completed in under 5 seconds, and otherwise
    delegates to the parent runner's ``queue_job``.
    """
    resource_parameters = job_wrapper.get_resource_parameters()
    try:
        test_name = resource_parameters["test_name"]
    except KeyError:
        job_wrapper.fail("Job resource parameter test_name not set as required for this job runner.")
        return

    # Scenario name -> destination the resubmission logic must have chosen.
    EXPECTED_DEST_NAMES = {
        "test_walltime_resubmission": "retry_test_more_walltime",
        "test_memory_resubmission": "retry_test_more_mem",
        "test_unknown_error": "retry_unknown_error",
        "test_resubmission_after_delay": "retry_after_delay",
    }
    job_dest_params = job_wrapper.job_destination.params
    expected_dest_name = EXPECTED_DEST_NAMES.get(test_name)
    if expected_dest_name is not None:
        assert job_dest_params["dest_name"] == expected_dest_name

    job = job_wrapper.get_job()
    # Scenarios exercised here (e.g. delayed resubmission) should not finish
    # within 5 seconds of job creation; treat that as a failure.
    if (now() - job.create_time).total_seconds() < 5:
        self._fail_job_local(job_wrapper, "Job completed too quickly")
        return
    super().queue_job(job_wrapper)
def get_active_processes(self, last_seen_seconds=None):
    """Return worker processes whose heartbeat updated within a recent window.

    When *last_seen_seconds* is ``None`` the configured heartbeat interval
    is used as the window.
    """
    window = self.heartbeat_interval if last_seen_seconds is None else last_seen_seconds
    cutoff = now() - datetime.timedelta(seconds=window)
    query = self.sa_session.query(WorkerProcess)
    return query.filter(WorkerProcess.table.c.update_time > cutoff).all()
def _workflow_invocation_update(self):
    """Advance this row's ``update_time`` to now, never moving it backwards.

    The ``update_time < now`` guard in the WHERE clause makes the update a
    no-op if another writer already stamped a later time.
    """
    session = object_session(self)
    current_time = now()
    update_stmt = (
        self.table.update()
        .values(update_time=current_time)
        .where(and_(self.table.c.id == self.id, self.table.c.update_time < current_time))
    )
    session.execute(update_stmt)
def update_watcher_designation(self):
    """Refresh this process's heartbeat row and elect the config watcher.

    Creates the ``WorkerProcess`` row if it does not exist, stamps its
    update time and pid, then marks this process as the config watcher iff
    it owns the greatest server name among all active processes.
    """
    process = self.worker_process
    if not process:
        process = WorkerProcess(server_name=self.server_name, hostname=self.hostname)
    process.update_time = now()
    process.pid = self.pid
    self.sa_session.add(process)
    self.sa_session.flush()
    # Only one process should watch the config files on disk; picking the
    # max server name is a simple deterministic election.
    active = self.get_active_processes(self.heartbeat_interval + 1)
    should_watch = self.server_name == max(p.server_name for p in active)
    if should_watch != self.is_config_watcher:
        self.is_config_watcher = should_watch
def send_database_heartbeat(self):
    """Periodically stamp this process's heartbeat row until told to exit.

    Does nothing when the heartbeat is inactive.  Otherwise loops until the
    exit event is set, upserting the ``WorkerProcess`` row for this
    server/host pair and sleeping ``heartbeat_interval`` seconds per cycle.
    """
    if not self.active:
        return
    while not self.exit.isSet():
        record = (
            self.sa_session.query(WorkerProcess)
            .filter_by(server_name=self.server_name, hostname=self.hostname)
            .first()
        )
        if not record:
            record = WorkerProcess(server_name=self.server_name, hostname=self.hostname)
        record.update_time = now()
        self.sa_session.add(record)
        self.sa_session.flush()
        # wait() doubles as an interruptible sleep: returns early on exit.set().
        self.exit.wait(self.heartbeat_interval)
def ready_galaxy_markdown_for_export(trans, internal_galaxy_markdown):
    """Expand internal Galaxy Markdown into an externally renderable form.

    Replaces raw database IDs with encoded IDs and collects the extra data
    needed to render custom container tags.

    :returns: tuple of (expanded markdown, dict of extra rendering data)
    """
    extra_rendering_data = {
        "generate_time": now().isoformat(),
        "generate_version": trans.app.config.version_major,
    }
    # The handler walks each Galaxy directive and accumulates dict-ified
    # rendering data into extra_rendering_data as a side effect.
    handler = ReadyForExportMarkdownDirectiveHandler(trans, extra_rendering_data)
    export_markdown = handler.walk(trans, internal_galaxy_markdown)
    return export_markdown, extra_rendering_data
def queue_job(self, job_wrapper):
    """Check that this job was dispatched to the expected retry destination.

    Looks up the ``test_name`` resource parameter, asserts the destination
    name matches the one the named scenario requires, fails jobs that
    finished suspiciously fast (< 5s after creation), then hands off to the
    parent runner.
    """
    resource_parameters = job_wrapper.get_resource_parameters()
    try:
        test_name = resource_parameters["test_name"]
    except KeyError:
        job_wrapper.fail("Job resource parameter test_name not set as required for this job runner.")
        return

    job_dest_params = job_wrapper.job_destination.params
    # Each test scenario must have been resubmitted to a specific destination.
    expected = {
        "test_walltime_resubmission": "retry_test_more_walltime",
        "test_memory_resubmission": "retry_test_more_mem",
        "test_unknown_error": "retry_unknown_error",
        "test_resubmission_after_delay": "retry_after_delay",
    }.get(test_name)
    if expected is not None:
        assert job_dest_params["dest_name"] == expected

    job = job_wrapper.get_job()
    # A run shorter than 5 seconds means the expected delay/retry never
    # happened — fail the job locally instead of queueing it.
    if (now() - job.create_time).total_seconds() < 5:
        self._fail_job_local(job_wrapper, "Job completed too quickly")
        return
    super().queue_job(job_wrapper)
def _remap(container, line):
    """Translate one Galaxy Markdown directive line for export.

    Replaces the raw (unencoded) database id embedded in *line* with its
    encoded form, then dispatches to the handler matching *container*.
    Returns the handler's result, or ``(line, False)`` when the handler
    yields ``None``.

    :raises MalformedContents: for an unrecognized *container*.
    """
    id_match = re.search(UNENCODED_ID_PATTERN, line)
    object_id = None
    encoded_id = None
    if id_match:
        object_id = int(id_match.group(2))
        encoded_id = trans.security.encode_id(object_id)
        line = line.replace(id_match.group(), f"{id_match.group(1)}={encoded_id}")

    # Directives whose argument is an accessible HDA, mapped to the handler
    # invoked with that HDA.  NOTE(review): link/index share
    # handle_dataset_display, mirroring the original dispatch — confirm intended.
    hda_handlers = {
        "history_dataset_display": self.handle_dataset_display,
        "history_dataset_link": self.handle_dataset_display,
        "history_dataset_index": self.handle_dataset_display,
        "history_dataset_embedded": self.handle_dataset_embedded,
        "history_dataset_as_image": self.handle_dataset_as_image,
        "history_dataset_peek": self.handle_dataset_peek,
        "history_dataset_info": self.handle_dataset_info,
        "history_dataset_type": self.handle_dataset_type,
        "history_dataset_name": self.handle_dataset_name,
    }
    # Directives whose argument is an accessible job.
    job_handlers = {
        "tool_stdout": self.handle_tool_stdout,
        "tool_stderr": self.handle_tool_stderr,
        "job_parameters": self.handle_job_parameters,
        "job_metrics": self.handle_job_metrics,
    }

    if container in hda_handlers:
        _check_object(object_id, line)
        hda = hda_manager.get_accessible(object_id, trans.user)
        rval = hda_handlers[container](line, hda)
    elif container in job_handlers:
        job = job_manager.get_accessible_job(trans, object_id)
        rval = job_handlers[container](line, job)
    elif container == "history_link":
        _check_object(object_id, line)
        history = history_manager.get_accessible(object_id, trans.user)
        rval = self.handle_history_link(line, history)
    elif container == "workflow_display":
        stored_workflow = workflow_manager.get_stored_accessible_workflow(trans, encoded_id)
        rval = self.handle_workflow_display(line, stored_workflow)
    elif container == "history_dataset_collection_display":
        hdca = collection_manager.get_dataset_collection_instance(trans, "history", encoded_id)
        rval = self.handle_dataset_collection_display(line, hdca)
    elif container == "generate_galaxy_version":
        version = trans.app.config.version_major
        rval = self.handle_generate_galaxy_version(line, version)
    elif container == "generate_time":
        rval = self.handle_generate_time(line, now())
    elif container == "invocation_time":
        invocation = workflow_manager.get_invocation(trans, object_id)
        rval = self.handle_invocation_time(line, invocation)
    elif container == "visualization":
        # Visualizations are passed through unchanged.
        rval = None
    else:
        raise MalformedContents(f"Unknown Galaxy Markdown directive encountered [{container}].")
    if rval is not None:
        return rval
    return (line, False)
def _new_invocation(self):
    """Return a ``WorkflowInvocation`` stamped with id 1 and the current time."""
    inv = model.WorkflowInvocation()
    inv.id = 1
    inv.create_time = now()
    return inv