def test_purging_output_keep_running_until_all_purged(self):
    history_id, job_state, outputs = self._setup_running_two_output_job(120)

    # Pretty much right away after the job is running, these paths should be populated -
    # if they are, grab them and make sure they are deleted at the end of the job.
    dataset_1 = self._get_history_item_as_admin(history_id, outputs[0]["id"])
    dataset_2 = self._get_history_item_as_admin(history_id, outputs[1]["id"])
    if "file_name" in dataset_1:
        output_dataset_paths = [dataset_1["file_name"], dataset_2["file_name"]]
        # This may or may not exist depending on if the test is local or not.
        output_dataset_paths_exist = os.path.exists(output_dataset_paths[0])
    else:
        output_dataset_paths = []
        output_dataset_paths_exist = False

    current_state = job_state().json()["state"]
    assert current_state == "running", current_state

    # Purge one of the two outputs and make sure the job is still running.
    self._raw_update_history_item(history_id, outputs[0]["id"], {"purged": True})
    time.sleep(1)
    current_state = job_state().json()["state"]
    assert current_state == "running", current_state

    # Purge the second output and make sure the job is cancelled.
    self._raw_update_history_item(history_id, outputs[1]["id"], {"purged": True})
    final_state = wait_on_state(job_state, assert_ok=False, timeout=15)
    assert final_state in ["deleted_new", "deleted"], final_state

    def paths_deleted():
        # True once both output paths are gone; implicit None otherwise, so
        # wait_on keeps polling.
        if not os.path.exists(output_dataset_paths[0]) and not os.path.exists(output_dataset_paths[1]):
            return True

    if output_dataset_paths_exist:
        wait_on(paths_deleted, "path deletion")
def _reimport_history(self, history_id, history_name):
    # Ensure the history is ready to go...
    self.dataset_populator.wait_for_history(history_id, assert_ok=True)

    # Export the history.
    download_path = self._export(history_id)

    # Create download for history
    full_download_url = "%s%s?key=%s" % (self.url, download_path, self.galaxy_interactor.api_key)
    download_response = get(full_download_url)
    self._assert_status_code_is(download_response, 200)

    def history_names():
        history_index = self._get("histories")
        return dict((h["name"], h) for h in history_index.json())

    import_name = "imported from archive: %s" % history_name
    assert import_name not in history_names()
    import_data = dict(archive_source=full_download_url, archive_type="url")
    import_response = self._post("histories", data=import_data)
    self._assert_status_code_is(import_response, 200)

    def has_history_with_name():
        histories = history_names()
        return histories.get(import_name, None)

    imported_history = wait_on(has_history_with_name, desc="import history")
    imported_history_id = imported_history["id"]
    self.dataset_populator.wait_for_history(imported_history_id)
    return imported_history_id
def wait_on_entry_points_active(self, job_id, expected_num=1):
    def active_entry_points():
        entry_points = self.entry_points_for_job(job_id)
        if len(entry_points) != expected_num:
            return None
        elif any(not e["active"] for e in entry_points):
            return None
        else:
            return entry_points

    return wait_on(active_entry_points, "entry points to become active")
def _export(self, history_id):
    export_url = self._api_url("histories/%s/exports" % history_id, use_key=True)
    put_response = put(export_url)
    self._assert_status_code_is(put_response, 202)

    def export_ready_response():
        # The exports endpoint keeps answering 202 until the archive is ready.
        put_response = put(export_url)
        if put_response.status_code == 202:
            return None
        return put_response

    put_response = wait_on(export_ready_response, desc="export ready")
    self._assert_status_code_is(put_response, 200)
    response = put_response.json()
    self._assert_has_keys(response, "download_url")
    download_path = response["download_url"]
    return download_path
def wait_on_proxied_content(self, target):
    def get_hosted_content():
        try:
            # Split the proxied target into scheme, the host the proxy expects
            # (everything before any path), and the actual host and port to hit.
            scheme, rest = target.split("://", 1)
            prefix, host_and_port = rest.split(".realtime.")
            faked_host = rest
            if "/" in rest:
                faked_host = rest.split("/", 1)[0]
            response = requests.get("%s://%s" % (scheme, host_and_port), timeout=1, headers={"Host": faked_host})
            return response.content
        except Exception as e:
            print(e)
            return None

    content = wait_on(get_hosted_content, "realtime hosted content at %s" % target)
    return content
def test_import_export(self):
    history_id = self.dataset_populator.new_history(name="for_export")
    self.dataset_populator.new_dataset(history_id, content="1 2 3")
    self.dataset_populator.wait_for_history(history_id, assert_ok=True)
    download_path = self._export(history_id)
    full_download_url = "%s%s?key=%s" % (self.url, download_path, self.galaxy_interactor.api_key)
    download_response = get(full_download_url)
    self._assert_status_code_is(download_response, 200)

    def history_names():
        history_index = self._get("histories")
        return dict((h["name"], h) for h in history_index.json())

    import_name = "imported from archive: for_export"
    assert import_name not in history_names()
    import_data = dict(archive_source=full_download_url, archive_type="url")
    import_response = self._post("histories", data=import_data)
    self._assert_status_code_is(import_response, 200)

    def has_history_with_name():
        histories = history_names()
        return histories.get(import_name, None)

    imported_history = wait_on(has_history_with_name, desc="import history")
    imported_history_id = imported_history["id"]
    self.dataset_populator.wait_for_history(imported_history_id)
    contents_response = self._get("histories/%s/contents" % imported_history_id)
    self._assert_status_code_is(contents_response, 200)
    contents = contents_response.json()
    assert len(contents) == 1
    imported_content = self.dataset_populator.get_history_dataset_content(
        history_id=imported_history_id, dataset_id=contents[0]["id"])
    assert imported_content == "1 2 3\n"
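# NOTE: every function above polls through a shared `wait_on` utility from the
# test framework; its real implementation is not part of this section. The
# sketch below only illustrates the assumed contract (retry a callable until it
# returns a non-None value, failing after a timeout). The name `_wait_on_sketch`
# and the timeout defaults are illustrative assumptions, not the framework API.
def _wait_on_sketch(function, desc, timeout=60, polling_interval=0.25):
    elapsed = 0
    while elapsed < timeout:
        value = function()
        if value is not None:
            # A truthy/non-None result ends the wait and is handed back to the caller.
            return value
        time.sleep(polling_interval)
        elapsed += polling_interval
    raise AssertionError("timed out waiting on %s" % desc)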