コード例 #1
0
    def test_purging_output_keep_running_until_all_purged(self, history_id):
        """Purge a running job's outputs one at a time.

        The job must stay in the "running" state while at least one output
        survives; only after the last output is purged should the job be
        cancelled.  Where the test runs locally, also verify the output
        files disappear from disk.
        """
        job_state, outputs = self._setup_running_two_output_job(
            history_id, 120)

        # Shortly after the job starts these datasets should already be
        # populated - capture their paths now so deletion can be checked
        # once the job is cancelled.
        first_output = self._get_history_item_as_admin(history_id,
                                                       outputs[0]["id"])
        second_output = self._get_history_item_as_admin(history_id,
                                                        outputs[1]["id"])
        if "file_name" in first_output:
            output_dataset_paths = [
                first_output["file_name"], second_output["file_name"]
            ]
            # The path may or may not exist depending on whether the test
            # runs against a local Galaxy instance.
            output_dataset_paths_exist = os.path.exists(
                output_dataset_paths[0])
        else:
            output_dataset_paths = []
            output_dataset_paths_exist = False

        self._hack_to_skip_test_if_state_ok(job_state)

        state_now = job_state().json()["state"]
        assert state_now == "running", state_now

        # Purge the first output - a single purge must not stop the job.
        self._raw_update_history_item(history_id, outputs[0]["id"],
                                      {"purged": True})
        time.sleep(1)

        self._hack_to_skip_test_if_state_ok(job_state)

        state_now = job_state().json()["state"]
        assert state_now == "running", state_now

        # Purging the remaining output should get the job cancelled.
        self._raw_update_history_item(history_id, outputs[1]["id"],
                                      {"purged": True})
        final_state = wait_on_state(job_state, assert_ok=False, timeout=15)
        assert final_state in ["deleted_new", "deleted"], final_state

        def paths_deleted():
            # wait_on polls until a truthy value; returning None keeps waiting.
            any_left = any(os.path.exists(p) for p in output_dataset_paths)
            return True if not any_left else None

        if output_dataset_paths_exist:
            wait_on(paths_deleted, "path deletion")
コード例 #2
0
ファイル: test_celery_tasks.py プロジェクト: mvdbeek/galaxy
    def test_galaxy_task(self):
        """Exercise the purge_hda task end to end: create a dataset,
        dispatch the task to the worker, and wait until the HDA is
        reported as purged by the API."""
        history_id = self.dataset_populator.new_history()
        dataset = self.dataset_populator.new_dataset(history_id, wait=True)
        hda = self._latest_hda
        assert hda

        def hda_purged():
            # Truthy (True) only once the API reports the dataset purged;
            # None keeps wait_on polling.
            details = self.dataset_populator.get_history_dataset_details(
                history_id, dataset=dataset, assert_ok=False, wait=False)
            return bool(details["purged"]) or None

        assert not hda_purged()

        # Run the task and block until the worker finishes it.
        purge_hda.delay(hda_id=hda.id).get(timeout=10)

        wait_on(hda_purged, "dataset to become purged")
        assert hda_purged()
コード例 #3
0
    def wait_on_entry_points_active(self, job_id, expected_num=1):
        """Wait until ``expected_num`` entry points exist for ``job_id`` and
        every one of them reports ``active``; return the entry point list.

        :param job_id: id of the job whose entry points are polled
        :param expected_num: number of entry points that must appear
        :return: the list of entry point dicts once all are active
        """
        def active_entry_points():
            entry_points = self.entry_points_for_job(job_id)
            if len(entry_points) != expected_num:
                return None
            # Generator instead of a throwaway list inside any() (C419);
            # this also matches how the sibling implementation spells it.
            elif any(not e["active"] for e in entry_points):
                return None
            else:
                return entry_points

        return wait_on(active_entry_points, "entry points to become active")
コード例 #4
0
    def wait_on_entry_points_active(self, job_id, expected_num=1):
        """Poll until the job's entry points are all present and active.

        While waiting, fail fast with an exception if the job itself
        reaches the ``error`` state.

        :param job_id: id of the job whose entry points are polled
        :param expected_num: number of entry points that must appear
        :return: the list of entry point dicts once all are active
        """
        def active_entry_points():
            eps = self.entry_points_for_job(job_id)
            if len(eps) != expected_num:
                return None
            if all(ep["active"] for ep in eps):
                return eps
            # Some entry point is still inactive - bail out early rather
            # than waiting out the timeout if the job already errored.
            job_json = self._get(f"jobs/{job_id}?full=true").json()
            if job_json['state'] == 'error':
                raise Exception(
                    f"Interactive tool job {job_id} failed: {job_json}")
            return None

        # It currently takes at least 90 seconds until we can be sure the container monitor failed.
        # Can be decreased when galaxy_ext/container_monitor/monitor.py changes
        return wait_on(active_entry_points,
                       "entry points to become active",
                       timeout=120)
コード例 #5
0
    def wait_on_proxied_content(self, target):
        """Wait until content is served at a proxied interactive-tool URL.

        ``target`` has the shape
        ``scheme://<prefix>.interactivetool.<host:port>[/path]``: the GET
        request is sent to ``scheme://<host:port>`` while the original host
        (everything before any path) is carried in the ``Host`` header,
        presumably so the proxy can route by it.

        :param target: proxied URL to poll
        :return: the response body text once the endpoint answers
        """
        def get_hosted_content():
            try:
                scheme, rest = target.split("://", 1)
                # Only the part after ".interactivetool." is the address we
                # actually connect to; the leading prefix was unused, so it
                # is discarded here.
                _, host_and_port = rest.split(".interactivetool.")
                faked_host = rest
                if "/" in rest:
                    faked_host = rest.split("/", 1)[0]
                url = f"{scheme}://{host_and_port}"
                response = requests.get(url,
                                        timeout=1,
                                        headers={"Host": faked_host})
                return response.text
            except Exception as e:
                # Best-effort polling: any failure (connection refused,
                # malformed target, timeout) just means "not ready yet".
                print(e)
                return None

        content = wait_on(get_hosted_content,
                          f"realtime hosted content at {target}")
        return content