Code example #1
File: test_jobs.py Project: bwlang/galaxy
    def test_purging_output_cleaned_after_ok_run(self):
        history_id, job_state, outputs = self._setup_running_two_output_job(10)

        # Pretty much right away after the job starts running, these paths should be populated -
        # if they are, grab them and make sure they are deleted at the end of the job.
        dataset_1 = self._get_history_item_as_admin(history_id, outputs[0]["id"])
        dataset_2 = self._get_history_item_as_admin(history_id, outputs[1]["id"])
        if "file_name" in dataset_1:
            output_dataset_paths = [dataset_1["file_name"], dataset_2["file_name"]]
            # The path may or may not exist, depending on whether the test runs locally.
            output_dataset_paths_exist = os.path.exists(output_dataset_paths[0])
        else:
            output_dataset_paths = []
            output_dataset_paths_exist = False

        if not output_dataset_paths_exist:
            # Given this Galaxy configuration - there is nothing more to be tested here.
            # Consider throwing a skip instead.
            return

        # Purge one of the two outputs and wait for the job to complete.
        self._raw_update_history_item(history_id, outputs[0]["id"], {"purged": True})
        wait_on_state(job_state, assert_ok=True)

        # The early return above guarantees the paths exist at this point.
        time.sleep(0.5)
        # Make sure the non-purged dataset is on disk and the purged one is not.
        assert os.path.exists(output_dataset_paths[1])
        assert not os.path.exists(output_dataset_paths[0])
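
This test hands the job_state callable to wait_on_state. A minimal sketch of the polling contract the tests here appear to rely on - illustrative, not Galaxy's verbatim helper: the state function is polled, any state listed in skip_states counts as "not finished yet", and with assert_ok=True the terminal state must be "ok".

# Sketch of the wait_on_state contract assumed by these tests; the names,
# defaults, and loop shape are assumptions, not Galaxy's implementation.
import time

def wait_on_state(state_func, assert_ok=False, skip_states=None, timeout=15, delta=0.25):
    if skip_states is None:
        skip_states = ["new", "queued", "running"]
    elapsed = 0.0
    while elapsed <= timeout:
        state = state_func().json()["state"]
        if state not in skip_states:
            if assert_ok:
                assert state == "ok", "Final state - %s - not ok." % state
            return state
        time.sleep(delta)
        elapsed += delta
    raise Exception("Timed out waiting on state.")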
Code example #2
File: test_jobs.py Project: bwlang/galaxy
    def _setup_running_two_output_job(self, sleep_time):
        history_id = self.dataset_populator.new_history()
        payload = self.dataset_populator.run_tool_payload(
            tool_id='create_2',
            inputs=dict(sleep_time=sleep_time),
            history_id=history_id,
        )
        run_response = self._post("tools", data=payload).json()
        outputs = run_response["outputs"]
        jobs = run_response["jobs"]

        assert len(outputs) == 2
        assert len(jobs) == 1

        def job_state():
            # Poll the job via the jobs API; callers read response.json()["state"].
            jobs_response = self._get("jobs/%s" % jobs[0]["id"])
            return jobs_response

        # Give job some time to get up and running.
        time.sleep(2)
        running_state = wait_on_state(job_state, skip_states=["queued", "new"], assert_ok=False, timeout=15)
        assert running_state == "running", running_state

        return history_id, job_state, outputs
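
The helper returns a (history_id, job_state, outputs) triple that the tests above consume. The body it posts to the tools endpoint comes from run_tool_payload; a rough sketch of its shape, assuming the standard Galaxy tool-execution API (the JSON-encoding of inputs is an assumption):

# Best-effort approximation of the tool-run payload; Galaxy's test
# framework may build or encode this differently.
import json

def run_tool_payload(tool_id, inputs, history_id):
    return {
        "tool_id": tool_id,            # e.g. 'create_2', a test tool with two outputs
        "history_id": history_id,      # history that receives the outputs
        "inputs": json.dumps(inputs),  # tool parameters, e.g. {"sleep_time": 10}
    }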
Code example #3
    def test_purging_output_keep_running_until_all_purged(self):
        history_id, job_state, outputs = self._setup_running_two_output_job(120)

        # Pretty much right away after the job starts running, these paths should be populated -
        # if they are, grab them and make sure they are deleted at the end of the job.
        dataset_1 = self._get_history_item_as_admin(history_id, outputs[0]["id"])
        dataset_2 = self._get_history_item_as_admin(history_id, outputs[1]["id"])
        if "file_name" in dataset_1:
            output_dataset_paths = [dataset_1["file_name"], dataset_2["file_name"]]
            # The path may or may not exist, depending on whether the test runs locally.
            output_dataset_paths_exist = os.path.exists(output_dataset_paths[0])
        else:
            output_dataset_paths = []
            output_dataset_paths_exist = False

        current_state = job_state().json()["state"]
        assert current_state == "running", current_state

        # Purge one of the two outputs and make sure the job is still running.
        self._raw_update_history_item(history_id, outputs[0]["id"], {"purged": True})
        time.sleep(1)
        current_state = job_state().json()["state"]
        assert current_state == "running", current_state

        # Purge the second output and make sure the job is cancelled.
        self._raw_update_history_item(history_id, outputs[1]["id"], {"purged": True})
        final_state = wait_on_state(job_state, assert_ok=False, timeout=15)
        assert final_state in ["deleted_new", "deleted"], final_state

        def paths_deleted():
            # Return True once both files are gone; None keeps the poller retrying.
            if not os.path.exists(output_dataset_paths[0]) and not os.path.exists(output_dataset_paths[1]):
                return True

        if output_dataset_paths_exist:
            wait_on(paths_deleted, "path deletion")
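
paths_deleted returns True once both files are gone and otherwise falls through to an implicit None, which is exactly the shape a generic poller needs. A minimal sketch of the wait_on helper assumed here - again illustrative, not the framework's verbatim code:

# Generic polling sketch: retry `function` until it returns a non-None
# value or the timeout expires. Names and defaults are assumptions.
import time

def wait_on(function, desc, timeout=15, delta=0.25):
    elapsed = 0.0
    while elapsed <= timeout:
        value = function()
        if value is not None:
            return value
        time.sleep(delta)
        elapsed += delta
    raise Exception("Timed out waiting on %s." % desc)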
Code example #4
File: test_libraries.py Project: glormph/galaxy
    def test_create_dataset(self):
        library = self.library_populator.new_private_library("ForCreateDatasets")
        payload, files = self.library_populator.create_dataset_request(library, file_type="txt", contents="create_test")
        create_response = self._post("libraries/%s/contents" % library["id"], payload, files=files)
        self._assert_status_code_is(create_response, 200)
        library_datasets = create_response.json()
        assert len(library_datasets) == 1
        library_dataset = library_datasets[0]

        def show():
            return self._get("libraries/%s/contents/%s" % (library["id"], library_dataset["id"]))

        wait_on_state(show, assert_ok=True)
        library_dataset = show().json()
        self._assert_has_keys(library_dataset, "peek", "data_type")
        assert library_dataset["peek"].find("create_test") >= 0
        assert library_dataset["file_ext"] == "txt", library_dataset["file_ext"]
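
create_dataset_request packages a form payload plus an attached file for the POST to libraries/{id}/contents. A rough sketch of what it might return, assuming Galaxy's library upload API; every field name below is a best-effort assumption rather than the populator's verbatim output:

# Hypothetical reconstruction of a library upload request; the exact
# fields used by create_dataset_request are assumptions.
from io import StringIO

def create_dataset_request(library, file_type="txt", contents="create_test"):
    payload = {
        "folder_id": library["root_folder_id"],  # assumed key on the library JSON
        "create_type": "file",                   # create a dataset, not a folder
        "upload_option": "upload_file",          # upload the contents directly
        "file_type": file_type,
    }
    files = {"files_0|file_data": StringIO(contents)}
    return payload, files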
Code example #5
    def test_deleting_output_keep_running_until_all_deleted(self):
        history_id, job_state, outputs = self._setup_running_two_output_job(120)

        # Delete one of the two outputs and make sure the job is still running.
        self._raw_update_history_item(history_id, outputs[0]["id"], {"deleted": True})
        time.sleep(1)
        state = job_state().json()["state"]
        assert state == "running", state

        # Delete the second output and make sure the job is cancelled.
        self._raw_update_history_item(history_id, outputs[1]["id"], {"deleted": True})
        final_state = wait_on_state(job_state, assert_ok=False, timeout=15)
        assert final_state in ["deleted_new", "deleted"], final_state
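
The only difference from the purge test above is the flag sent to the history-contents update endpoint: "deleted" marks the dataset as deleted, while "purged" additionally removes its data from disk. A minimal sketch of what _raw_update_history_item might do, assuming the standard update endpoint (URL construction and authentication are assumptions):

# Illustrative only; the real helper routes through the test framework's
# request machinery rather than raw requests as shown here.
import requests

def _raw_update_history_item(base_url, api_key, history_id, item_id, data):
    url = "%s/api/histories/%s/contents/%s" % (base_url, history_id, item_id)
    # e.g. data={"deleted": True} or data={"purged": True}
    return requests.put(url, json=data, params={"key": api_key})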