class WorkflowFrameworkPerformanceTestCase(PerformanceTestCase):
    framework_tool_and_types = True

    def setUp(self):
        super().setUp()
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)

    def test_run_simple(self):
        self._run_performance_workflow("simple")

    def test_run_wave(self):
        self._run_performance_workflow("wave_simple")

    def test_run_two_output(self):
        self._run_performance_workflow("two_output")

    def _run_performance_workflow(self, workflow_type):
        workflow_yaml = self.workflow_populator.scaling_workflow_yaml(
            workflow_type=workflow_type,
            collection_size=GALAXY_TEST_PERFORMANCE_COLLECTION_SIZE,
            workflow_depth=GALAXY_TEST_PERFORMANCE_WORKFLOW_DEPTH,
        )
        run_summary = self.workflow_populator.run_workflow(
            workflow_yaml,
            test_data={},
            wait=False,
        )
        self.workflow_populator.wait_for_workflow(
            run_summary.workflow_id,
            run_summary.invocation_id,
            run_summary.history_id,
            assert_ok=True,
            timeout=GALAXY_TEST_PERFORMANCE_TIMEOUT,
        )
# Example 2
class MaximumWorkflowInvocationDurationTestCase(
        integration_util.IntegrationTestCase):

    framework_tool_and_types = True

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        config["maximum_workflow_invocation_duration"] = 20

    def test_invocation_fails_when_maximum_duration_exceeded(self):
        workflow = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_pause")
        workflow_id = self.workflow_populator.create_workflow(workflow)
        history_id = self.dataset_populator.new_history()
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {'0': dict(src="hda", id=hda1["id"])}
        request = {
            "history": f"hist_id={history_id}",
            "inputs": dumps(index_map),
            "inputs_by": 'step_index',
        }
        url = f"workflows/{workflow_id}/invocations"
        invocation_response = self._post(url, data=request)
        invocation_url = url + "/" + invocation_response.json()["id"]
        # Shortly after submission the invocation should still be scheduling
        # and must not have failed yet.
        time.sleep(5)
        state = self._get(invocation_url).json()["state"]
        assert state != "failed", state
        # Once maximum_workflow_invocation_duration (20 seconds) has elapsed,
        # the invocation should be marked failed.
        time.sleep(35)
        state = self._get(invocation_url).json()["state"]
        assert state == "failed", state
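
# The fixed sleeps above make this test timing-sensitive. A hedged
# alternative (a sketch, not part of the original test) is to poll the
# invocation endpoint until the expected state shows up or a deadline passes:
def wait_for_invocation_state(get_invocation_json, target_state, timeout=60, poll=1.0):
    # get_invocation_json is any callable returning the invocation as a dict,
    # e.g. lambda: self._get(invocation_url).json()
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_invocation_json()["state"] == target_state:
            return True
        time.sleep(poll)
    return False


# Example 3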
class FailJobWhenToolUnavailableTestCase(integration_util.IntegrationTestCase):

    require_admin_user = True

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.history_id = self.dataset_populator.new_history()

    @classmethod
    def handle_galaxy_config_kwds(
        cls,
        config,
    ):
        # config["jobs_directory"] = cls.jobs_directory
        # Disable tool dependency resolution.
        config["tool_dependency_dir"] = "none"

    def test_fail_job_when_tool_unavailable(self):
        self.workflow_populator.run_workflow("""
class: GalaxyWorkflow
steps:
  - label: sleep
    run:
      class: GalaxyTool
      command: sleep 20s && echo 'hello world 2' > '$output1'
      outputs:
        output1:
          format: txt
  - tool_id: cat1
    state:
      input1:
        $link: sleep#output1
      queries:
        input2:
          $link: sleep#output1
""",
                                             history_id=self.history_id,
                                             assert_ok=False,
                                             wait=False)
        # Wait until workflow is fully scheduled, otherwise can't test effect of removing tool from queued job
        time.sleep(10)
        self._app.toolbox.remove_tool_by_id('cat1')
        self.dataset_populator.wait_for_history(self.history_id,
                                                assert_ok=False)
        state_details = self.galaxy_interactor.get(
            'histories/%s' % self.history_id).json()['state_details']
        assert state_details['running'] == 0
        assert state_details['ok'] == 1
        assert state_details['error'] == 1
        failed_hda = self.dataset_populator.get_history_dataset_details(
            history_id=self.history_id, assert_ok=False, details=True)
        assert failed_hda['state'] == 'error'
        job = self.galaxy_interactor.get("jobs/%s" %
                                         failed_hda['creating_job']).json()
        assert job['state'] == 'error'
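
# A hypothetical helper (not in the original) condensing the state_details
# assertions above; it assumes the same mapping of state name -> dataset
# count returned by the histories API:
def assert_state_counts(state_details, **expected):
    for state, count in expected.items():
        assert state_details.get(state, 0) == count, (state, state_details)
# usage sketch: assert_state_counts(state_details, running=0, ok=1, error=1)


# Example 4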
class MaximumWorkflowJobsPerSchedulingIterationTestCase(
        integration_util.IntegrationTestCase):

    framework_tool_and_types = True

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.dataset_collection_populator = DatasetCollectionPopulator(
            self.galaxy_interactor)

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        config["maximum_workflow_jobs_per_scheduling_iteration"] = 1

    def test_maximum_workflow_jobs_per_scheduling_iteration(self):
        workflow_id = self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - type: input_collection
  - tool_id: collection_creates_pair
    state:
      input1:
        $link: 0
  - tool_id: collection_paired_test
    state:
      f1:
        $link: 1#paired_output
  - tool_id: cat_list
    state:
      input1:
        $link: 2#out1
""")
        with self.dataset_populator.test_history() as history_id:
            hdca1 = self.dataset_collection_populator.create_list_in_history(
                history_id, contents=["a\nb\nc\nd\n", "e\nf\ng\nh\n"]).json()
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
            inputs = {
                '0': {
                    "src": "hdca",
                    "id": hdca1["id"]
                },
            }
            invocation_id = self.workflow_populator.invoke_workflow(
                history_id, workflow_id, inputs)
            self.workflow_populator.wait_for_workflow(history_id, workflow_id,
                                                      invocation_id)
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
            self.assertEqual(
                "a\nc\nb\nd\ne\ng\nf\nh\n",
                self.dataset_populator.get_history_dataset_content(history_id,
                                                                   hid=0))
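
# The ``inputs`` mapping passed to invoke_workflow above keys step order
# indices to dataset references. A tiny sketch of the two reference shapes
# used throughout these examples (illustrative only):
def hda_ref(encoded_id):
    # a plain history dataset association
    return {"src": "hda", "id": encoded_id}


def hdca_ref(encoded_id):
    # a history dataset collection association, as used for step '0' above
    return {"src": "hdca", "id": encoded_id}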
# Example 5
    def test_create_from_report(self):
        dataset_populator = DatasetPopulator(self.galaxy_interactor)
        workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        test_data = """
input_1:
  value: 1.bed
  type: File
"""
        with dataset_populator.test_history() as history_id:
            summary = workflow_populator.run_workflow("""
class: GalaxyWorkflow
inputs:
  input_1: data
outputs:
  output_1:
    outputSource: first_cat/out_file1
steps:
  first_cat:
    tool_id: cat
    in:
      input1: input_1
""",
                                                      test_data=test_data,
                                                      history_id=history_id)

            workflow_id = summary.workflow_id
            invocation_id = summary.invocation_id
            report_json = workflow_populator.workflow_report_json(
                workflow_id, invocation_id)
            assert "markdown" in report_json
            self._assert_has_keys(report_json, "markdown", "render_format")
            assert report_json["render_format"] == "markdown"
            markdown_content = report_json["markdown"]
            page_request = dict(
                slug="invocation-report",
                title="Invocation Report",
                invocation_id=invocation_id,
            )
            page_response = self._post("pages", page_request, json=True)
            self._assert_status_code_is(page_response, 200)
            page_response = page_response.json()
            show_response = self._get(f"pages/{page_response['id']}")
            self._assert_status_code_is(show_response, 200)
            show_json = show_response.json()
            self._assert_has_keys(show_json, "slug", "title", "id")
            self.assertEqual(show_json["slug"], "invocation-report")
            self.assertEqual(show_json["title"], "Invocation Report")
            self.assertEqual(show_json["content_format"], "markdown")
            markdown_content = show_json["content"]
            assert "## Workflow Outputs" in markdown_content
            assert "## Workflow Inputs" in markdown_content
            assert "## About This Report" not in markdown_content
# Example 6
    def test_search_workflows(self):
        workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        workflow_id = workflow_populator.simple_workflow("test_for_search")
        search_response = self.__search("select * from workflow")
        assert self.__has_result_with_name(
            search_response, "test_for_search"), search_response.text

        # Deleted
        delete_url = self._api_url("workflows/%s" % workflow_id, use_key=True)
        delete(delete_url)

        search_response = self.__search(
            "select * from workflow where deleted = False")
        assert not self.__has_result_with_name(
            search_response, "test_for_search"), search_response.text
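
    # __search and __has_result_with_name are private helpers not shown in
    # this excerpt; a plausible sketch of their shape (an assumption inferred
    # from the query usage above):
    #
    # def __search(self, query):
    #     return self._post("search", data=dict(query=query))
    #
    # def __has_result_with_name(self, response, name):
    #     return name in (hit.get("name") for hit in response.json().get("results", []))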
# Example 7
class WorkflowSyncTestCase(integration_util.IntegrationTestCase):

    framework_tool_and_types = True
    require_admin_user = True

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        cls.workflow_directory = cls._test_driver.mkdtemp()

    def test_sync_format2(self):
        workflow_path = self._write_workflow_content(
            "workflow.yml", WORKFLOW_SIMPLE_CAT_TWICE)
        workflow_id = self.workflow_populator.import_workflow_from_path(
            workflow_path)
        with self.workflow_populator.export_for_update(
                workflow_id) as workflow_object:
            workflow_object["annotation"] = "new annotation"
        with open(workflow_path, "r") as f:
            data = yaml.safe_load(f)
            assert data["doc"] == "new annotation"

    def test_sync_ga(self):
        workflow_json = self.workflow_populator.load_workflow("synctest")
        workflow_path = self._write_workflow_content("workflow.ga",
                                                     json.dumps(workflow_json))
        workflow_id = self.workflow_populator.import_workflow_from_path(
            workflow_path)
        with self.workflow_populator.export_for_update(
                workflow_id) as workflow_object:
            workflow_object["annotation"] = "new annotation"
        with open(workflow_path, "r") as f:
            data = json.load(f)
            assert data["annotation"] == "new annotation"

    def _write_workflow_content(self, filename, content):
        workflow_path = os.path.join(self.workflow_directory, filename)
        with open(workflow_path, "w") as f:
            f.write(content)
        return workflow_path
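
# export_for_update (used above) behaves like a context manager that fetches
# the stored workflow, yields it for mutation, and uploads the edited copy on
# exit - that upload is what triggers the on-disk sync asserted by both
# tests. A minimal sketch of the pattern (illustrative, not Galaxy's
# implementation):
from contextlib import contextmanager


@contextmanager
def export_for_update_sketch(download, upload):
    workflow_object = download()
    yield workflow_object
    upload(workflow_object)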
# Example 8
class BaseWorkflowHandlerConfigurationTestCase(
        integration_util.IntegrationTestCase):

    framework_tool_and_types = True
    assign_with = ""

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.history_id = self.dataset_populator.new_history()

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        config["job_config_file"] = config_file(
            WORKFLOW_HANDLER_JOB_CONFIG_TEMPLATE, assign_with=cls.assign_with)

    def _invoke_n_workflows(self, n):
        workflow_id = self.workflow_populator.upload_yaml_workflow(
            PAUSE_WORKFLOW)
        history_id = self.history_id
        hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
        index_map = {'0': dict(src="hda", id=hda1["id"])}
        request = {
            "history": f"hist_id={history_id}",
            "inputs": dumps(index_map),
            "inputs_by": 'step_index',
        }
        url = f"workflows/{workflow_id}/invocations"
        for i in range(n):
            self._post(url, data=request)

    def _get_workflow_invocations(self):
        # Consider exposing handler via the API to reduce breaking
        # into Galaxy's internal state.
        app = self._app
        history_id = app.security.decode_id(self.history_id)
        sa_session = app.model.context.current
        history = sa_session.query(app.model.History).get(history_id)
        workflow_invocations = history.workflow_invocations
        return workflow_invocations

    @property
    def is_app_workflow_scheduler(self):
        return self._app.workflow_scheduling_manager.request_monitor is not None
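
# WORKFLOW_HANDLER_JOB_CONFIG_TEMPLATE and config_file() are not shown in
# this excerpt. A plausible sketch (assumed template and helper, the real
# ones may differ) writes a job config whose handlers section carries the
# subclass's assign_with policy:
#
# WORKFLOW_HANDLER_JOB_CONFIG_TEMPLATE = """<job_conf>
#     <handlers assign_with="{assign_with}"/>
#     <destinations default="local">
#         <destination id="local" runner="local"/>
#     </destinations>
# </job_conf>"""
#
# def config_file(template, **kwd):
#     fd, path = tempfile.mkstemp(suffix=".xml")
#     with os.fdopen(fd, "w") as f:
#         f.write(template.format(**kwd))
#     return path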
# Example 9
class PageApiTestCase(BasePageApiTestCase, SharingApiTests):

    api_name = "pages"

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)

    def create(self, name: str) -> str:
        response_json = self._create_valid_page_with_slug(name)
        return response_json["id"]

    def test_create(self):
        response_json = self._create_valid_page_with_slug("mypage")
        self._assert_has_keys(response_json, "slug", "title", "id")

    @skip_without_tool("cat")
    def test_create_from_report(self):
        test_data = """
input_1:
  value: 1.bed
  type: File
"""
        with self.dataset_populator.test_history() as history_id:
            summary = self.workflow_populator.run_workflow(
                """
class: GalaxyWorkflow
inputs:
  input_1: data
outputs:
  output_1:
    outputSource: first_cat/out_file1
steps:
  first_cat:
    tool_id: cat
    in:
      input1: input_1
""",
                test_data=test_data,
                history_id=history_id)

            workflow_id = summary.workflow_id
            invocation_id = summary.invocation_id
            report_json = self.workflow_populator.workflow_report_json(
                workflow_id, invocation_id)
            assert "markdown" in report_json
            self._assert_has_keys(report_json, "markdown", "render_format")
            assert report_json["render_format"] == "markdown"
            markdown_content = report_json["markdown"]
            page_request = dict(
                slug="invocation-report",
                title="Invocation Report",
                invocation_id=invocation_id,
            )
            page_response = self._post("pages", page_request, json=True)
            self._assert_status_code_is(page_response, 200)
            page_response = page_response.json()
            show_response = self._get(f"pages/{page_response['id']}")
            self._assert_status_code_is(show_response, 200)
            show_json = show_response.json()
            self._assert_has_keys(show_json, "slug", "title", "id")
            self.assertEqual(show_json["slug"], "invocation-report")
            self.assertEqual(show_json["title"], "Invocation Report")
            self.assertEqual(show_json["content_format"], "markdown")
            markdown_content = show_json["content"]
            assert "## Workflow Outputs" in markdown_content
            assert "## Workflow Inputs" in markdown_content
            assert "## About This Report" not in markdown_content

    def test_index(self):
        create_response_json = self._create_valid_page_with_slug("indexpage")
        assert self._users_index_has_page_with_id(create_response_json["id"])

    def test_index_does_not_show_unavailable_pages(self):
        create_response_json = self._create_valid_page_as(
            "*****@*****.**", "otherspageindex")
        assert not self._users_index_has_page_with_id(
            create_response_json["id"])

    def test_cannot_create_pages_with_same_slug(self):
        page_request = self._test_page_payload(slug="mypage1")
        page_response_1 = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response_1, 200)
        page_response_2 = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response_2, 400)
        self._assert_error_code_is(
            page_response_2,
            error_codes.error_codes_by_name["USER_SLUG_DUPLICATE"])

    def test_cannot_create_pages_with_invalid_slug(self):
        page_request = self._test_page_payload(slug="invalid slug!")
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)

    def test_cannot_create_page_with_invalid_content_format(self):
        page_request = self._test_page_payload(slug="mypageinvalidformat")
        page_request["content_format"] = "xml"
        page_response_1 = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response_1, 400)
        self._assert_error_code_is(
            page_response_1,
            error_codes.error_codes_by_name["USER_REQUEST_INVALID_PARAMETER"])

    def test_page_requires_name(self):
        page_request = self._test_page_payload(slug="requires-name")
        del page_request['title']
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)
        self._assert_error_code_is(
            page_response,
            error_codes.error_codes_by_name["USER_REQUEST_MISSING_PARAMETER"])

    def test_page_requires_slug(self):
        page_request = self._test_page_payload()
        del page_request['slug']
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)

    def test_delete(self):
        response_json = self._create_valid_page_with_slug("testdelete")
        delete_response = delete(
            self._api_url(f"pages/{response_json['id']}", use_key=True))
        self._assert_status_code_is(delete_response, 204)

    def test_400_on_delete_invalid_page_id(self):
        delete_response = delete(
            self._api_url(f"pages/{self._random_key()}", use_key=True))
        self._assert_status_code_is(delete_response, 400)
        self._assert_error_code_is(
            delete_response, error_codes.error_codes_by_name["MALFORMED_ID"])

    def test_403_on_delete_unowned_page(self):
        page_response = self._create_valid_page_as("*****@*****.**",
                                                   "otherspage")
        delete_response = delete(
            self._api_url(f"pages/{page_response['id']}", use_key=True))
        self._assert_status_code_is(delete_response, 403)
        self._assert_error_code_is(
            delete_response,
            error_codes.error_codes_by_name["USER_DOES_NOT_OWN_ITEM"])

    def test_400_on_invalid_id_encoding(self):
        page_request = self._test_page_payload(slug="invalid-id-encding")
        page_request[
            "content"] = '''<p>Page!<div class="embedded-item" id="History-invaidencodedid"></div></p>'''
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)
        self._assert_error_code_is(
            page_response, error_codes.error_codes_by_name["MALFORMED_ID"])

    def test_400_on_invalid_id_encoding_markdown(self):
        page_request = self._test_page_payload(
            slug="invalid-id-encding-markdown", content_format="markdown")
        page_request[
            "content"] = '''```galaxy\nhistory_dataset_display(history_dataset_id=badencoding)\n```\n'''
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)
        self._assert_error_code_is(
            page_response, error_codes.error_codes_by_name["MALFORMED_ID"])
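
    # For contrast with the rejected payload above, a structurally valid
    # Galaxy Markdown directive uses a server-encoded id (the value below is
    # purely illustrative):
    #
    # page_request["content"] = "```galaxy\nhistory_dataset_display(history_dataset_id=f2db41e1fa331b3e)\n```\n"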

    def test_400_on_invalid_embedded_content(self):
        valid_id = self.dataset_populator.new_history()
        page_request = self._test_page_payload(slug="invalid-embed-content")
        page_request[
            "content"] = f'''<p>Page!<div class="embedded-item" id="CoolObject-{valid_id}"></div></p>'''
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)
        self._assert_error_code_is(
            page_response,
            error_codes.error_codes_by_name["USER_REQUEST_INVALID_PARAMETER"])
        assert "embedded HTML content" in page_response.text

    def test_400_on_invalid_markdown_call(self):
        page_request = self._test_page_payload(slug="invalid-markdown-call",
                                               content_format="markdown")
        page_request["content"] = '''```galaxy\njob_metrics(job_id)\n```\n'''
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 400)
        self._assert_error_code_is(
            page_response,
            error_codes.error_codes_by_name["MALFORMED_CONTENTS"])

    def test_show(self):
        response_json = self._create_valid_page_with_slug("pagetoshow")
        show_response = self._get(f"pages/{response_json['id']}")
        self._assert_status_code_is(show_response, 200)
        show_json = show_response.json()
        self._assert_has_keys(show_json, "slug", "title", "id")
        self.assertEqual(show_json["slug"], "pagetoshow")
        self.assertEqual(show_json["title"], "MY PAGE")
        self.assertEqual(show_json["content"], "<p>Page!</p>")
        self.assertEqual(show_json["content_format"], "html")

    def test_403_on_unowner_show(self):
        response_json = self._create_valid_page_as(
            "*****@*****.**", "otherspageshow")
        show_response = self._get(f"pages/{response_json['id']}")
        self._assert_status_code_is(show_response, 403)
        self._assert_error_code_is(
            show_response,
            error_codes.error_codes_by_name["USER_CANNOT_ACCESS_ITEM"])

    def test_501_on_download_pdf_when_service_unavailable(self):
        configuration = self.dataset_populator.get_configuration()
        can_produce_markdown = configuration["markdown_to_pdf_available"]
        if can_produce_markdown:
            raise SkipTest(
                "Skipping test because the server implements markdown conversion to PDF"
            )
        page_request = self._test_page_payload(
            slug="md-page-to-pdf-not-implemented", content_format="markdown")
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 200)
        page_id = page_response.json()['id']
        pdf_response = self._get(f"pages/{page_id}.pdf")
        api_asserts.assert_status_code_is(pdf_response, 501)
        api_asserts.assert_error_code_is(
            pdf_response, error_codes.
            error_codes_by_name["SERVER_NOT_CONFIGURED_FOR_REQUEST"])

    def test_pdf_when_service_available(self):
        configuration = self.dataset_populator.get_configuration()
        can_produce_markdown = configuration["markdown_to_pdf_available"]
        if not can_produce_markdown:
            raise SkipTest(
                "Skipping test because server does not implement markdown conversion to PDF"
            )
        page_request = self._test_page_payload(slug="md-page-to-pdf",
                                               content_format="markdown")
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 200)
        page_id = page_response.json()['id']
        pdf_response = self._get(f"pages/{page_id}.pdf")
        api_asserts.assert_status_code_is(pdf_response, 200)
        assert "application/pdf" in pdf_response.headers['content-type']
        assert pdf_response.content[0:4] == b"%PDF"

    def test_400_on_download_pdf_when_unsupported_content_format(self):
        page_request = self._test_page_payload(slug="html-page-to-pdf",
                                               content_format="html")
        page_response = self._post("pages", page_request, json=True)
        self._assert_status_code_is(page_response, 200)
        page_id = page_response.json()['id']
        pdf_response = self._get(f"pages/{page_id}.pdf")
        self._assert_status_code_is(pdf_response, 400)

    def _users_index_has_page_with_id(self, id):
        index_response = self._get("pages")
        self._assert_status_code_is(index_response, 200)
        pages = index_response.json()
        return id in (_["id"] for _ in pages)
# Example 10
class JobsApiTestCase(ApiTestCase, TestsTools):
    def setUp(self):
        super().setUp()
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.dataset_collection_populator = DatasetCollectionPopulator(
            self.galaxy_interactor)

    @uses_test_history(require_new=True)
    def test_index(self, history_id):
        # Create HDA to ensure at least one job exists...
        self.__history_with_new_dataset(history_id)
        jobs = self.__jobs_index()
        assert "upload1" in map(itemgetter("tool_id"), jobs)

    @uses_test_history(require_new=True)
    def test_system_details_admin_only(self, history_id):
        self.__history_with_new_dataset(history_id)
        jobs = self.__jobs_index(admin=False)
        job = jobs[0]
        self._assert_not_has_keys(job, "external_id")

        jobs = self.__jobs_index(admin=True)
        job = jobs[0]
        self._assert_has_keys(job, "command_line", "external_id")

    @uses_test_history(require_new=True)
    def test_admin_job_list(self, history_id):
        self.__history_with_new_dataset(history_id)
        jobs_response = self._get("jobs?view=admin_job_list", admin=False)
        assert jobs_response.status_code == 403
        assert jobs_response.json()['err_msg'] == 'Only admins can use the admin_job_list view'

        jobs = self._get("jobs?view=admin_job_list", admin=True).json()
        job = jobs[0]
        self._assert_has_keys(job, "command_line", "external_id", 'handler')

    @uses_test_history(require_new=True)
    def test_index_state_filter(self, history_id):
        # Initial number of ok jobs
        original_count = len(self.__uploads_with_state("ok"))
        # Run through a dataset upload to ensure the number of ok uploads
        # grows by at least one.
        self.__history_with_ok_dataset(history_id)

        # Verify number of ok jobs is actually greater.
        count_increased = False
        for _ in range(10):
            new_count = len(self.__uploads_with_state("ok"))
            if original_count < new_count:
                count_increased = True
                break
            time.sleep(.1)

        if not count_increased:
            template = "Jobs in ok state did not increase (was %d, now %d)"
            message = template % (original_count, new_count)
            raise AssertionError(message)

    @uses_test_history(require_new=True)
    def test_index_date_filter(self, history_id):
        self.__history_with_new_dataset(history_id)
        two_weeks_ago = (datetime.datetime.utcnow() -
                         datetime.timedelta(14)).isoformat()
        last_week = (datetime.datetime.utcnow() -
                     datetime.timedelta(7)).isoformat()
        next_week = (datetime.datetime.utcnow() +
                     datetime.timedelta(7)).isoformat()
        today = datetime.datetime.utcnow().isoformat()
        tomorrow = (datetime.datetime.utcnow() +
                    datetime.timedelta(1)).isoformat()

        jobs = self.__jobs_index(data={
            "date_range_min": today[0:10],
            "date_range_max": tomorrow[0:10]
        })
        assert len(jobs) > 0
        today_job_id = jobs[0]["id"]

        jobs = self.__jobs_index(data={
            "date_range_min": two_weeks_ago,
            "date_range_max": last_week
        })
        assert today_job_id not in map(itemgetter("id"), jobs)

        jobs = self.__jobs_index(data={
            "date_range_min": last_week,
            "date_range_max": next_week
        })
        assert today_job_id in map(itemgetter("id"), jobs)

    @uses_test_history(require_new=True)
    def test_index_history(self, history_id):
        self.__history_with_new_dataset(history_id)
        jobs = self.__jobs_index(data={"history_id": history_id})
        assert len(jobs) > 0

        with self.dataset_populator.test_history() as other_history_id:
            jobs = self.__jobs_index(data={"history_id": other_history_id})
            assert len(jobs) == 0

    @uses_test_history(require_new=True)
    def test_index_workflow_and_invocation_filter(self, history_id):
        workflow_simple = """
class: GalaxyWorkflow
name: Simple Workflow
inputs:
  input1: data
outputs:
  wf_output_1:
    outputSource: first_cat/out_file1
steps:
  first_cat:
    tool_id: cat1
    in:
      input1: input1
"""
        summary = self.workflow_populator.run_workflow(
            workflow_simple,
            history_id=history_id,
            test_data={"input1": "hello world"})
        invocation_id = summary.invocation_id
        workflow_id = self._get(
            f"invocations/{invocation_id}").json()['workflow_id']
        self.workflow_populator.wait_for_invocation(workflow_id, invocation_id)
        jobs1 = self.__jobs_index(data={"workflow_id": workflow_id})
        assert len(jobs1) == 1
        jobs2 = self.__jobs_index(data={"invocation_id": invocation_id})
        assert len(jobs2) == 1
        assert jobs1 == jobs2

    @uses_test_history(require_new=True)
    def test_index_workflow_filter_implicit_jobs(self, history_id):
        workflow_id = self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
inputs:
  input_datasets: collection
steps:
  multi_data_optional:
    tool_id: multi_data_optional
    in:
      input1: input_datasets
""")
        hdca_id = self.dataset_collection_populator.create_list_of_list_in_history(
            history_id).json()
        self.dataset_populator.wait_for_history(history_id, assert_ok=True)
        inputs = {
            '0': self.dataset_populator.ds_entry(hdca_id),
        }
        invocation_id = self.workflow_populator.invoke_workflow_and_wait(
            workflow_id, history_id=history_id, inputs=inputs, assert_ok=True)
        jobs1 = self.__jobs_index(data={"workflow_id": workflow_id})
        jobs2 = self.__jobs_index(data={"invocation_id": invocation_id})
        assert len(jobs1) == len(jobs2) == 1
        second_invocation_id = self.workflow_populator.invoke_workflow_and_wait(
            workflow_id, history_id=history_id, inputs=inputs, assert_ok=True)
        workflow_jobs = self.__jobs_index(data={"workflow_id": workflow_id})
        second_invocation_jobs = self.__jobs_index(
            data={"invocation_id": second_invocation_id})
        assert len(workflow_jobs) == 2
        assert len(second_invocation_jobs) == 1

    @uses_test_history(require_new=True)
    def test_index_limit_and_offset_filter(self, history_id):
        self.__history_with_new_dataset(history_id)
        jobs = self.__jobs_index(data={"history_id": history_id})
        assert len(jobs) > 0
        length = len(jobs)
        jobs = self.__jobs_index(data={"history_id": history_id, "offset": 1})
        assert len(jobs) == length - 1
        jobs = self.__jobs_index(data={"history_id": history_id, "limit": 0})
        assert len(jobs) == 0

    @uses_test_history(require_new=True)
    def test_index_user_filter(self, history_id):
        test_user_email = "*****@*****.**"
        user = self._setup_user(test_user_email)
        with self._different_user(email=test_user_email):
            # User should be able to index jobs for their own ID.
            jobs = self.__jobs_index(data={"user_id": user["id"]})
            assert jobs == []
        # Admin should be able to see jobs of another user.
        jobs = self.__jobs_index(data={"user_id": user["id"]}, admin=True)
        assert jobs == []
        # Normal user should not be able to see jobs of another user.
        jobs_response = self._get("jobs", data={"user_id": user["id"]})
        self._assert_status_code_is(jobs_response, 403)
        assert jobs_response.json() == {
            "err_msg": "Only admins can index the jobs of others",
            "err_code": 403006
        }

    @uses_test_history(require_new=True)
    def test_index_multiple_states_filter(self, history_id):
        # Initial number of ok jobs
        original_count = len(self.__uploads_with_state("ok", "new"))

        # Run through a dataset upload to ensure the number of ok uploads
        # grows by at least one.
        self.__history_with_ok_dataset(history_id)

        # Verify number of ok jobs is actually greater.
        new_count = len(self.__uploads_with_state("new", "ok"))
        assert original_count < new_count, new_count

    @uses_test_history(require_new=True)
    def test_show(self, history_id):
        job_properties_tool_run = self.dataset_populator.run_tool(
            tool_id="job_properties",
            inputs={},
            history_id=history_id,
        )
        first_job = self.__jobs_index()[0]
        self._assert_has_key(first_job, 'id', 'state', 'exit_code',
                             'update_time', 'create_time')

        job_id = job_properties_tool_run["jobs"][0]["id"]
        show_jobs_response = self.dataset_populator.get_job_details(job_id)
        self._assert_status_code_is(show_jobs_response, 200)

        job_details = show_jobs_response.json()
        self._assert_has_key(job_details, 'id', 'state', 'exit_code',
                             'update_time', 'create_time')

        show_jobs_response = self.dataset_populator.get_job_details(job_id,
                                                                    full=True)
        self._assert_status_code_is(show_jobs_response, 200)

        job_details = show_jobs_response.json()
        self._assert_has_key(
            job_details,
            "create_time",
            "exit_code",
            "id",
            "job_messages",
            "job_stderr",
            "job_stdout",
            "state",
            "stderr",
            "stdout",
            "tool_stderr",
            "tool_stdout",
            "update_time",
        )

        self.dataset_populator.wait_for_job(job_id, assert_ok=True)
        show_jobs_response = self.dataset_populator.get_job_details(job_id,
                                                                    full=True)
        job_details = show_jobs_response.json()
        assert "The bool is not true\n" not in job_details["job_stdout"]
        assert "The bool is very not true\n" not in job_details["job_stderr"]
        assert job_details["tool_stdout"] == "The bool is not true\n"
        assert job_details["tool_stderr"] == "The bool is very not true\n"
        assert "The bool is not true\n" in job_details["stdout"]
        assert "The bool is very not true\n" in job_details["stderr"]

    @uses_test_history(require_new=True)
    def test_show_security(self, history_id):
        self.__history_with_new_dataset(history_id)
        jobs_response = self._get("jobs", data={"history_id": history_id})
        job = jobs_response.json()[0]
        job_id = job["id"]

        job_lock_response = self._get("job_lock", admin=True)
        job_lock_response.raise_for_status()
        assert not job_lock_response.json()["active"]

        show_jobs_response = self._get(f"jobs/{job_id}", admin=False)
        self._assert_not_has_keys(show_jobs_response.json(), "external_id")

        # TODO: Re-activate test case when API accepts privacy settings
        # with self._different_user():
        #    show_jobs_response = self._get( "jobs/%s" % job_id, admin=False )
        #    self._assert_status_code_is( show_jobs_response, 200 )

        show_jobs_response = self._get(f"jobs/{job_id}", admin=True)
        self._assert_has_keys(show_jobs_response.json(), "command_line",
                              "external_id")

    def _run_detect_errors(self, history_id, inputs):
        payload = self.dataset_populator.run_tool_payload(
            tool_id='detect_errors_aggressive',
            inputs=inputs,
            history_id=history_id,
        )
        return self._post("tools", data=payload).json()

    @skip_without_tool("detect_errors_aggressive")
    def test_unhide_on_error(self):
        with self.dataset_populator.test_history() as history_id:
            inputs = {'error_bool': 'true'}
            run_response = self._run_detect_errors(history_id=history_id,
                                                   inputs=inputs)
            job_id = run_response['jobs'][0]["id"]
            self.dataset_populator.wait_for_job(job_id)
            job = self.dataset_populator.get_job_details(job_id).json()
            assert job['state'] == 'error'
            dataset = self.dataset_populator.get_history_dataset_details(
                history_id=history_id,
                dataset_id=run_response['outputs'][0]['id'],
                assert_ok=False)
            assert dataset['visible']

    def _run_map_over_error(self, history_id):
        hdca1 = self.dataset_collection_populator.create_list_in_history(
            history_id, contents=[("sample1-1", "1 2 3")]).json()
        inputs = {
            'error_bool': 'true',
            'dataset': {
                'batch': True,
                'values': [{
                    'src': 'hdca',
                    'id': hdca1['id']
                }],
            }
        }
        return self._run_detect_errors(history_id=history_id, inputs=inputs)

    @skip_without_tool("detect_errors_aggressive")
    def test_no_unhide_on_error_if_mapped_over(self):
        with self.dataset_populator.test_history() as history_id:
            run_response = self._run_map_over_error(history_id)
            job_id = run_response['jobs'][0]["id"]
            self.dataset_populator.wait_for_job(job_id)
            job = self.dataset_populator.get_job_details(job_id).json()
            assert job['state'] == 'error'
            dataset = self.dataset_populator.get_history_dataset_details(
                history_id=history_id,
                dataset_id=run_response['outputs'][0]['id'],
                assert_ok=False)
            assert not dataset['visible']

    def test_no_hide_on_rerun(self):
        with self.dataset_populator.test_history() as history_id:
            run_response = self._run_map_over_error(history_id)
            job_id = run_response['jobs'][0]["id"]
            self.dataset_populator.wait_for_job(job_id)
            failed_hdca = self.dataset_populator.get_history_collection_details(
                history_id=history_id,
                content_id=run_response['implicit_collections'][0]['id'],
                assert_ok=False,
            )
            first_update_time = failed_hdca['update_time']
            assert failed_hdca['visible']
            rerun_params = self._get(f"jobs/{job_id}/build_for_rerun").json()
            inputs = rerun_params['state_inputs']
            inputs['rerun_remap_job_id'] = job_id
            rerun_response = self._run_detect_errors(history_id=history_id,
                                                     inputs=inputs)
            rerun_job_id = rerun_response['jobs'][0]["id"]
            self.dataset_populator.wait_for_job(rerun_job_id)
            # Verify source hdca is still visible
            hdca = self.dataset_populator.get_history_collection_details(
                history_id=history_id,
                content_id=run_response['implicit_collections'][0]['id'],
                assert_ok=False,
            )
            assert hdca['visible']
            assert isoparse(hda['update_time']) > isoparse(first_update_time)

    @skip_without_tool('empty_output')
    def test_common_problems(self):
        with self.dataset_populator.test_history() as history_id:
            empty_run_response = self.dataset_populator.run_tool(
                tool_id='empty_output',
                inputs={},
                history_id=history_id,
            )
            empty_hda = empty_run_response["outputs"][0]
            cat_empty_twice_run_response = self.dataset_populator.run_tool(
                tool_id='cat1',
                inputs={
                    'input1': {
                        'src': 'hda',
                        'id': empty_hda['id']
                    },
                    'queries_0|input2': {
                        'src': 'hda',
                        'id': empty_hda['id']
                    }
                },
                history_id=history_id,
            )
            empty_output_job = empty_run_response["jobs"][0]
            cat_empty_job = cat_empty_twice_run_response["jobs"][0]
            empty_output_common_problems_response = self._get(
                f"jobs/{empty_output_job['id']}/common_problems").json()
            cat_empty_common_problems_response = self._get(
                f"jobs/{cat_empty_job['id']}/common_problems").json()
            self._assert_has_keys(empty_output_common_problems_response,
                                  "has_empty_inputs", "has_duplicate_inputs")
            self._assert_has_keys(cat_empty_common_problems_response,
                                  "has_empty_inputs", "has_duplicate_inputs")
            assert not empty_output_common_problems_response["has_empty_inputs"]
            assert cat_empty_common_problems_response["has_empty_inputs"]
            assert not empty_output_common_problems_response[
                "has_duplicate_inputs"]
            assert cat_empty_common_problems_response["has_duplicate_inputs"]

    @skip_without_tool('detect_errors_aggressive')
    def test_report_error(self):
        with self.dataset_populator.test_history() as history_id:
            self._run_error_report(history_id)

    @skip_without_tool('detect_errors_aggressive')
    def test_report_error_anon(self):
        with self._different_user(anon=True):
            history_id = self._get(
                urllib.parse.urljoin(
                    self.url, "history/current_history_json")).json()['id']
            self._run_error_report(history_id)

    def _run_error_report(self, history_id):
        payload = self.dataset_populator.run_tool_payload(
            tool_id='detect_errors_aggressive',
            inputs={'error_bool': 'true'},
            history_id=history_id,
        )
        run_response = self._post("tools", data=payload).json()
        job_id = run_response['jobs'][0]["id"]
        self.dataset_populator.wait_for_job(job_id)
        dataset_id = run_response['outputs'][0]['id']
        response = self._post(f'jobs/{job_id}/error',
                              data={'dataset_id': dataset_id})
        assert response.status_code == 200, response.text

    @skip_without_tool('detect_errors_aggressive')
    def test_report_error_bootstrap_admin(self):
        with self.dataset_populator.test_history() as history_id:
            payload = self.dataset_populator.run_tool_payload(
                tool_id='detect_errors_aggressive',
                inputs={'error_bool': 'true'},
                history_id=history_id,
            )
            run_response = self._post("tools",
                                      data=payload,
                                      key=self.master_api_key)
            self._assert_status_code_is(run_response, 400)

    @skip_without_tool("create_2")
    @uses_test_history(require_new=True)
    def test_deleting_output_keep_running_until_all_deleted(self, history_id):
        job_state, outputs = self._setup_running_two_output_job(
            history_id, 120)

        self._hack_to_skip_test_if_state_ok(job_state)

        # Delete one of the two outputs and make sure the job is still running.
        self._raw_update_history_item(history_id, outputs[0]["id"],
                                      {"deleted": True})

        self._hack_to_skip_test_if_state_ok(job_state)

        time.sleep(1)

        self._hack_to_skip_test_if_state_ok(job_state)

        state = job_state().json()["state"]
        assert state == "running", state

        # Delete the second output and make sure the job is cancelled.
        self._raw_update_history_item(history_id, outputs[1]["id"],
                                      {"deleted": True})
        final_state = wait_on_state(job_state, assert_ok=False, timeout=15)
        assert final_state in ["deleting", "deleted"], final_state

    @skip_without_tool("create_2")
    @uses_test_history(require_new=True)
    def test_purging_output_keep_running_until_all_purged(self, history_id):
        job_state, outputs = self._setup_running_two_output_job(
            history_id, 120)

        # Pretty much right away after the job is running, these paths should be
        # populated - if they are, grab them and make sure they are deleted at
        # the end of the job.
        dataset_1 = self._get_history_item_as_admin(history_id,
                                                    outputs[0]["id"])
        dataset_2 = self._get_history_item_as_admin(history_id,
                                                    outputs[1]["id"])
        if "file_name" in dataset_1:
            output_dataset_paths = [
                dataset_1["file_name"], dataset_2["file_name"]
            ]
            # This may or may not exist depending on whether the test is running locally.
            output_dataset_paths_exist = os.path.exists(
                output_dataset_paths[0])
        else:
            output_dataset_paths = []
            output_dataset_paths_exist = False

        self._hack_to_skip_test_if_state_ok(job_state)

        current_state = job_state().json()["state"]
        assert current_state == "running", current_state

        # Purge one of the two outputs and make sure the job is still running.
        self._raw_update_history_item(history_id, outputs[0]["id"],
                                      {"purged": True})
        time.sleep(1)

        self._hack_to_skip_test_if_state_ok(job_state)

        current_state = job_state().json()["state"]
        assert current_state == "running", current_state

        # Purge the second output and make sure the job is cancelled.
        self._raw_update_history_item(history_id, outputs[1]["id"],
                                      {"purged": True})
        final_state = wait_on_state(job_state, assert_ok=False, timeout=15)
        assert final_state in ["deleting", "deleted"], final_state

        def paths_deleted():
            return (not os.path.exists(output_dataset_paths[0])
                    and not os.path.exists(output_dataset_paths[1]))

        if output_dataset_paths_exist:
            wait_on(paths_deleted, "path deletion")
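
    # wait_on (used above) is imported elsewhere in the original module; its
    # assumed contract is to poll a callable until it returns truthy or a
    # timeout elapses, e.g. roughly:
    #
    # def wait_on(check, what, timeout=30, poll=0.5):
    #     end = time.time() + timeout
    #     while time.time() < end:
    #         value = check()
    #         if value:
    #             return value
    #         time.sleep(poll)
    #     raise AssertionError(f"Timed out waiting on {what}")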

    @skip_without_tool("create_2")
    @uses_test_history(require_new=True)
    def test_purging_output_cleaned_after_ok_run(self, history_id):
        job_state, outputs = self._setup_running_two_output_job(history_id, 10)

        # Pretty much right away after the job is running, these paths should be
        # populated - if they are, grab them and make sure they are deleted at
        # the end of the job.
        dataset_1 = self._get_history_item_as_admin(history_id,
                                                    outputs[0]["id"])
        dataset_2 = self._get_history_item_as_admin(history_id,
                                                    outputs[1]["id"])
        if "file_name" in dataset_1:
            output_dataset_paths = [
                dataset_1["file_name"], dataset_2["file_name"]
            ]
            # This may or may not exist depending on whether the test is running locally.
            output_dataset_paths_exist = os.path.exists(
                output_dataset_paths[0])
        else:
            output_dataset_paths = []
            output_dataset_paths_exist = False

        if not output_dataset_paths_exist:
            # Given this Galaxy configuration, there is nothing more to test here.
            # Consider raising a skip instead.
            return

        # Purge one of the two outputs and wait for the job to complete.
        self._raw_update_history_item(history_id, outputs[0]["id"],
                                      {"purged": True})
        wait_on_state(job_state, assert_ok=True)

        if output_dataset_paths_exist:
            time.sleep(.5)
            # Make sure the non-purged dataset is on disk and the purged one is not.
            assert os.path.exists(output_dataset_paths[1])
            assert not os.path.exists(output_dataset_paths[0])

    def _hack_to_skip_test_if_state_ok(self, job_state):
        from unittest import SkipTest
        if job_state().json()["state"] == "ok":
            message = "Job state switched from running to ok too quickly - the rest of the test requires the job to be in a running state. Skipping test."
            raise SkipTest(message)

    def _setup_running_two_output_job(self, history_id, sleep_time):
        payload = self.dataset_populator.run_tool_payload(
            tool_id='create_2',
            inputs=dict(sleep_time=sleep_time, ),
            history_id=history_id,
        )
        run_response = self._post("tools", data=payload)
        run_response.raise_for_status()
        run_object = run_response.json()
        outputs = run_object["outputs"]
        jobs = run_object["jobs"]

        assert len(outputs) == 2
        assert len(jobs) == 1

        def job_state():
            jobs_response = self._get(f"jobs/{jobs[0]['id']}")
            return jobs_response

        # Give job some time to get up and running.
        time.sleep(2)
        running_state = wait_on_state(job_state,
                                      skip_states=["queued", "new"],
                                      assert_ok=False,
                                      timeout=15)
        assert running_state == "running", running_state

        return job_state, outputs
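
    # wait_on_state (used above and elsewhere) is similarly assumed to poll
    # job_state() until the job leaves skip_states, returning the final state
    # string so callers can assert on it.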

    def _raw_update_history_item(self, history_id, item_id, data):
        update_url = self._api_url(
            f"histories/{history_id}/contents/{item_id}", use_key=True)
        update_response = requests.put(update_url, json=data)
        assert_status_code_is_ok(update_response)
        return update_response

    @skip_without_tool("cat_data_and_sleep")
    @uses_test_history(require_new=True)
    def test_resume_job(self, history_id):
        hda1 = self.dataset_populator.new_dataset(
            history_id, content="samp1\t10.0\nsamp2\t20.0\n")
        hda2 = self.dataset_populator.new_dataset(
            history_id, content="samp1\t30.0\nsamp2\t40.0\n")
        # Submit first job
        payload = self.dataset_populator.run_tool_payload(
            tool_id='cat_data_and_sleep',
            inputs={
                'sleep_time': 15,
                'input1': {
                    'src': 'hda',
                    'id': hda2['id']
                },
                'queries_0|input2': {
                    'src': 'hda',
                    'id': hda2['id']
                }
            },
            history_id=history_id,
        )
        run_response = self._post("tools", data=payload).json()
        output = run_response["outputs"][0]
        # Submit second job that waits on job1
        payload = self.dataset_populator.run_tool_payload(
            tool_id='cat1',
            inputs={
                'input1': {
                    'src': 'hda',
                    'id': hda1['id']
                },
                'queries_0|input2': {
                    'src': 'hda',
                    'id': output['id']
                }
            },
            history_id=history_id,
        )
        run_response = self._post("tools", data=payload).json()
        job_id = run_response['jobs'][0]['id']
        output = run_response["outputs"][0]
        # Delete the second job's input while the second job is waiting on the first
        delete_response = self._delete(
            f"histories/{history_id}/contents/{hda1['id']}")
        self._assert_status_code_is(delete_response, 200)
        self.dataset_populator.wait_for_history_jobs(history_id,
                                                     assert_ok=False)
        dataset_details = self._get(
            f"histories/{history_id}/contents/{output['id']}").json()
        assert dataset_details['state'] == 'paused'
        # Undelete input dataset
        undelete_response = self._put(
            f"histories/{history_id}/contents/{hda1['id']}",
            data={'deleted': False},
            json=True)
        self._assert_status_code_is(undelete_response, 200)
        resume_response = self._put(f"jobs/{job_id}/resume")
        self._assert_status_code_is(resume_response, 200)
        self.dataset_populator.wait_for_history_jobs(history_id,
                                                     assert_ok=True)
        dataset_details = self._get(
            f"histories/{history_id}/contents/{output['id']}").json()
        assert dataset_details['state'] == 'ok'

    def _get_history_item_as_admin(self, history_id, item_id):
        response = self._get(
            f"histories/{history_id}/contents/{item_id}?view=detailed",
            admin=True)
        assert_status_code_is_ok(response)
        return response.json()

    @uses_test_history(require_new=True)
    def test_search(self, history_id):
        dataset_id = self.__history_with_ok_dataset(history_id)
        # We first copy the dataset, so that its update time is lower than the job creation time
        new_history_id = self.dataset_populator.new_history()
        copy_payload = {
            "content": dataset_id,
            "source": "hda",
            "type": "dataset"
        }
        copy_response = self._post(f"histories/{new_history_id}/contents",
                                   data=copy_payload,
                                   json=True)
        self._assert_status_code_is(copy_response, 200)
        inputs = json.dumps({'input1': {'src': 'hda', 'id': dataset_id}})
        self._job_search(tool_id='cat1', history_id=history_id, inputs=inputs)
        # We test that a job can be found even if the dataset has been copied to another history
        new_dataset_id = copy_response.json()['id']
        copied_inputs = json.dumps(
            {'input1': {
                'src': 'hda',
                'id': new_dataset_id
            }})
        search_payload = self._search_payload(history_id=history_id,
                                              tool_id='cat1',
                                              inputs=copied_inputs)
        self._search(search_payload, expected_search_count=1)
        # Now we delete the original input HDA that was used -- we should still be able to find the job
        delete_response = self._delete(
            f"histories/{history_id}/contents/{dataset_id}")
        self._assert_status_code_is(delete_response, 200)
        self._search(search_payload, expected_search_count=1)
        # Now we also delete the copy -- we shouldn't find a job
        delete_response = self._delete(
            f"histories/{new_history_id}/contents/{new_dataset_id}")
        self._assert_status_code_is(delete_response, 200)
        self._search(search_payload, expected_search_count=0)
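
    # _job_search, _search_payload and _search are helpers defined elsewhere
    # in the original class; a plausible sketch of the payload builder (an
    # assumption based on how it is called here):
    #
    # def _search_payload(self, history_id, tool_id, inputs, state="ok"):
    #     return dict(tool_id=tool_id, inputs=inputs, state=state)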

    @uses_test_history(require_new=True)
    def test_search_handle_identifiers(self, history_id):
        # Test that the input name and element identifier of a job's output must match for the job to be returned.
        dataset_id = self.__history_with_ok_dataset(history_id)
        inputs = json.dumps({'input1': {'src': 'hda', 'id': dataset_id}})
        self._job_search(tool_id='identifier_single',
                         history_id=history_id,
                         inputs=inputs)
        dataset_update_response = self._put(
            f"histories/{history_id}/contents/{dataset_id}",
            data=dict(name='Renamed Test Dataset'),
            json=True)
        self._assert_status_code_is(dataset_update_response, 200)
        assert dataset_update_response.json()['name'] == 'Renamed Test Dataset'
        search_payload = self._search_payload(history_id=history_id,
                                              tool_id='identifier_single',
                                              inputs=inputs)
        self._search(search_payload, expected_search_count=0)

    @uses_test_history(require_new=True)
    def test_search_delete_outputs(self, history_id):
        dataset_id = self.__history_with_ok_dataset(history_id)
        inputs = json.dumps({'input1': {'src': 'hda', 'id': dataset_id}})
        tool_response = self._job_search(tool_id='cat1',
                                         history_id=history_id,
                                         inputs=inputs)
        output_id = tool_response.json()['outputs'][0]['id']
        delete_response = self._delete(
            f"histories/{history_id}/contents/{output_id}")
        self._assert_status_code_is(delete_response, 200)
        search_payload = self._search_payload(history_id=history_id,
                                              tool_id='cat1',
                                              inputs=inputs)
        self._search(search_payload, expected_search_count=0)

    @uses_test_history(require_new=True)
    def test_search_with_hdca_list_input(self, history_id):
        list_id_a = self.__history_with_ok_collection(collection_type='list',
                                                      history_id=history_id)
        list_id_b = self.__history_with_ok_collection(collection_type='list',
                                                      history_id=history_id)
        inputs = json.dumps({
            'f1': {
                'src': 'hdca',
                'id': list_id_a
            },
            'f2': {
                'src': 'hdca',
                'id': list_id_b
            },
        })
        tool_response = self._job_search(tool_id='multi_data_param',
                                         history_id=history_id,
                                         inputs=inputs)
        # We switch the inputs; this should not return a match
        inputs_switched = json.dumps({
            'f2': {
                'src': 'hdca',
                'id': list_id_a
            },
            'f1': {
                'src': 'hdca',
                'id': list_id_b
            },
        })
        search_payload = self._search_payload(history_id=history_id,
                                              tool_id='multi_data_param',
                                              inputs=inputs_switched)
        self._search(search_payload, expected_search_count=0)
        # We delete the output (an HDA, since multi_data_param reduces collections)
        # and search with the correct input job definition; the job should not be found
        output_id = tool_response.json()['outputs'][0]['id']
        delete_response = self._delete(
            f"histories/{history_id}/contents/{output_id}")
        self._assert_status_code_is(delete_response, 200)
        search_payload = self._search_payload(history_id=history_id,
                                              tool_id='multi_data_param',
                                              inputs=inputs)
        self._search(search_payload, expected_search_count=0)

    @uses_test_history(require_new=True)
    def test_search_delete_hdca_output(self, history_id):
        list_id_a = self.__history_with_ok_collection(collection_type='list',
                                                      history_id=history_id)
        inputs = json.dumps({
            'input1': {
                'src': 'hdca',
                'id': list_id_a
            },
        })
        tool_response = self._job_search(tool_id='collection_creates_list',
                                         history_id=history_id,
                                         inputs=inputs)
        output_id = tool_response.json()['outputs'][0]['id']
        # We delete a single tool output; no job should be returned
        delete_response = self._delete(
            f"histories/{history_id}/contents/{output_id}")
        self._assert_status_code_is(delete_response, 200)
        search_payload = self._search_payload(
            history_id=history_id,
            tool_id='collection_creates_list',
            inputs=inputs)
        self._search(search_payload, expected_search_count=0)
        tool_response = self._job_search(tool_id='collection_creates_list',
                                         history_id=history_id,
                                         inputs=inputs)
        output_collection_id = tool_response.json()['output_collections'][0]['id']
        # We delete a collection output; no job should be returned
        delete_response = self._delete(
            f"histories/{history_id}/contents/dataset_collections/{output_collection_id}"
        )
        self._assert_status_code_is(delete_response, 200)
        search_payload = self._search_payload(
            history_id=history_id,
            tool_id='collection_creates_list',
            inputs=inputs)
        self._search(search_payload, expected_search_count=0)

    @uses_test_history(require_new=True)
    def test_search_with_hdca_pair_input(self, history_id):
        list_id_a = self.__history_with_ok_collection(collection_type='pair',
                                                      history_id=history_id)
        inputs = json.dumps({
            'f1': {
                'src': 'hdca',
                'id': list_id_a
            },
            'f2': {
                'src': 'hdca',
                'id': list_id_a
            },
        })
        self._job_search(tool_id='multi_data_param',
                         history_id=history_id,
                         inputs=inputs)
        # We test that a job can be found even if the collection has been copied to another history
        new_history_id = self.dataset_populator.new_history()
        copy_payload = {
            "content": list_id_a,
            "source": "hdca",
            "type": "dataset_collection"
        }
        copy_response = self._post(f"histories/{new_history_id}/contents",
                                   data=copy_payload,
                                   json=True)
        self._assert_status_code_is(copy_response, 200)
        new_list_a = copy_response.json()['id']
        copied_inputs = json.dumps({
            'f1': {
                'src': 'hdca',
                'id': new_list_a
            },
            'f2': {
                'src': 'hdca',
                'id': new_list_a
            },
        })
        search_payload = self._search_payload(history_id=new_history_id,
                                              tool_id='multi_data_param',
                                              inputs=copied_inputs)
        self._search(search_payload, expected_search_count=1)
        # Now we delete the original input HDCA that was used -- we should still be able to find the job
        delete_response = self._delete(
            f"histories/{history_id}/contents/dataset_collections/{list_id_a}")
        self._assert_status_code_is(delete_response, 200)
        self._search(search_payload, expected_search_count=1)
        # Now we also delete the copy -- we shouldn't find a job
        delete_response = self._delete(
            f"histories/{history_id}/contents/dataset_collections/{new_list_a}"
        )
        self._assert_status_code_is(delete_response, 200)
        self._search(search_payload, expected_search_count=0)

    @uses_test_history(require_new=True)
    def test_search_with_hdca_list_pair_input(self, history_id):
        list_id_a = self.__history_with_ok_collection(
            collection_type='list:pair', history_id=history_id)
        inputs = json.dumps({
            'f1': {
                'src': 'hdca',
                'id': list_id_a
            },
            'f2': {
                'src': 'hdca',
                'id': list_id_a
            },
        })
        self._job_search(tool_id='multi_data_param',
                         history_id=history_id,
                         inputs=inputs)

    @uses_test_history(require_new=True)
    def test_search_with_hdca_list_pair_collection_mapped_over_pair_input(
            self, history_id):
        list_id_a = self.__history_with_ok_collection(
            collection_type='list:pair', history_id=history_id)
        inputs = json.dumps({
            'f1': {
                'batch':
                True,
                'values': [{
                    'src': 'hdca',
                    'id': list_id_a,
                    'map_over_type': 'paired'
                }]
            },
        })
        self._job_search(tool_id='collection_paired_test',
                         history_id=history_id,
                         inputs=inputs)

    def _get_simple_rerun_params(self, history_id, private=False):
        list_id_a = self.__history_with_ok_collection(
            collection_type='list:pair', history_id=history_id)
        inputs = {
            'f1': {
                'batch':
                True,
                'values': [{
                    'src': 'hdca',
                    'id': list_id_a,
                    'map_over_type': 'paired'
                }]
            }
        }
        run_response = self._run(
            history_id=history_id,
            tool_id="collection_paired_test",
            inputs=inputs,
            wait_for_job=True,
            assert_ok=True,
        )
        rerun_params = self._get(
            f"jobs/{run_response['jobs'][0]['id']}/build_for_rerun").json()
        # Since we call rerun on the first (and only) job we should get the expanded input
        # which is a dataset collection element (and not the list:pair hdca that was used as input to the original
        # job).
        assert rerun_params['state_inputs']['f1']['values'][0]['src'] == 'dce'
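        # (illustrative shape, not asserted verbatim:
        #  {'f1': {'batch': True, 'values': [{'src': 'dce', 'id': '...'}]}})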
        if private:
            hdca = self.dataset_populator.get_history_collection_details(
                history_id=history_id, content_id=list_id_a)
            for element in hdca['elements'][0]['object']['elements']:
                self.dataset_populator.make_private(history_id,
                                                    element['object']['id'])
        return rerun_params

    @skip_without_tool("collection_paired_test")
    @uses_test_history(require_new=False)
    def test_job_build_for_rerun(self, history_id):
        rerun_params = self._get_simple_rerun_params(history_id)
        self._run(
            history_id=history_id,
            tool_id="collection_paired_test",
            inputs=rerun_params['state_inputs'],
            wait_for_job=True,
            assert_ok=True,
        )

    @skip_without_tool("collection_paired_test")
    @uses_test_history(require_new=False)
    def test_dce_submission_security(self, history_id):
        rerun_params = self._get_simple_rerun_params(history_id, private=True)
        with self._different_user():
            other_history_id = self.dataset_populator.new_history()
            response = self._run(
                history_id=other_history_id,
                tool_id="collection_paired_test",
                inputs=rerun_params['state_inputs'],
                wait_for_job=False,
                assert_ok=False,
            )
            assert response.status_code == 403

    @skip_without_tool("identifier_collection")
    @uses_test_history(require_new=False)
    def test_job_build_for_rerun_list_list(self, history_id):
        list_id_a = self.__history_with_ok_collection(collection_type='list',
                                                      history_id=history_id)
        list_id_b = self.__history_with_ok_collection(collection_type='list',
                                                      history_id=history_id)
        list_list = self.dataset_collection_populator.create_nested_collection(
            history_id=history_id,
            collection_type='list:list',
            name='list list collection',
            collection=[list_id_a, list_id_b]).json()
        list_list_id = list_list['id']
        first_element = list_list['elements'][0]
        assert first_element['element_type'] == 'dataset_collection'
        assert first_element['element_identifier'] == 'test0'
        assert first_element['model_class'] == 'DatasetCollectionElement'
        inputs = {
            'input1': {
                'batch':
                True,
                'values': [{
                    'src': 'hdca',
                    'id': list_list_id,
                    'map_over_type': 'list'
                }]
            }
        }
        run_response = self._run(
            history_id=history_id,
            tool_id="identifier_collection",
            inputs=inputs,
            wait_for_job=True,
            assert_ok=True,
        )
        assert len(run_response['jobs']) == 2
        rerun_params = self._get(
            f"jobs/{run_response['jobs'][0]['id']}/build_for_rerun").json()
        # Since we call rerun on the first of the two jobs we should get the expanded input,
        # which is a dataset collection element (and not the list:list hdca that was used as
        # input to the original job).
        assert rerun_params['state_inputs']['input1']['values'][0][
            'src'] == 'dce'
        rerun_response = self._run(
            history_id=history_id,
            tool_id="identifier_collection",
            inputs=rerun_params['state_inputs'],
            wait_for_job=True,
            assert_ok=True,
        )
        assert len(rerun_response['jobs']) == 1
        rerun_content = self.dataset_populator.get_history_dataset_content(
            history_id=history_id, dataset=rerun_response['outputs'][0])
        run_content = self.dataset_populator.get_history_dataset_content(
            history_id=history_id, dataset=run_response['outputs'][0])
        assert rerun_content == run_content

    def _job_search(self, tool_id, history_id, inputs):
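        # Contract check: the search finds nothing before the tool runs and
        # exactly one job once the run completes.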
        search_payload = self._search_payload(history_id=history_id,
                                              tool_id=tool_id,
                                              inputs=inputs)
        empty_search_response = self._post("jobs/search", data=search_payload)
        self._assert_status_code_is(empty_search_response, 200)
        self.assertEqual(len(empty_search_response.json()), 0)
        tool_response = self._post("tools", data=search_payload)
        self.dataset_populator.wait_for_tool_run(history_id,
                                                 run_response=tool_response)
        self._search(search_payload, expected_search_count=1)
        return tool_response

    def _search_payload(self, history_id, tool_id, inputs, state='ok'):
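        # The search payload mirrors a tool execution request; 'state' limits
        # matches to jobs in the given state.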
        search_payload = dict(tool_id=tool_id,
                              inputs=inputs,
                              history_id=history_id,
                              state=state)
        return search_payload

    def _search(self, payload, expected_search_count=1):
        # The job and history may not be updated at exactly the same time,
        # so retry a few times before asserting
        for _ in range(5):
            search_count = self._search_count(payload)
            if search_count == expected_search_count:
                break
            time.sleep(1)
        assert search_count == expected_search_count, "expected to find %d jobs, got %d jobs" % (
            expected_search_count, search_count)
        return search_count

    def _search_count(self, search_payload):
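        # Issue a single search request and return the number of matching jobs.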
        search_response = self._post("jobs/search", data=search_payload)
        self._assert_status_code_is(search_response, 200)
        search_json = search_response.json()
        return len(search_json)

    def __uploads_with_state(self, *states):
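        # Fetch the jobs index filtered by state; sanity-check the filter and
        # return only the upload jobs.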
        jobs_response = self._get("jobs", data=dict(state=states))
        self._assert_status_code_is(jobs_response, 200)
        jobs = jobs_response.json()
        assert not [j for j in jobs if j['state'] not in states]
        return [j for j in jobs if j['tool_id'] == 'upload1']

    def __history_with_new_dataset(self, history_id):
        dataset_id = self.dataset_populator.new_dataset(history_id)["id"]
        return dataset_id

    def __history_with_ok_dataset(self, history_id):
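        # Unlike __history_with_new_dataset, wait for the upload job so the
        # dataset reaches the 'ok' state before returning its id.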
        dataset_id = self.dataset_populator.new_dataset(history_id,
                                                        wait=True)["id"]
        return dataset_id

    def __history_with_ok_collection(self,
                                     collection_type='list',
                                     history_id=None):
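        # Create a collection of the requested type, wait for the fetch to
        # finish, and return the resulting HDCA id.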
        if not history_id:
            history_id = self.dataset_populator.new_history()
        if collection_type == 'list':
            fetch_response = self.dataset_collection_populator.create_list_in_history(
                history_id, direct_upload=True).json()
        elif collection_type == 'pair':
            fetch_response = self.dataset_collection_populator.create_pair_in_history(
                history_id, direct_upload=True).json()
        elif collection_type == 'list:pair':
            fetch_response = self.dataset_collection_populator.create_list_of_pairs_in_history(
                history_id).json()
        else:
            raise ValueError(f"Unsupported collection_type: {collection_type}")
        self.dataset_collection_populator.wait_for_fetched_collection(
            fetch_response)
        return fetch_response["outputs"][0]['id']

    def __jobs_index(self, **kwds):
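        # Fetch the jobs index and sanity-check that a JSON list came back.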
        jobs_response = self._get("jobs", **kwds)
        self._assert_status_code_is(jobs_response, 200)
        jobs = jobs_response.json()
        assert isinstance(jobs, list)
        return jobs
Example #14
class MaximumWorkflowJobsPerSchedulingIterationTestCase(
        integration_util.IntegrationTestCase):

    framework_tool_and_types = True

    def setUp(self):
        super().setUp()
        self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
        self.dataset_collection_populator = DatasetCollectionPopulator(
            self.galaxy_interactor)

    @classmethod
    def handle_galaxy_config_kwds(cls, config):
        config["maximum_workflow_jobs_per_scheduling_iteration"] = 1

    def test_collection_explicit_and_implicit(self):
        workflow_id = self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  - type: input_collection
  - tool_id: collection_creates_pair
    state:
      input1:
        $link: 0
  - tool_id: collection_paired_test
    state:
      f1:
        $link: 1/paired_output
  - tool_id: cat_list
    state:
      input1:
        $link: 2/out1
""")
        with self.dataset_populator.test_history() as history_id:
            hdca1 = self.dataset_collection_populator.create_list_in_history(
                history_id, contents=["a\nb\nc\nd\n", "e\nf\ng\nh\n"]).json()
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
            inputs = {
                '0': {
                    "src": "hdca",
                    "id": hdca1["id"]
                },
            }
            invocation_id = self.workflow_populator.invoke_workflow(
                history_id, workflow_id, inputs)
            self.workflow_populator.wait_for_workflow(history_id, workflow_id,
                                                      invocation_id)
            self.dataset_populator.wait_for_history(history_id, assert_ok=True)
            self.assertEqual(
                "a\nc\nb\nd\ne\ng\nf\nh\n",
                self.dataset_populator.get_history_dataset_content(history_id,
                                                                   hid=0))

    def test_scheduling_rounds(self):
        with self.dataset_populator.test_history() as history_id:
            invocation_response = self.workflow_populator.run_workflow(
                """
class: GalaxyWorkflow
inputs:
  input1: data
  text_input: text
steps:
  first_cat:
    tool_id: cat1
    in:
      input1: input1
  second_cat:
    tool_id: cat1
    in:
      input1: first_cat/out_file1
  collection_creates_dynamic_list_of_pairs:
    tool_id: collection_creates_dynamic_list_of_pairs
    in:
      file: second_cat/out_file1
  count_multi_file:
    tool_id: count_multi_file
    in:
      input1: collection_creates_dynamic_list_of_pairs/list_output
outputs:
  wf_output_1:
    outputSource: collection_creates_dynamic_list_of_pairs/list_output
""",
                test_data="""
input1:
  value: 1.fasta
  type: File
  name: fasta1
text_input: foo
""",
                history_id=history_id)
            invocation = self._get(
                f"/invocations/{invocation_response.invocation_id}").json()
            assert 'wf_output_1' in invocation['output_collections']
Example #15
class WorkflowRefactoringIntegrationTestCase(
        integration_util.IntegrationTestCase, UsesShed):

    framework_tool_and_types = True

    def setUp(self):
        super().setUp()
        self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)

    def test_basic_refactoring_types(self):
        self.workflow_populator.upload_yaml_workflow(REFACTORING_SIMPLE_TEST)

        actions = [
            {
                "action_type": "update_name",
                "name": "my cool new name"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.stored_workflow.name == "my cool new name"

        actions = [
            {
                "action_type": "update_annotation",
                "annotation": "my cool new annotation"
            },
        ]
        response = self._refactor(actions)
        assert response.workflow["annotation"] == "my cool new annotation"

        actions = [
            {
                "action_type": "update_license",
                "license": "AFL-3.0"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.license == "AFL-3.0"

        actions = [
            {
                "action_type": "update_creator",
                "creator": [{
                    "class": "Person",
                    "name": "Mary"
                }]
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.creator_metadata[0]["class"] == "Person"
        assert self._latest_workflow.creator_metadata[0]["name"] == "Mary"

        actions = [{
            "action_type": "update_report",
            "report": {
                "markdown": "my report..."
            }
        }]
        self._refactor(actions)
        assert self._latest_workflow.reports_config[
            "markdown"] == "my report..."

        assert self._latest_workflow.step_by_index(0).label == "test_input"
        actions = [
            {
                "action_type": "update_step_label",
                "step": {
                    "order_index": 0
                },
                "label": "new_label"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_index(0).label == "new_label"

        actions = [
            {
                "action_type": "update_step_position",
                "step": {
                    "order_index": 0
                },
                "position": {
                    "left": 3,
                    "top": 5
                }
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_index(0).label == "new_label"
        assert self._latest_workflow.step_by_index(0).position["left"] == 3
        assert self._latest_workflow.step_by_index(0).position["top"] == 5

        # Build raw steps...
        actions = [
            {
                "action_type": "add_step",
                "type": "parameter_input",
                "label": "new_param",
                "tool_state": {
                    "parameter_type": "text"
                },
                "position": {
                    "left": 10,
                    "top": 50
                }
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label(
            "new_param").label == "new_param"
        assert self._latest_workflow.step_by_label(
            "new_param").tool_inputs.get("optional", False) is False
        assert self._latest_workflow.step_by_label(
            "new_param").position["left"] == 10
        assert self._latest_workflow.step_by_label(
            "new_param").position["top"] == 50

        # Cleaner syntax for defining inputs...
        actions = [
            {
                "action_type": "add_input",
                "type": "text",
                "label": "new_param2",
                "optional": True,
                "position": {
                    "top": 1,
                    "left": 2
                }
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label(
            "new_param2").label == "new_param2"
        assert self._latest_workflow.step_by_label(
            "new_param2").tool_inputs.get("optional", False) is True
        assert self._latest_workflow.step_by_label(
            "new_param2").position["top"] == 1
        assert self._latest_workflow.step_by_label(
            "new_param2").position["left"] == 2

        assert len(
            self._latest_workflow.step_by_label("first_cat").inputs) == 1
        actions = [{
            "action_type": "disconnect",
            "input": {
                "label": "first_cat",
                "input_name": "input1"
            },
            "output": {
                "label": "new_label"
            },
        }]
        self._refactor(actions)
        assert len(
            self._latest_workflow.step_by_label("first_cat").inputs) == 0

        actions = [{
            "action_type": "connect",
            "input": {
                "label": "first_cat",
                "input_name": "input1"
            },
            "output": {
                "label": "new_label"
            },
        }]
        self._refactor(actions)
        assert len(
            self._latest_workflow.step_by_label("first_cat").inputs) == 1

        # Re-disconnect so we can test extract_input
        actions = [{
            "action_type": "disconnect",
            "input": {
                "label": "first_cat",
                "input_name": "input1"
            },
            "output": {
                "label": "new_label"
            },
        }]
        self._refactor(actions)

        # try to create an input for first_cat/input1 automatically
        actions = [{
            "action_type": "extract_input",
            "input": {
                "label": "first_cat",
                "input_name": "input1"
            },
            "label": "extracted_input",
        }]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label("extracted_input")
        assert len(
            self._latest_workflow.step_by_label("first_cat").inputs) == 1

        actions = [{
            "action_type": "update_output_label",
            "output": {
                "label": "first_cat",
                "output_name": "out_file1"
            },
            "output_label": "new_wf_out",
        }]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label(
            "first_cat").workflow_outputs[0].label == "new_wf_out"

    def test_basic_refactoring_types_dry_run(self):
        self.workflow_populator.upload_yaml_workflow(REFACTORING_SIMPLE_TEST)

        actions = [
            {
                "action_type": "update_name",
                "name": "my cool new name"
            },
        ]
        response = self._dry_run(actions)
        assert response.workflow["name"] == "my cool new name"

        actions = [
            {
                "action_type": "update_annotation",
                "annotation": "my cool new annotation"
            },
        ]
        response = self._dry_run(actions)
        assert response.workflow["annotation"] == "my cool new annotation"

        actions = [
            {
                "action_type": "update_license",
                "license": "AFL-3.0"
            },
        ]
        response = self._dry_run(actions)
        assert response.workflow["license"] == "AFL-3.0"

        actions = [
            {
                "action_type": "update_creator",
                "creator": [{
                    "class": "Person",
                    "name": "Mary"
                }]
            },
        ]
        response = self._dry_run(actions)
        creator_list = response.workflow["creator"]
        assert isinstance(creator_list, list)
        creator = creator_list[0]
        assert creator["class"] == "Person"
        assert creator["name"] == "Mary"

        actions = [{
            "action_type": "update_report",
            "report": {
                "markdown": "my report..."
            }
        }]
        response = self._dry_run(actions)
        assert response.workflow["report"]["markdown"] == "my report..."

        actions = [
            {
                "action_type": "add_step",
                "type": "parameter_input",
                "label": "new_param",
                "tool_state": {
                    "parameter_type": "text"
                },
                "position": {
                    "left": 10,
                    "top": 50
                }
            },
        ]
        response = self._dry_run(actions)
        workflow_dict = response.workflow
        assert _step_with_label(workflow_dict, "new_param")

        actions = [{
            "action_type": "update_output_label",
            "output": {
                "label": "first_cat",
                "output_name": "out_file1"
            },
            "output_label": "new_wf_out",
        }]
        response = self._dry_run(actions)
        workflow_dict = response.workflow
        first_cat_step = _step_with_label(workflow_dict, "first_cat")
        assert first_cat_step["workflow_outputs"][0]["label"] == "new_wf_out"

    def test_refactoring_legacy_parameters(self):
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_randomlines_legacy_params")
        self.workflow_populator.create_workflow(wf)
        actions = [
            {
                "action_type": "extract_untyped_parameter",
                "name": "seed"
            },
            {
                "action_type": "extract_untyped_parameter",
                "name": "num",
                "label": "renamed_num"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label(
            "seed").tool_inputs["parameter_type"] == "text"
        assert self._latest_workflow.step_by_label(
            "renamed_num").tool_inputs["parameter_type"] == "integer"
        random_lines_state = self._latest_workflow.step_by_index(2).tool_inputs
        assert "num_lines" in random_lines_state
        num_lines = random_lines_state["num_lines"]
        assert isinstance(num_lines, dict)
        assert "__class__" in num_lines
        assert num_lines["__class__"] == 'ConnectedValue'
        assert "seed_source" in random_lines_state
        seed_source = random_lines_state["seed_source"]
        assert isinstance(seed_source, dict)
        assert "seed" in seed_source
        seed = seed_source["seed"]
        assert isinstance(seed, dict)
        assert "__class__" in seed
        assert seed["__class__"] == 'ConnectedValue'

        # cannot handle mixed, incompatible types on the inputs though
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_randomlines_legacy_params_mixed_types")
        self.workflow_populator.create_workflow(wf)
        actions = [
            {
                "action_type": "extract_untyped_parameter",
                "name": "mixed_param"
            },
        ]
        expected_exception = None
        try:
            self._refactor(actions)
        except Exception as e:
            expected_exception = e
        assert expected_exception
        assert "input types" in str(expected_exception)

    def test_refactoring_legacy_parameters_without_tool_state(self):
        # test parameters used in PJA without being used in tool state.
        # These will work fine with the simplified workflow UI, but should probably
        # be formalized and assigned a unique label and informative annotation.
        self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
inputs:
  test_input: data
steps:
  first_cat:
    tool_id: cat
    in:
      input1: test_input
    outputs:
      out_file1:
        rename: "${pja_only_param} name"
""")
        actions = [
            {
                "action_type": "extract_untyped_parameter",
                "name": "pja_only_param"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label(
            "pja_only_param").tool_inputs["parameter_type"] == "text"

    def test_refactoring_legacy_parameters_without_tool_state_dry_run(self):
        # same as above but dry run...
        self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
inputs:
  test_input: data
steps:
  first_cat:
    tool_id: cat
    in:
      input1: test_input
    outputs:
      out_file1:
        rename: "${pja_only_param} name"
""")
        actions = [
            {
                "action_type": "extract_untyped_parameter",
                "name": "pja_only_param"
            },
        ]
        response = self._dry_run(actions)
        new_step = _step_with_label(response.workflow, "pja_only_param")
        state_str = new_step["tool_state"]
        state = json.loads(state_str)
        assert state["parameter_type"] == "text"

    def test_refactoring_legacy_parameters_without_tool_state_relabel(self):
        # same thing as above, but apply relabeling and ensure PJA gets updated.
        self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
inputs:
  test_input: data
steps:
  first_cat:
    tool_id: cat
    in:
      input1: test_input
    outputs:
      out_file1:
        rename: "${pja_only_param} name"
""")
        actions = [
            {
                "action_type": "extract_untyped_parameter",
                "name": "pja_only_param",
                "label": "new_label"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_label(
            "new_label").tool_inputs["parameter_type"] == "text"
        pjas = self._latest_workflow.step_by_label(
            "first_cat").post_job_actions
        assert len(pjas) == 1
        pja = pjas[0]
        assert "newname" in pja.action_arguments
        assert "${new_label}" in pja.action_arguments["newname"]

    def test_removing_unlabeled_workflow_outputs(self):
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_randomlines_legacy_params")
        self.workflow_populator.create_workflow(wf)
        only_step = self._latest_workflow.step_by_index(0)
        assert len(only_step.workflow_outputs) == 1
        actions = [
            {
                "action_type": "remove_unlabeled_workflow_outputs"
            },
        ]
        self._refactor(actions)
        only_step = self._latest_workflow.step_by_index(0)
        assert len(only_step.workflow_outputs) == 0

    def test_fill_defaults_option(self):
        # This is a prereq for other state-filling refactoring tests that would
        # fit better in API tests for workflow import options, but fill_defaults
        # happens automatically on export, so this may only be testable in an
        # integration test currently.

        # populating a workflow with incomplete state...
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_two_random_lines")
        ts = json.loads(wf["steps"]["0"]["tool_state"])
        del ts["num_lines"]
        wf["steps"]["0"]["tool_state"] = json.dumps(ts)
        self.workflow_populator.create_workflow(wf, fill_defaults=False)
        first_step = self._latest_workflow.step_by_label("random1")
        assert "num_lines" not in first_step.tool_inputs

        self.workflow_populator.create_workflow(wf, fill_defaults=True)
        first_step = self._latest_workflow.step_by_label("random1")
        assert "num_lines" in first_step.tool_inputs
        assert json.loads(first_step.tool_inputs["num_lines"]) == 1

    def test_refactor_works_with_subworkflows(self):
        self.workflow_populator.upload_yaml_workflow(WORKFLOW_NESTED_SIMPLE)
        actions = [
            {
                "action_type": "update_step_label",
                "step": {
                    "label": "nested_workflow"
                },
                "label": "new_nested_workflow"
            },
        ]
        self._refactor(actions)
        self._latest_workflow.step_by_label("new_nested_workflow")

    def test_refactor_works_with_incomplete_state(self):
        # populating a workflow with incomplete state...
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_two_random_lines")
        ts = json.loads(wf["steps"]["0"]["tool_state"])
        del ts["num_lines"]
        wf["steps"]["0"]["tool_state"] = json.dumps(ts)
        self.workflow_populator.create_workflow(wf, fill_defaults=False)

        assert self._latest_workflow.step_by_index(0).label == "random1"
        actions = [
            {
                "action_type": "update_step_label",
                "step": {
                    "order_index": 0
                },
                "label": "random1_new"
            },
        ]
        self._refactor(actions)
        first_step = self._latest_workflow.step_by_label("random1_new")
        assert "num_lines" not in first_step.tool_inputs

    def test_refactor_works_with_missing_tools(self):
        # populating a workflow with incomplete state...
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_two_random_lines")
        wf["steps"]["1"]["tool_id"] = "random-missing"
        wf["steps"]["1"]["content_id"] = "random-missing"
        self.workflow_populator.create_workflow(wf, fill_defaults=False)

        assert self._latest_workflow.step_by_index(1).label == "random2"
        assert self._latest_workflow.step_by_index(
            1).tool_id == "random-missing"
        assert "num_lines" in self._latest_workflow.step_by_index(
            1).tool_inputs

        actions = [
            {
                "action_type": "update_step_label",
                "step": {
                    "order_index": 1
                },
                "label": "random2_new"
            },
        ]
        self._refactor(actions)
        assert self._latest_workflow.step_by_index(1).label == "random2_new"
        assert "num_lines" in self._latest_workflow.step_by_index(
            1).tool_inputs

    def test_refactor_fill_step_defaults(self):
        self._load_two_random_lines_wf_with_missing_state()
        actions = [
            {
                "action_type": "fill_step_defaults",
                "step": {
                    "order_index": 0
                }
            },
        ]
        action_executions = self._refactor(actions).action_executions
        first_step = self._latest_workflow.step_by_label("random1")
        assert "num_lines" in first_step.tool_inputs
        assert len(action_executions) == 1
        action_execution = action_executions[0]
        assert len(action_execution.messages) == 1
        message = action_execution.messages[0]
        assert message.order_index == 0
        assert message.step_label == "random1"
        assert message.input_name == "num_lines"

        # ensure other step untouched...
        second_step = self._latest_workflow.step_by_label("random2")
        assert "num_lines" not in second_step.tool_inputs

    def test_refactor_fill_step_defaults_dry_run(self):
        self._load_two_random_lines_wf_with_missing_state()
        actions = [
            {
                "action_type": "fill_step_defaults",
                "step": {
                    "order_index": 0
                }
            },
        ]
        response = self._dry_run(actions)
        action_executions = response.action_executions
        assert len(action_executions) == 1
        action_execution = action_executions[0]
        assert len(action_execution.messages) == 1
        message = action_execution.messages[0]
        assert message.order_index == 0
        assert message.step_label == "random1"
        assert message.input_name == "num_lines"

        # TODO:
        # first_step = self._latest_workflow.step_by_label("random1")
        # assert "num_lines" in first_step.tool_inputs

        # ensure other step untouched...
        # second_step = self._latest_workflow.step_by_label("random2")
        # assert "num_lines" not in second_step.tool_inputs

    def test_refactor_fill_defaults(self):
        self._load_two_random_lines_wf_with_missing_state()
        actions = [
            {
                "action_type": "fill_defaults"
            },
        ]
        action_executions = self._refactor(actions).action_executions

        first_step = self._latest_workflow.step_by_label("random1")
        assert "num_lines" in first_step.tool_inputs
        second_step = self._latest_workflow.step_by_label("random2")
        assert "num_lines" in second_step.tool_inputs

        assert len(action_executions) == 1
        action_execution = action_executions[0]
        assert len(action_execution.messages) == 2
        message = action_execution.messages[0]
        assert message.order_index == 0
        assert message.step_label == "random1"
        assert message.input_name == "num_lines"
        message = action_execution.messages[1]
        assert message.order_index == 1
        assert message.step_label == "random2"
        assert message.input_name == "num_lines"

    def test_tool_version_upgrade_no_state_change(self):
        self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  the_step:
    tool_id: multiple_versions
    tool_version: '0.1'
    state:
      inttest: 0
""")
        assert self._latest_workflow.step_by_label(
            "the_step").tool_version == "0.1"
        actions = [
            {
                "action_type": "upgrade_tool",
                "step": {
                    "label": "the_step"
                }
            },
        ]
        # t = self._app.toolbox.get_tool("multiple_versions", tool_version="0.1")
        # assert t is not None
        # assert t.version == "0.1"
        action_executions = self._refactor(actions).action_executions
        assert len(action_executions) == 1
        assert len(action_executions[0].messages) == 0
        assert self._latest_workflow.step_by_label(
            "the_step").tool_version == "0.2"

    def test_tool_version_upgrade_state_added(self):
        self.workflow_populator.upload_yaml_workflow("""
class: GalaxyWorkflow
steps:
  the_step:
    tool_id: multiple_versions_changes
    tool_version: '0.1'
    state:
      inttest: 0
""")
        assert self._latest_workflow.step_by_label(
            "the_step").tool_version == "0.1"
        actions = [
            {
                "action_type": "upgrade_tool",
                "step": {
                    "label": "the_step"
                },
                "tool_version": "0.2"
            },
        ]
        action_executions = self._refactor(actions).action_executions

        assert self._latest_workflow.step_by_label(
            "the_step").tool_version == "0.2"

        assert len(action_executions) == 1
        messages = action_executions[0].messages
        assert len(messages) == 1
        message = messages[0]
        assert message.message_type == RefactorActionExecutionMessageTypeEnum.tool_state_adjustment
        assert message.order_index == 0
        assert message.step_label == "the_step"
        assert message.input_name == "floattest"

    def test_subworkflow_upgrade_simplest(self):
        self.workflow_populator.upload_yaml_workflow(WORKFLOW_NESTED_SIMPLE)
        # the second most recent stored workflow is the nested workflow; grab it and update...
        nested_stored_workflow = self._recent_stored_workflow(2)
        assert len(nested_stored_workflow.workflows) == 1

        self._increment_nested_workflow_version(nested_stored_workflow,
                                                num_lines_from="1",
                                                num_lines_to="2")
        self._app.model.session.expunge(nested_stored_workflow)
        # ensure subworkflow updated properly...
        nested_stored_workflow = self._recent_stored_workflow(2)
        assert len(nested_stored_workflow.workflows) == 2
        updated_nested_step = nested_stored_workflow.latest_workflow.step_by_label(
            "random_lines")
        assert updated_nested_step.tool_inputs["num_lines"] == "2"

        # we now have a nested workflow with a simple update; download
        # the target workflow and ensure it still points at the old version
        pre_upgrade_native = self._download_native(
            self._most_recent_stored_workflow)
        self._assert_nested_workflow_num_lines_is(pre_upgrade_native, "1")

        actions = [
            {
                "action_type": "upgrade_subworkflow",
                "step": {
                    "label": "nested_workflow"
                }
            },
        ]
        response = self._dry_run(actions)
        action_executions = response.action_executions
        assert len(action_executions) == 1
        assert len(action_executions[0].messages) == 0

        action_executions = self._refactor(actions).action_executions
        assert len(action_executions) == 1
        assert len(action_executions[0].messages) == 0

        post_upgrade_native = self._download_native(
            self._most_recent_stored_workflow)
        self._assert_nested_workflow_num_lines_is(post_upgrade_native, "2")

    def test_subworkflow_upgrade_specified(self):
        self.workflow_populator.upload_yaml_workflow(WORKFLOW_NESTED_SIMPLE)
        # the second most recent stored workflow is the nested workflow; grab it and update...
        nested_stored_workflow = self._recent_stored_workflow(2)

        # create two versions so we can test jumping to the middle one...
        self._increment_nested_workflow_version(nested_stored_workflow,
                                                num_lines_from="1",
                                                num_lines_to="20")
        self._increment_nested_workflow_version(nested_stored_workflow,
                                                num_lines_from="20",
                                                num_lines_to="30")
        self._app.model.session.expunge(nested_stored_workflow)
        # ensure subworkflow updated properly...
        nested_stored_workflow = self._recent_stored_workflow(2)
        assert len(nested_stored_workflow.workflows) == 3
        middle_workflow_id = self._app.security.encode_id(
            nested_stored_workflow.workflows[1].id)
        actions = [
            {
                "action_type": "upgrade_subworkflow",
                "step": {
                    "label": "nested_workflow"
                },
                "content_id": middle_workflow_id
            },
        ]
        action_executions = self._dry_run(actions).action_executions
        assert len(action_executions) == 1
        assert len(action_executions[0].messages) == 0

        action_executions = self._refactor(actions).action_executions
        assert len(action_executions) == 1
        assert len(action_executions[0].messages) == 0
        post_upgrade_native = self._download_native(
            self._most_recent_stored_workflow)
        self._assert_nested_workflow_num_lines_is(post_upgrade_native, "20")

    def test_subworkflow_upgrade_connection_input_dropped(self):
        self.workflow_populator.upload_yaml_workflow(WORKFLOW_NESTED_SIMPLE)

        nested_stored_workflow = self._recent_stored_workflow(2)
        actions = [
            {
                "action_type": "update_step_label",
                "step": {
                    "label": "inner_input"
                },
                "label": "renamed_inner_input"
            },
        ]
        self._refactor(actions, stored_workflow=nested_stored_workflow)

        actions = [
            {
                "action_type": "upgrade_subworkflow",
                "step": {
                    "label": "nested_workflow"
                }
            },
        ]
        action_executions = self._refactor(actions).action_executions
        native_dict = self._download_native()
        nested_step = _step_with_label(native_dict, "nested_workflow")
        # the step's order_index within the subworkflow shifts down from 2
        # because it no longer has inbound inputs
        assert nested_step["subworkflow"]["steps"]["0"][
            "label"] == "renamed_inner_input"
        assert len(action_executions) == 1
        messages = action_executions[0].messages
        assert len(messages) == 1

        message = messages[0]
        assert message.message_type == RefactorActionExecutionMessageTypeEnum.connection_drop_forced
        assert message.order_index == 2
        assert message.step_label == "nested_workflow"
        assert message.input_name == "inner_input"
        assert message.from_step_label == "first_cat"
        assert message.from_order_index == 1
        assert message.output_name == "out_file1"

    def test_subworkflow_upgrade_connection_output_dropped(self):
        self.workflow_populator.upload_yaml_workflow(WORKFLOW_NESTED_SIMPLE)

        nested_stored_workflow = self._recent_stored_workflow(2)
        actions = [{
            "action_type": "update_output_label",
            "output": {
                "label": "random_lines",
                "output_name": "out_file1"
            },
            "output_label": "renamed_output",
        }]
        self._refactor(actions, stored_workflow=nested_stored_workflow)

        actions = [
            {
                "action_type": "upgrade_subworkflow",
                "step": {
                    "label": "nested_workflow"
                }
            },
        ]
        action_executions = self._refactor(actions).action_executions
        assert len(action_executions) == 1
        messages = action_executions[0].messages

        # it was connected to two inputs on the second_cat step
        assert len(messages) == 2
        for message in messages:
            assert message.message_type == RefactorActionExecutionMessageTypeEnum.connection_drop_forced
            assert message.order_index == 3
            assert message.step_label == "second_cat"
            assert message.input_name in ["input1", "queries_0|input2"]
            assert message.from_step_label == "nested_workflow"
            assert message.from_order_index == 2
            assert message.output_name == "workflow_output"

    def test_subworkflow_upgrade_output_label_dropped(self):
        self.workflow_populator.upload_yaml_workflow(
            WORKFLOW_NESTED_RUNTIME_PARAMETER)

        nested_stored_workflow = self._recent_stored_workflow(2)
        actions = [{
            "action_type": "update_output_label",
            "output": {
                "label": "random_lines",
                "output_name": "out_file1"
            },
            "output_label": "renamed_output",
        }]
        self._refactor(actions, stored_workflow=nested_stored_workflow)

        actions = [
            {
                "action_type": "upgrade_subworkflow",
                "step": {
                    "label": "nested_workflow"
                }
            },
        ]
        action_executions = self._refactor(actions).action_executions
        assert len(action_executions) == 1
        messages = action_executions[0].messages
        assert len(messages) == 1

        message = messages[0]
        assert message.message_type == RefactorActionExecutionMessageTypeEnum.workflow_output_drop_forced
        assert message.order_index == 1
        assert message.step_label == "nested_workflow"
        assert message.output_name == "workflow_output"
        assert message.output_label == "outer_output"

    def test_upgrade_all_steps(self):
        self.install_repository("iuc", "compose_text_param",
                                "feb3acba1e0a")  # 0.1.0
        self.install_repository("iuc", "compose_text_param",
                                "e188c9826e0f")  # 0.1.1
        self.workflow_populator.upload_yaml_workflow(
            WORKFLOW_NESTED_WITH_MULTIPLE_VERSIONS_TOOL)
        nested_stored_workflow = self._recent_stored_workflow(2)
        assert self._latest_workflow.step_by_label(
            "tool_update_step").tool_version == "0.1"
        updated_nested_step = nested_stored_workflow.latest_workflow.step_by_label(
            "random_lines")
        assert updated_nested_step.tool_inputs["num_lines"] == "1"

        self._increment_nested_workflow_version(nested_stored_workflow,
                                                num_lines_from="1",
                                                num_lines_to="2")
        self._app.model.session.expunge(nested_stored_workflow)
        # ensure subworkflow updated properly...
        nested_stored_workflow = self._recent_stored_workflow(2)
        assert len(nested_stored_workflow.workflows) == 2
        actions = [
            {
                "action_type": "upgrade_all_steps"
            },
        ]
        action_executions = self._refactor(actions).action_executions
        assert self._latest_workflow.step_by_label(
            "tool_update_step").tool_version == "0.2"
        nested_stored_workflow = self._recent_stored_workflow(2)
        updated_nested_step = nested_stored_workflow.latest_workflow.step_by_label(
            "random_lines")
        assert updated_nested_step.tool_inputs["num_lines"] == "2"
        assert self._latest_workflow.step_by_label(
            "compose_text_param").tool_version == '0.1.1'
        assert self._latest_workflow.step_by_label(
            "compose_text_param"
        ).tool_id == 'toolshed.g2.bx.psu.edu/repos/iuc/compose_text_param/compose_text_param/0.1.1'

        assert len(action_executions) == 1
        messages = action_executions[0].messages
        assert len(messages) == 1
        message = messages[0]
        assert message.message_type == RefactorActionExecutionMessageTypeEnum.connection_drop_forced
        assert message.order_index == 2
        assert message.step_label == "tool_update_step"
        assert message.output_name == "output"

    def _download_native(self, workflow=None):
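        # Download the native (.ga-style dict) representation of the workflow.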
        workflow = workflow or self._most_recent_stored_workflow
        workflow_id = self._app.security.encode_id(workflow.id)
        return self.workflow_populator.download_workflow(workflow_id)

    @contextlib.contextmanager
    def _export_for_update(self, workflow):
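        # Yield the native workflow dict for editing; the populator uploads the
        # modified dict as a new version of the workflow when the block exits.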
        workflow_id = self._app.security.encode_id(workflow.id)
        with self.workflow_populator.export_for_update(
                workflow_id) as workflow_object:
            yield workflow_object

    def _refactor(self,
                  actions,
                  stored_workflow=None,
                  dry_run=False,
                  style="ga"):
        user = self._app.model.session.query(User).order_by(
            User.id.desc()).limit(1).one()
        mock_trans = MockTrans(self._app, user)

        app = self._app
        original_url_for = app.url_for

        def url_for(*args, **kwd):
            return ''

        app.url_for = url_for
        try:
            return self._manager.refactor(
                mock_trans, stored_workflow
                or self._most_recent_stored_workflow,
                RefactorRequest(actions=actions, dry_run=dry_run, style=style))
        finally:
            app = url_for = original_url_for
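    # Usage sketch: callers pass plain dicts mirroring the RefactorRequest
    # action schema, e.g.
    #     self._refactor([{"action_type": "update_name", "name": "new name"}])
    # Other action payload shapes shown in comments in this class are
    # illustrative assumptions rather than a verified schema reference.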

    def _dry_run(self, actions, stored_workflow=None):
        # Check that a dry-run refactor writes nothing workflow-related to
        # the database and adds nothing to the sa_session.
        sa_session = self._app.model.session
        sa_session.flush()

        sw_update_time = self._model_last_time(StoredWorkflow)
        assert sw_update_time
        w_update_time = self._model_last_time(Workflow)
        assert w_update_time
        ws_last_id = self._model_last_id(WorkflowStep)
        assert ws_last_id
        wsc_last_id = self._model_last_id(WorkflowStepConnection)
        pja_last_id = self._model_last_id(PostJobAction)
        pjaa_last_id = self._model_last_id(PostJobActionAssociation)
        wo_last_id = self._model_last_id(WorkflowOutput)

        response = self._refactor(actions,
                                  stored_workflow=stored_workflow,
                                  dry_run=True)
        sa_session.flush()
        sa_session.expunge_all()
        assert sw_update_time == self._model_last_time(StoredWorkflow)
        assert w_update_time == self._model_last_time(Workflow)
        assert ws_last_id == self._model_last_id(WorkflowStep)
        assert wsc_last_id == self._model_last_id(WorkflowStepConnection)
        assert pja_last_id == self._model_last_id(PostJobAction)
        assert pjaa_last_id == self._model_last_id(PostJobActionAssociation)
        assert wo_last_id == self._model_last_id(WorkflowOutput)

        return response
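    # Usage sketch: any action list can be wrapped to prove it persists
    # nothing when dry_run is set, e.g.
    #     self._dry_run([{"action_type": "remove_unlabeled_workflow_outputs"}])
    # (the action type here is an assumed example, chosen only because it
    # needs no extra payload).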

    def _model_last_time(self, clazz):
        obj = self._app.model.session.query(clazz).order_by(
            clazz.update_time.desc()).limit(1).one()
        return obj.update_time

    def _model_last_id(self, clazz):
        obj = self._app.model.session.query(clazz).order_by(
            clazz.id.desc()).limit(1).one_or_none()
        return obj.id if obj else None

    @property
    def _manager(self):
        return self._app.workflow_contents_manager

    @property
    def _most_recent_stored_workflow(self):
        return self._recent_stored_workflow(1)

    def _recent_stored_workflow(self, n=1):
        app = self._app
        return app.model.session.query(StoredWorkflow).order_by(
            StoredWorkflow.id.desc()).limit(n).all()[-1]

    @property
    def _latest_workflow(self):
        return self._most_recent_stored_workflow.latest_workflow

    def _increment_nested_workflow_version(self,
                                           nested_stored_workflow,
                                           num_lines_from="1",
                                           num_lines_to="2"):
        # Create a new version of the nested workflow (from
        # WORKFLOW_NESTED_SIMPLE) by changing num_lines in the tool state of
        # its random_lines1 step.
        with self._export_for_update(
                nested_stored_workflow) as native_workflow_dict:
            tool_step = native_workflow_dict["steps"]["1"]
            assert tool_step["type"] == "tool"
            assert tool_step["tool_id"] == "random_lines1"
            tool_state_json = tool_step["tool_state"]
            tool_state = json.loads(tool_state_json)
            assert tool_state["num_lines"] == num_lines_from
            tool_state["num_lines"] = num_lines_to
            tool_step["tool_state"] = json.dumps(tool_state)

    def _assert_nested_workflow_num_lines_is(self, native_dict, num_lines):
        # assuming native_dict is the .ga representation of WORKFLOW_NESTED_SIMPLE,
        # or some update created with _increment_nested_workflow_version, assert
        # the nested num_lines step is as specified
        target_out_step = native_dict["steps"]["2"]
        assert "subworkflow" in target_out_step
        target_subworkflow = target_out_step["subworkflow"]
        target_state_json = target_subworkflow["steps"]["1"]["tool_state"]
        target_state = json.loads(target_state_json)
        assert target_state["num_lines"] == num_lines

    def _load_two_random_lines_wf_with_missing_state(self):
        wf = self.workflow_populator.load_workflow_from_resource(
            "test_workflow_two_random_lines")
        ts = json.loads(wf["steps"]["0"]["tool_state"])
        del ts["num_lines"]
        wf["steps"]["0"]["tool_state"] = json.dumps(ts)
        wf["steps"]["1"]["tool_state"] = json.dumps(ts)
        self.workflow_populator.create_workflow(wf, fill_defaults=False)

        first_step = self._latest_workflow.step_by_label("random1")
        assert "num_lines" not in first_step.tool_inputs
        second_step = self._latest_workflow.step_by_label("random2")
        assert "num_lines" not in second_step.tool_inputs