Example #1
def test_add_dir_contents_to_repo_dry_run():
    ctx = test_context()
    ctx._global_config = {"github": {"access_token": 'ABCDEFG'}}
    with temp_directory() as test_dir, temp_directory() as repo_dir:
        with open(os.path.join(test_dir, 'Readme.md'), 'w') as readme:
            readme.write('#Very important!')
        git.init(ctx, repo_path=repo_dir)
        add_dir_contents_to_repo(ctx,
                                 from_dir=test_dir,
                                 target_dir="workflows/my-cool-workflow",
                                 target_repository_path=repo_dir,
                                 version=1.0,
                                 notes='The big release!',
                                 dry_run=True)
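
Every example in this collection leans on a ``temp_directory`` context manager. The sketch below is a minimal plausible implementation inferred from the call sites seen here (no arguments, a positional prefix such as ``"tool_shed_diff_"``, and a ``dir=`` keyword); the real planemo helper may differ.

import contextlib
import shutil
import tempfile


@contextlib.contextmanager
def temp_directory(prefix="planemo_tmp_", dir=None):
    # Sketch only: create a scratch directory, hand it to the caller,
    # and remove it even if the with-block raises.
    path = tempfile.mkdtemp(prefix=prefix, dir=dir)
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)
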
Example #2
def _realize_effective_repositories(ctx, path, **kwds):
    """ Expands folders in a source code repository into tool shed
    repositories.

    Each folder may have nested repositories, and each folder may correspond
    to many repositories (for instance, if a folder has n tools in the source
    code repository but they are published to the tool shed as one repository
    per tool).
    """
    raw_repo_objects = _find_raw_repositories(path, **kwds)
    failed = False
    with temp_directory() as base_dir:
        for raw_repo_object in raw_repo_objects:
            if isinstance(raw_repo_object, Exception):
                _handle_realization_error(raw_repo_object, **kwds)
                failed = True
                continue

            realized_repos = raw_repo_object.realizations(
                ctx,
                base_dir,
                **kwds
            )
            for realized_repo in realized_repos:
                if isinstance(realized_repo, Exception):
                    _handle_realization_error(realized_repo, **kwds)
                    failed = True
                    continue
                yield realized_repo
    if failed:
        raise RealizationException()
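
Because failures are collected and ``RealizationException`` is raised only after the loop finishes, every repository gets a chance to realize before the error surfaces. A hypothetical caller (the function and ``handle`` helper below are illustrative, not part of the source) would consume the generator like this:

def _consume_realized_repositories(ctx, path, **kwds):
    # Illustrative caller, not from the original source.
    try:
        for realized_repo in _realize_effective_repositories(ctx, path, **kwds):
            handle(realized_repo)  # hypothetical per-repository handler
    except RealizationException:
        # Raised once, at the end, if any realization failed above.
        ctx.vlog("At least one repository failed to realize.")
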
Example #3
def test_changelog_in_repo():
    CHANGELOG = """
# Changelog

## [0.2]

### Changed

- Turn the AmpliconRemoval variant FILTER into an AmpliconBias INFO flag


## [0.1]

- Initial version of COVID-19: variation analysis on ARTIC PE data workflow
    """
    EXPECTED_FRAGMENT = """## [0.2]

### Changed

- Turn the AmpliconRemoval variant FILTER into an AmpliconBias INFO flag
"""
    with temp_directory() as test_dir:
        with open(os.path.join(test_dir, "CHANGELOG.md"), 'w') as changelog:
            changelog.write(CHANGELOG)
        assert changelog_in_repo(test_dir) == EXPECTED_FRAGMENT
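
This test pins down the contract of ``changelog_in_repo``: given a repository directory, it returns only the newest release section of a Keep a Changelog-style ``CHANGELOG.md``, normalized to end with a single newline. A minimal sketch with that behavior (the file name and the ``## [`` heading convention are assumptions drawn from the test data):

import os
import re


def changelog_in_repo_sketch(repo_dir):
    # Sketch only; the real changelog_in_repo may differ. Split on
    # second-level '## [' headings: sections[0] is the '# Changelog'
    # preamble, sections[1] is the newest release.
    with open(os.path.join(repo_dir, "CHANGELOG.md")) as fh:
        sections = re.split(r"^(?=## \[)", fh.read(), flags=re.MULTILINE)
    if len(sections) < 2:
        return ""
    return sections[1].rstrip("\n") + "\n"
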
Example #4
def _realize_effective_repositories(ctx, path, **kwds):
    """ Expands folders in a source code repository into tool shed
    repositories.

    Each folder may have nested repositories, and each folder may correspond
    to many repositories (for instance, if a folder has n tools in the source
    code repository but they are published to the tool shed as one repository
    per tool).
    """
    raw_repo_objects = _find_raw_repositories(ctx, path, **kwds)
    failed = False
    with temp_directory() as base_dir:
        for raw_repo_object in raw_repo_objects:
            if isinstance(raw_repo_object, Exception):
                _handle_realization_error(raw_repo_object, **kwds)
                failed = True
                continue

            realized_repos = raw_repo_object.realizations(
                ctx,
                base_dir,
                **kwds
            )
            for realized_repo in realized_repos:
                if isinstance(realized_repo, Exception):
                    _handle_realization_error(realized_repo, **kwds)
                    failed = True
                    continue
                yield realized_repo
    if failed:
        raise RealizationException()
Example #5
def diff_repo(ctx, realized_repository, **kwds):
    """Compare two repositories (local or remote) and check for differences.

    Returns 0 if and only if the repositories are effectively the same,
    given the supplied kwds describing the comparison.
    """
    with temp_directory("tool_shed_diff_") as working:
        return _diff_in(ctx, working, realized_repository, **kwds)
Example #6
def test_add_dir_contents_to_repo():
    ctx = test_context()
    ctx._global_config = {"github": {"access_token": 'ABCDEFG'}}
    with temp_directory() as test_dir, temp_directory() as repo_dir:
        with open(os.path.join(test_dir, 'Readme.md'), 'w') as readme:
            readme.write('#Very important!')
        git.init(ctx, repo_path=repo_dir)
        with pytest.raises(RuntimeError) as excinfo:
            # Can't push without remote
            add_dir_contents_to_repo(ctx,
                                     from_dir=test_dir,
                                     target_dir="workflows/my-cool-workflow",
                                     target_repository_path=repo_dir,
                                     version=1.0,
                                     notes='The big release!',
                                     dry_run=False)
        assert "Problem executing commands git push" in str(excinfo.value)
Example #7
def cli(ctx, paths, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests, planemo needs a Galaxy instance to utilize. planemo
    will search parent directories to see if any is a Galaxy instance,
    but one can pick the Galaxy instance to use with the ``--galaxy_root``
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console, various detailed
    output summaries can be configured. ``tool_test_output.html`` (settable
    via ``--test_output``) will contain a human-consumable HTML report
    describing the test run. A JSON file (settable via ``--test_output_json``
    and defaulting to ``tool_test_output.json``) will also be created. These
    files can be disabled by passing in empty arguments or globally by
    setting the values ``default_test_output`` and/or
    ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``. For
    continuous integration testing, an xUnit-style report can be configured
    using ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment variables
    to attempt to shield this execution of Galaxy from manually launched runs
    against that same Galaxy root, but this may not be bulletproof yet, so
    please be careful and do not try this against production Galaxy instances.
    """
    with temp_directory(dir=ctx.planemo_directory) as temp_path:
        # Create temp dir(s) outside of temp, docker can't mount $TEMPDIR on OSX
        runnables = for_paths(paths, temp_path=temp_path)
        is_cwl = all(r.type in {RunnableType.cwl_tool, RunnableType.cwl_workflow} for r in runnables)
        if kwds.get("engine") is None:
            kwds["engine"] = "galaxy" if not is_cwl else "cwltool"

        engine_type = kwds["engine"]
        test_engine_testable = {RunnableType.galaxy_tool, RunnableType.galaxy_datamanager, RunnableType.directory}
        enable_test_engines = any(r.type not in test_engine_testable for r in runnables)
        enable_test_engines = enable_test_engines or engine_type != "galaxy"
        if enable_test_engines:
            ctx.vlog("Using test engine type %s" % engine_type)
            with engine_context(ctx, **kwds) as engine:
                test_data = engine.test(runnables)
                return_value = handle_reports_and_summary(ctx, test_data.structured_data, kwds=kwds)
        else:
            ctx.vlog("Running traditional Galaxy tool tests using run_tests.sh in Galaxy root %s" % engine_type)
            kwds["for_tests"] = True
            if kwds.get('update_test_data'):
                non_copied_runnables = for_paths(paths)
                kwds['test_data_target_dir'] = _find_test_data(non_copied_runnables, **kwds)
            with galaxy_config(ctx, runnables, **kwds) as config:
                return_value = run_in_config(ctx, config, **kwds)

    ctx.exit(return_value)
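
The docstring above notes that the default report files can be disabled globally through ``~/.planemo.yml``. A hypothetical fragment of that file, using exactly the keys the docstring names:

default_test_output: null
default_test_output_json: null
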
Example #8
def cli(ctx, uris, **kwds):
    """Run specified tool's tests within Galaxy.

    All referenced tools (by default all the tools in the current working
    directory) will be tested and the results quickly summarized.

    To run these tests, planemo needs a Galaxy instance to utilize. planemo
    will search parent directories to see if any is a Galaxy instance,
    but one can pick the Galaxy instance to use with the ``--galaxy_root``
    option or force planemo to download a disposable instance with the
    ``--install_galaxy`` flag.

    In addition to the quick summary printed to the console, various detailed
    output summaries can be configured. ``tool_test_output.html`` (settable
    via ``--test_output``) will contain a human-consumable HTML report
    describing the test run. A JSON file (settable via ``--test_output_json``
    and defaulting to ``tool_test_output.json``) will also be created. These
    files can be disabled by passing in empty arguments or globally by
    setting the values ``default_test_output`` and/or
    ``default_test_output_json`` in ``~/.planemo.yml`` to ``null``. For
    continuous integration testing, an xUnit-style report can be configured
    using ``--test_output_xunit``.

    planemo uses temporarily generated config files and environment variables
    to attempt to shield this execution of Galaxy from manually launched runs
    against that same Galaxy root, but this may not be bulletproof yet, so
    please be careful and do not try this against production Galaxy instances.
    """
    with temp_directory(dir=ctx.planemo_directory) as temp_path:
        # Create temp dir(s) outside of temp, docker can't mount $TEMPDIR on OSX
        runnables = for_runnable_identifiers(ctx,
                                             uris,
                                             kwds,
                                             temp_path=temp_path)

        # pick a default engine type if needed
        is_cwl = all(
            r.type in {RunnableType.cwl_tool, RunnableType.cwl_workflow}
            for r in runnables)
        if kwds.get("engine", None) is None:
            if is_cwl:
                kwds["engine"] = "cwltool"
            elif kwds.get('galaxy_url', None):
                kwds["engine"] = "external_galaxy"
            else:
                kwds["engine"] = "galaxy"

        return_value = test_runnables(ctx,
                                      runnables,
                                      original_paths=uris,
                                      **kwds)

    ctx.exit(return_value)
Example #9
def _path_on_disk(ctx, path):
    git_path = None
    if path.startswith("git:"):
        git_path = path
    elif path.startswith("git+"):
        git_path = path[len("git+"):]
    if git_path is None:
        yield path
    else:
        with temp_directory() as git_repo:
            git.clone(ctx, git_path, git_repo)
            yield git_repo
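
``_path_on_disk`` is a generator, so it is presumably exposed as a context manager. The wrapper and consumer below are an assumed sketch, not shown in the source:

import contextlib

# Assumption: the surrounding module wraps the generator roughly like this.
path_on_disk = contextlib.contextmanager(_path_on_disk)


def resolve_locally(ctx, path):
    # Plain paths come back unchanged; 'git:' / 'git+' prefixed paths come
    # back as a fresh clone that lives only as long as the with-block.
    with path_on_disk(ctx, path) as local_path:
        print(local_path)
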
Example #10
def cli(ctx, paths, **kwds):
    """Auto-update tool requirements by checking against Conda and updating if newer versions are available."""
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    if kwds['skiplist']:
        # Use a context manager so the skiplist file handle is closed.
        with open(kwds['skiplist']) as skiplist_file:
            tools_to_skip = [line.rstrip() for line in skiplist_file]
    else:
        tools_to_skip = []
    for (tool_path,
         tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
        if tool_path.split('/')[-1] in tools_to_skip:
            info("Skipping tool %s" % tool_path)
            continue
        info("Auto-updating tool %s" % tool_path)
        try:
            updated = autoupdate.autoupdate_tool(ctx,
                                                 tool_path,
                                                 modified_files=modified_files,
                                                 **kwds)
            if updated:
                modified_files.update(updated)
        except Exception as e:
            error("{} could not be updated - the following error "
                  "was raised: {}".format(tool_path, e))
        if handle_tool_load_error(tool_path, tool_xml):
            exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
            continue
        else:
            exit_codes.append(EXIT_CODE_OK)

    if kwds['test']:
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(
                        ctx, paths, recursive) if path in modified_files
                ]
                info(
                    f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}"
                )
                runnables = for_paths(modified_paths, temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx,
                                              runnables,
                                              original_paths=paths,
                                              **kwds)
                exit_codes.append(return_value)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
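
Note that the skiplist match above compares only the final path component, so the skiplist file is expected to hold one tool file or directory name per line. An illustrative check (the tool names are made up):

tools_to_skip = ["tool_a.xml", "tool_b.xml"]  # one entry per skiplist line
for tool_path in ["tools/tool_a.xml", "tools/tool_c.xml"]:
    if tool_path.split('/')[-1] in tools_to_skip:
        print("Skipping tool %s" % tool_path)  # matches tools/tool_a.xml only
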
Example #11
def _path_on_disk(path):
    git_path = None
    if path.startswith("git:"):
        git_path = path
    elif path.startswith("git+"):
        git_path = path[len("git+"):]
    if git_path is None:
        yield path
    else:
        with temp_directory() as git_repo:
            # TODO: pass ctx down through
            git.clone(None, git_path, git_repo)
            yield git_repo
Example #12
def cli(ctx, paths, **kwds):  # noqa C901
    """Auto-update tool requirements by checking against Conda and updating if newer versions are available."""
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    if kwds['skiplist']:
        # Use a context manager so the skiplist file handle is closed.
        with open(kwds['skiplist']) as skiplist_file:
            tools_to_skip = [line.rstrip() for line in skiplist_file]
    else:
        tools_to_skip = []
    runnables = for_paths(paths)

    if any(r.type in {RunnableType.galaxy_tool, RunnableType.directory}
           for r in runnables):
        # update Galaxy tools
        for (tool_path,
             tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
            if tool_path.split('/')[-1] in tools_to_skip:
                info("Skipping tool %s" % tool_path)
                continue
            info("Auto-updating tool %s" % tool_path)
            try:
                updated = autoupdate.autoupdate_tool(
                    ctx, tool_path, modified_files=modified_files, **kwds)
                if updated:
                    modified_files.update(updated)
            except Exception as e:
                error(
                    f"{tool_path} could not be updated - the following error was raised: {e}"
                )
            if handle_tool_load_error(tool_path, tool_xml):
                exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
                continue
            else:
                exit_codes.append(EXIT_CODE_OK)

    workflows = [
        r for r in runnables if r.type == RunnableType.galaxy_workflow
    ]
    modified_workflows = []
    if workflows:
        assert is_galaxy_engine(**kwds)
        if kwds.get("engine") != "external_galaxy":
            kwds["install_most_recent_revision"] = True
            kwds["install_resolver_dependencies"] = False
            kwds["install_repository_dependencies"] = False
            kwds['shed_install'] = True

        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served(workflows) as config:
                for workflow in workflows:
                    if config.updated_repos.get(workflow.path) or kwds.get(
                            "engine") == "external_galaxy":
                        info("Auto-updating workflow %s" % workflow.path)
                        updated_workflow = autoupdate.autoupdate_wf(
                            ctx, config, workflow)
                        if workflow.path.endswith(".ga"):
                            with open(workflow.path, 'w') as f:
                                json.dump(updated_workflow,
                                          f,
                                          indent=4,
                                          sort_keys=True)
                        else:
                            format2_wrapper = from_galaxy_native(
                                updated_workflow, json_wrapper=True)
                            with open(workflow.path, "w") as f:
                                f.write(format2_wrapper["yaml_content"])
                        modified_workflows.append(workflow.path)
                    else:
                        info(
                            "No newer tool versions were found, so the workflow was not updated."
                        )

    if kwds['test']:
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(
                        ctx, paths, recursive) if path in modified_files
                ]
                info(
                    f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}"
                )
                runnables = for_paths(modified_paths + modified_workflows,
                                      temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx,
                                              runnables,
                                              original_paths=paths,
                                              **kwds)
                exit_codes.append(return_value)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
Example #13
def diff_repo(ctx, realized_repository, **kwds):
    with temp_directory("tool_shed_diff_") as working:
        return _diff_in(ctx, working, realized_repository, **kwds)