Example #1
def _create_shed_config(ctx, path, **kwds):
    name = kwds.get("name", None) or path_to_repo_name(os.path.dirname(path))
    name_invalid = validate_repo_name(name)
    if name_invalid:
        error(name_invalid)
        return 1

    owner = kwds.get("owner", None)
    if owner is None:
        owner = ctx.global_config.get("shed_username", None)
    owner_invalid = validate_repo_owner(owner)
    if owner_invalid:
        error(owner_invalid)
        return 1
    description = kwds.get("description", None) or name
    long_description = kwds.get("long_description", None)
    remote_repository_url = kwds.get("remote_repository_url", None)
    homepage_url = kwds.get("homepage_url", None)
    categories = kwds.get("category", [])
    config = dict(
        name=name,
        owner=owner,
        description=description,
        long_description=long_description,
        remote_repository_url=remote_repository_url,
        homepage_url=homepage_url,
        categories=categories,
    )
    # Remove empty entries...
    for k in list(config.keys()):
        if config[k] is None:
            del config[k]

    with open(path, "w") as f:
        yaml.safe_dump(config, f)
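
The None-pruning above keeps unset optional fields out of the generated .shed.yml. A minimal sketch of the same behavior (assuming PyYAML is installed; the values are illustrative, not planemo defaults):

import yaml

config = dict(
    name="my_repo",
    owner="example_owner",
    description="my_repo",
    long_description=None,          # optional fields left unset...
    remote_repository_url=None,     # ...are pruned before the dump
    homepage_url=None,
    categories=[],
)
# Remove empty entries, exactly as _create_shed_config does.
for k in list(config.keys()):
    if config[k] is None:
        del config[k]

print(yaml.safe_dump(config))
# categories: []
# description: my_repo
# name: my_repo
# owner: example_owner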
Example #2
 def __init__(self, json_path):
     self.json_path = json_path
     if not os.path.exists(json_path):
         error("Warning: Problem with target Galaxy, it did not "
               "produce a structured test results files - summary "
               "information and planemo reports will be incorrect."
               )
     else:
         try:
             with open(json_path, "r") as output_json_f:
                 structured_data = json.load(output_json_f)
                 structured_data_tests = structured_data["tests"]
         except Exception:
             error("Galaxy produced invalid JSON for structured data - summary "
                   "information and planemo reports will be incorrect.")
             structured_data = {}
             structured_data_tests = {}
     self.structured_data = structured_data
     self.structured_data_tests = structured_data_tests
     structured_data_by_id = {}
     for test in self.structured_data_tests:
         structured_data_by_id[test["id"]] = test["data"]
     self.structured_data_by_id = structured_data_by_id
     self.has_details = "summary" in structured_data
     if self.has_details:
         self.read_summary()
Example #3
def cli(ctx, path, **kwds):
    """Source output to activate a conda environment for this tool.

        % . <(planemo conda_env bowtie2.xml)
        % which bowtie2
        TODO_PLACE_PATH_HERE
    """
    conda_context = build_conda_context(use_planemo_shell_exec=False, **kwds)
    conda_targets = collect_conda_targets(
        path, conda_context=conda_context
    )
    installed_conda_targets = conda_util.filter_installed_targets(
        conda_targets, conda_context=conda_context
    )
    env_name, exit_code = conda_util.build_isolated_environment(
        installed_conda_targets, conda_context=conda_context
    )
    if exit_code:
        error("Failed to build environmnt for request.")
        return 1

    ps1 = ps1_for_path(path, base="PRE_CONDA_PS1")
    remove_env = "%s env remove -y --name '%s'" % (
        conda_context.conda_exec, env_name
    )
    deactivate = conda_context.deactivate
    activate = conda_context.activate
    command = SOURCE_COMMAND % (
        activate, env_name, ps1,
        deactivate, remove_env
    )
    print(command)
Example #4
def handle_lint_complete(lint_ctx, lint_args, failed=False):
    """Complete linting of a target and decide exit code."""
    if not failed:
        failed = lint_ctx.failed(lint_args["fail_level"])
    if failed:
        error("Failed linting")
    return 1 if failed else 0
Example #5
def run_cwltool(ctx, path, job_path, **kwds):
    """Translate planemo kwds to cwltool kwds and run cwltool main function."""
    ensure_cwltool_available()

    args = []
    conformance_test = kwds.get("conformance_test", False)
    if conformance_test:
        args.append("--conformance-test")
    if ctx.verbose:
        args.append("--verbose")
    output_directory = kwds.get("output_directory", None)
    if output_directory:
        args.append("--outdir")
        args.append(output_directory)
    if kwds.get("no_container", False):
        args.append("--no-container")

    args.extend([path, job_path])
    ctx.vlog("Calling cwltool with arguments %s" % args)
    with tempfile.NamedTemporaryFile() as tmp_stdout, \
            tempfile.NamedTemporaryFile() as tmp_stderr:
        # cwltool passes sys.stderr to subprocess.Popen - ensure it has
        # an actual fileno.
        with real_io():
            ret_code = main.main(
                args,
                stdout=tmp_stdout,
                stderr=tmp_stderr
            )
        tmp_stdout.flush()
        tmp_stderr.flush()
        with open(tmp_stderr.name, "r") as stderr_f:
            log = stderr_f.read()
            ctx.vlog("cwltool log output [%s]" % log)
        with open(tmp_stdout.name, "r") as stdout_f:
            try:
                result = json.load(stdout_f)
            except ValueError:
                message = JSON_PARSE_ERROR_MESSAGE % (
                    open(tmp_stdout.name, "r").read(),
                    tmp_stdout.name,
                    log
                )
                error(message)
                raise Exception(message)

        if ret_code != 0:
            return ErrorRunResponse("Error running cwltool", log=log)
        if conformance_test:
            cwl_command_state = result
            outputs = None
        else:
            cwl_command_state = None
            outputs = result
    return CwlToolRunResponse(
        log,
        cwl_command_state=cwl_command_state,
        outputs=outputs,
    )
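
Both versions of run_cwltool rely on the same capture pattern: hand the callee a NamedTemporaryFile (which, unlike io.StringIO, has a real fileno), flush it, then reopen it by name to read what was written. A standalone sketch of that pattern on a POSIX system (illustrative, not planemo code):

import subprocess
import tempfile

with tempfile.NamedTemporaryFile("w") as tmp_stdout:
    # subprocess needs a stream with a real fileno(); StringIO won't do.
    subprocess.run(["echo", "hello"], stdout=tmp_stdout, check=True)
    tmp_stdout.flush()
    # Reopen by name to read back the captured output.
    with open(tmp_stdout.name, "r") as f:
        print(f.read(), end="")  # hello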
Example #6
def run_cwltool(ctx, path, job_path, **kwds):
    """Translate planemo kwds to cwltool kwds and run cwltool main function."""
    ensure_cwltool_available()

    args = []
    if ctx.verbose:
        args.append("--verbose")
    output_directory = kwds.get("output_directory", None)
    if output_directory:
        args.append("--outdir")
        args.append(output_directory)
    if kwds.get("no_container", False):
        args.append("--no-container")
        ensure_dependency_resolvers_conf_configured(ctx, kwds)
        args.append("--beta-dependency-resolvers-configuration")
        args.append(kwds["dependency_resolvers_config_file"])
    if kwds.get("mulled_containers"):
        args.append("--beta-use-biocontainers")

    if kwds.get("non_strict_cwl", False):
        args.append("--non-strict")

    args.extend([path, job_path])
    ctx.vlog("Calling cwltool with arguments %s" % args)
    with tempfile.NamedTemporaryFile("w") as tmp_stdout, \
            tempfile.NamedTemporaryFile("w") as tmp_stderr:
        # cwltool passes sys.stderr to subprocess.Popen - ensure it has
        # an actual fileno.
        with real_io():
            ret_code = main.main(
                args,
                stdout=tmp_stdout,
                stderr=tmp_stderr,
            )
        tmp_stdout.flush()
        tmp_stderr.flush()
        with open(tmp_stderr.name, "r") as stderr_f:
            log = stderr_f.read()
            ctx.vlog("cwltool log output [%s]" % log)
        with open(tmp_stdout.name, "r") as stdout_f:
            try:
                result = json.load(stdout_f)
            except ValueError:
                message = JSON_PARSE_ERROR_MESSAGE % (
                    open(tmp_stdout.name, "r").read(),
                    tmp_stdout.name,
                    log
                )
                error(message)
                raise Exception(message)

        if ret_code != 0:
            return ErrorRunResponse("Error running cwltool", log=log)
        outputs = result
    return CwlToolRunResponse(
        log,
        outputs=outputs,
    )
Example #7
def cli(ctx, path, **kwds):
    """Generate various tool test reports (HTML, text, markdown) from
    structure output from tests (tool_test_output.json).
    """
    if not os.path.exists(path):
        io.error("Failed to tool test json file at %s" % path)
        return 1

    test_data = StructuredData(path)
    handle_reports(ctx, test_data, kwds)
Example #8
 def _with_ts_exception_handling(self, f):
     try:
         return f()
     except Exception as e:
         # TODO: galaxyproject/bioblend#126
         try:
             upstream_error = json.loads(e.read())
             error(upstream_error['err_msg'])
         except Exception:
             error(str(e))
         return None
Example #9
def cli(ctx, path, **kwds):
    """Generate human readable tool test reports.

    Creates reports in various formats (HTML, text, markdown)
    from the structured test output (tool_test_output.json).
    """
    if not os.path.exists(path):
        io.error("Failed to tool test json file at %s" % path)
        return 1

    test_data = StructuredData(path)
    test_data.calculate_summary_data_if_needed()
    handle_reports(ctx, test_data.structured_data, kwds)
Example #10
def process_tool_dependencies_xml(tool_dep, install_handle, env_sh_handle):
    """Writes to handles, returns success as a boolean."""
    if not os.path.isfile(tool_dep):
        error('Missing file %s' % tool_dep)
        return False
    if not os.stat(tool_dep).st_size:
        error('Empty file %s' % tool_dep)
        return False
    try:
        install, env = convert_tool_dep(tool_dep)
    except Exception as err:
        # TODO - pass in ctx for logging?
        error('Error processing %s - %s' %
              (click.format_filename(tool_dep), err))
        if not isinstance(err, (NotImplementedError, RuntimeError)):
            # This is an unexpected error, traceback is useful
            import traceback
            error(traceback.format_exc() + "\n")
        return False
    # Worked...
    for cmd in install:
        install_handle.write(cmd + "\n")
    for cmd in env:
        env_sh_handle.write(cmd + "\n")
    return True
Example #11
def __create_repository(ctx, tsi, path, **kwds):
    """Wrapper for creating the endpoint if it doesn't exist
    """
    try:
        repo = shed.create_repository(ctx, tsi, path, **kwds)
        return repo['id']
    # Have to catch missing synopsis/bioblend exceptions
    except Exception as e:
        # TODO: galaxyproject/bioblend#126
        try:
            upstream_error = json.loads(e.read())
            error(upstream_error['err_msg'])
        except Exception:
            error(str(e))
        return None
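
This nested try/except shows up in several snippets here: bioblend errors (see galaxyproject/bioblend#126) carry a readable body that is often a JSON document with an err_msg field. A self-contained sketch of the extraction, using a hypothetical stand-in for the bioblend error:

import json

class FakeBioblendError(Exception):
    """Hypothetical stand-in for an error with a readable JSON body."""
    def read(self):
        return '{"err_msg": "Repository already exists."}'

def best_error_message(e):
    # Prefer the upstream err_msg when the body parses as JSON;
    # otherwise fall back to the exception's string form.
    try:
        return json.loads(e.read())["err_msg"]
    except Exception:
        return str(e)

print(best_error_message(FakeBioblendError()))   # Repository already exists.
print(best_error_message(ValueError("boom")))    # boom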
Example #12
 def find_repository_id(self, ctx, tsi):
     try:
         repo_id = _find_repository_id(
             ctx,
             tsi,
             name=self.name,
             repo_config=self.config,
             allow_none=True,
         )
         return repo_id
     except Exception as e:
         message = api_exception_to_message(e)
         error("Could not update %s" % self.name)
         error(message)
     return None
Example #13
def lint_tools_on_path(ctx, paths, lint_args, **kwds):
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    for (tool_path, tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
        if handle_tool_load_error(tool_path, tool_xml):
            exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
            continue
        info("Linting tool %s" % tool_path)
        if not lint_tool_source(tool_xml, **lint_args):
            error("Failed linting")
            exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
        else:
            exit_codes.append(EXIT_CODE_OK)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
Example #14
def lint_tools_on_path(ctx, paths, lint_args, **kwds):
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit = 0
    valid_tools = 0
    for path in paths:
        for (tool_path, tool_xml) in yield_tool_xmls(ctx, path, recursive):
            info("Linting tool %s" % tool_path)
            if not lint_xml(tool_xml, **lint_args):
                error("Failed linting")
                exit = 1
            else:
                valid_tools += 1
    if exit == 0 and valid_tools == 0 and assert_tools:
        exit = 2
    return exit
Example #15
def for_each_repository(ctx, function, paths, **kwds):
    ret_codes = []
    for path in paths:
        with _path_on_disk(path) as raw_path:
            try:
                for realized_repository in _realize_effective_repositories(
                    ctx, raw_path, **kwds
                ):
                    ret_codes.append(
                        function(realized_repository)
                    )
            except RealizationException:
                error(REALIZAION_PROBLEMS_MESSAGE)
                return 254

    return coalesce_return_codes(ret_codes)
Example #16
def yield_tool_sources(ctx, path, recursive=False, yield_load_errors=True):
    tools = load_tool_sources_from_path(
        path,
        recursive,
        register_load_errors=True,
    )
    for (tool_path, tool_source) in tools:
        if is_tool_load_error(tool_source):
            if yield_load_errors:
                yield (tool_path, tool_source)
            else:
                error(LOAD_ERROR_MESSAGE % tool_path)
            continue

        if not _is_tool_source(ctx, tool_path, tool_source):
            continue
        yield (tool_path, tool_source)
Example #17
def for_each_repository(ctx, function, paths, **kwds):
    ret_codes = []
    for path in paths:
        with _path_on_disk(path) as raw_path:
            try:
                for realized_repository in _realize_effective_repositories(
                    ctx, raw_path, **kwds
                ):
                    ret_codes.append(
                        function(realized_repository)
                    )
            except RealizationException:
                error(REALIZAION_PROBLEMS_MESSAGE)
                return 254

    # "Good" returns are Nones, everything else is a -1 and should be
    # passed upwards.
    return 0 if all((not x) for x in ret_codes) else -1
Example #18
def cli(ctx, path, **kwds):
    """Handle possible recursion through paths for uploading files to a toolshed
    """

    def upload(path):
        return __handle_upload(ctx, path, **kwds)

    if kwds['recursive']:
        if kwds['name'] is not None:
            error("--name is incompatible with --recursive")
            return -1
        if kwds['tar'] is not None:
            error("--tar is incompatible with --recursive")
            return -1

        exit_code = shed.for_each_repository(upload, path)
    else:
        exit_code = upload(path)
    sys.exit(exit_code)
Example #19
 def update(realized_repository):
     upload_ok = True
     if not kwds["skip_upload"]:
         upload_ok = not shed.upload_repository(
             ctx, realized_repository, **kwds
         )
     repo_id = realized_repository.find_repository_id(ctx, tsi)
     metadata_ok = True
     if not kwds["skip_metadata"]:
         metadata_ok = realized_repository.update(ctx, tsi, repo_id)
     if metadata_ok:
         info("Repository metadata updated.")
     else:
         error("Failed to update repository metadata.")
     if metadata_ok and upload_ok:
         return 0
     else:
         error("Failed to update a repository.")
         return 1
Example #20
def download_tarball(ctx, tsi, realized_repository, **kwds):
    repo_id = realized_repository.find_repository_id(ctx, tsi)
    if repo_id is None:
        message = "Unable to find repository id, cannot download."
        error(message)
        raise Exception(message)
    destination_pattern = kwds.get('destination', 'shed_download.tar.gz')
    if kwds.get("destination_is_pattern", True):
        destination = realized_repository.pattern_to_file_name(destination_pattern)
    else:
        destination = destination_pattern
    to_directory = not destination.endswith("gz")
    download_tar(tsi, repo_id, destination, to_directory=to_directory)
    if to_directory:
        clean = kwds.get("clean", False)
        if clean:
            archival_file = os.path.join(destination, ".hg_archival.txt")
            if os.path.exists(archival_file):
                os.remove(archival_file)
Example #21
def upload_repository(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed."""
    path = realized_repository.path
    tar_path = kwds.get("tar", None)
    if not tar_path:
        tar_path = build_tarball(path, **kwds)
    if kwds.get("tar_only", False):
        name = realized_repository.pattern_to_file_name("shed_upload.tar.gz")
        shutil.copy(tar_path, name)
        return 0
    shed_context = get_shed_context(ctx, **kwds)
    update_kwds = {}
    _update_commit_message(ctx, realized_repository, update_kwds, **kwds)

    repo_id = handle_force_create(realized_repository, ctx, shed_context, **kwds)
    # failing to create the repo, give up
    if repo_id is None:
        return report_non_existent_repository(realized_repository)

    if kwds.get("check_diff", False):
        is_diff = diff_repo(ctx, realized_repository, **kwds) != 0
        if not is_diff:
            name = realized_repository.name
            info("Repository [%s] not different, skipping upload." % name)
            return 0

    # TODO: support updating repo information if it changes in the config file
    try:
        shed_context.tsi.repositories.update_repository(
            str(repo_id), tar_path, **update_kwds
        )
    except Exception as e:
        if isinstance(e, bioblend.ConnectionError) and e.status_code == 400 and \
                '"No changes to repository."' in e.body:
            warn("Repository %s was not updated because there were no changes" % realized_repository.name)
            return 0
        message = api_exception_to_message(e)
        error("Could not update %s" % realized_repository.name)
        error(message)
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
Example #22
 def __init__(self, json_path):
     self.json_path = json_path
     try:
         with open(json_path, "r") as output_json_f:
             structured_data = json.load(output_json_f)
             structured_data_tests = structured_data["tests"]
     except Exception:
         error("Warning: Targetting older Galaxy which did not "
               "produce a structured test results files.")
         structured_data = {}
         structured_data_tests = {}
     self.structured_data = structured_data
     self.structured_data_tests = structured_data_tests
     structured_data_by_id = {}
     for test in self.structured_data_tests:
         structured_data_by_id[test["id"]] = test["data"]
     self.structured_data_by_id = structured_data_by_id
     self.has_details = "summary" in structured_data
     if self.has_details:
         self._read_summary()
Example #23
 def create(self, ctx, tsi):
     """Wrapper for creating the endpoint if it doesn't exist
     """
     try:
         repo = create_repository_for(
             ctx,
             tsi,
             self.name,
             self.config,
         )
         return repo['id']
     # Have to catch missing synopsis/bioblend exceptions
     except Exception as e:
         # TODO: galaxyproject/bioblend#126
         try:
             upstream_error = json.loads(e.read())
             error(upstream_error['err_msg'])
         except Exception:
             error(str(e))
         return None
Example #24
def for_path(path):
    """Produce a class:`Runnable` for supplied path."""
    runnable_type = None
    if os.path.isdir(path):
        runnable_type = RunnableType.directory
    elif looks_like_a_tool_cwl(path):
        runnable_type = RunnableType.cwl_tool
    elif looks_like_a_tool_xml(path):
        runnable_type = RunnableType.galaxy_tool
    elif is_a_yaml_with_class(path, ["GalaxyWorkflow"]):
        runnable_type = RunnableType.galaxy_workflow
    elif path.endswith(".ga"):
        runnable_type = RunnableType.galaxy_workflow
    elif looks_like_a_cwl_artifact(path, ["Workflow"]):
        runnable_type = RunnableType.cwl_workflow

    if runnable_type is None:
        error("Unable to determine runnable type for path [%s]" % path)
        raise ExitCodeException(EXIT_CODE_UNKNOWN_FILE_TYPE)

    return Runnable(path, runnable_type)
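
Dispatch is purely by path heuristics, so usage is a one-liner. A hedged usage sketch (assuming a planemo install; the import location is an assumption, and seqtk_seq.ga is a made-up file name):

from planemo.runnable import for_path  # assumed import location

runnable = for_path("seqtk_seq.ga")
# Per the .ga branch above, this yields a galaxy_workflow runnable.
print(runnable.type)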
Example #25
def cli(ctx, path, **kwds):
    """Activate a conda environment for tool.

    Source the output of this command to activate a conda environment for this
    tool.

    \b
        $ . <(planemo conda_env seqtk_seq.xml)
        Deactivate environment with conda_env_deactivate
        (seqtk_seq_v6) $ which seqtk
        /home/planemo/miniconda2/envs/jobdepsDkzcjjfecc6d406196737781ff4456ec60975c137e04884e4f4b05dc68192f7cec4656/bin/seqtk
        (seqtk_seq_v6) $ conda_env_deactivate
        $

    """
    conda_context = build_conda_context(ctx, use_planemo_shell_exec=False, **kwds)
    conda_targets = collect_conda_targets(
        ctx, [path], conda_context=conda_context
    )
    installed_conda_targets = conda_util.filter_installed_targets(
        conda_targets, conda_context=conda_context
    )
    env_name, exit_code = conda_util.build_isolated_environment(
        installed_conda_targets, conda_context=conda_context, quiet=True
    )
    if exit_code:
        error("Failed to build environmnt for request.")
        return 1

    ps1 = ps1_for_path(path, base="PRE_CONDA_PS1")
    remove_env = "%s env remove -y --name '%s'" % (
        conda_context.conda_exec, env_name
    )
    deactivate = conda_context.deactivate
    activate = conda_context.activate
    command = SOURCE_COMMAND % (
        activate, env_name, ps1,
        deactivate, remove_env
    )
    print(command)
Example #26
def cli(ctx, paths, **kwds):
    """Install conda packages for tool requirements."""
    conda_context = build_conda_context(ctx, **kwds)
    if not conda_context.is_conda_installed():
        auto_init = kwds.get("conda_auto_init", False)
        failed = True
        if auto_init:
            if conda_context.can_install_conda():
                if conda_util.install_conda(conda_context):
                    error("Attempted to install conda and failed.")
                else:
                    failed = False
            else:
                error("Cannot install conda, failing conda_install.")
        else:
            error("Conda not configured - run planemo conda_init' or pass --conda_auto_init to continue.")

        if failed:
            raise ExitCodeException(EXIT_CODE_FAILED_DEPENDENCIES)

    return_codes = []
    for conda_target in collect_conda_targets(ctx, paths):
        ctx.log("Install conda target %s" % conda_target)
        return_code = conda_util.install_conda_target(
            conda_target, conda_context=conda_context
        )
        return_codes.append(return_code)
    return coalesce_return_codes(return_codes, assert_at_least_one=True)
Example #27
 def update(realized_repository):
     upload_ret_code = 0
     upload_ok = True
     if not kwds["skip_upload"]:
         upload_ret_code = shed.upload_repository(
             ctx, realized_repository, **kwds
         )
         upload_ok = not upload_ret_code
     if upload_ret_code == 2:
         error("Failed to update repository it does not exist "
               "in target ToolShed.")
         return upload_ret_code
     repo_id = realized_repository.find_repository_id(ctx, tsi)
     metadata_ok = True
     if not kwds["skip_metadata"]:
         metadata_ok = realized_repository.update(ctx, tsi, repo_id)
     if metadata_ok:
         info("Repository metadata updated.")
     else:
         error("Failed to update repository metadata.")
     if metadata_ok and upload_ok:
         return 0
     else:
         error("Failed to update a repository.")
         return 1
Example #28
 def update(realized_repository):
     collected_data['results']['total'] += 1
     upload_ret_code = 0
     upload_ok = True
     if not kwds["skip_upload"]:
         upload_ret_code = shed.upload_repository(
             ctx, realized_repository, **kwds
         )
         upload_ok = not upload_ret_code
     if upload_ret_code == 2:
         collected_data['results']['failures'] += 1
         collected_data['tests'].append({
             'classname': realized_repository.name,
             'result': 2,
         })
         error("Failed to update repository it does not exist "
               "in target ToolShed.")
         return upload_ret_code
     repo_id = realized_repository.find_repository_id(ctx, shed_context)
     metadata_ok = True
     if not kwds["skip_metadata"]:
         metadata_ok = realized_repository.update(ctx, shed_context, repo_id)
     if metadata_ok:
         info("Repository metadata updated.")
     else:
         error("Failed to update repository metadata.")
     if metadata_ok and upload_ok:
         collected_data['tests'].append({
             'classname': realized_repository.name,
             'result': 0,
         })
         return 0
     elif upload_ok:
         collected_data['results']['skips'] += 1
         collected_data['tests'].append({
             'classname': realized_repository.name,
             'result': 3,
         })
         error("Repo updated but metadata was not.")
         return 1
     else:
         collected_data['results']['failures'] += 1
         collected_data['tests'].append({
             'classname': realized_repository.name,
             'result': 1,
         })
         error("Failed to update a repository.")
         return 1
Example #29
 def __init__(self, json_path):
     self.json_path = json_path
     if not json_path or not os.path.exists(json_path):
         error(NO_STRUCTURED_FILE % json_path)
         structured_data = {}
         structured_data_tests = {}
     else:
         try:
             with open(json_path, "r") as output_json_f:
                 structured_data = json.load(output_json_f)
                 structured_data_tests = structured_data["tests"]
         except Exception:
             error("Galaxy produced invalid JSON for structured data - summary "
                   "information and planemo reports will be incorrect.")
             structured_data = {}
             structured_data_tests = {}
     self.structured_data = structured_data
     self.structured_data_tests = structured_data_tests
     structured_data_by_id = {}
     for test in self.structured_data_tests:
         structured_data_by_id[test["id"]] = test["data"]
     self.structured_data_by_id = structured_data_by_id
     self.has_details = "summary" in structured_data
     if self.has_details:
         self.read_summary()
Example #30
def for_each_repository(ctx, function, paths, **kwds):
    ret_codes = []
    for path in paths:
        with _path_on_disk(path) as raw_path:
            try:
                for realized_repository in _realize_effective_repositories(
                    ctx, raw_path, **kwds
                ):
                    ret_codes.append(
                        function(realized_repository)
                    )
            except RealizationException:
                error(REALIZAION_PROBLEMS_MESSAGE)
                return 254

    ret_code = _coalesce_return_codes(ret_codes)
    if ret_code < 0:
        # Map -1 => 254, -2 => 253, etc...
        # Not sure it is helpful to have negative error codes
        # this was a design and API mistake in planemo.
        ret_code = 255 + ret_code

    return ret_code
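
The final mapping is plain arithmetic over the negative return codes the comment describes; a quick check (illustrative only):

# Map -1 => 254, -2 => 253, ... exactly as for_each_repository does.
for ret_code in (-1, -2, -3):
    print(ret_code, "maps to", 255 + ret_code)
# -1 maps to 254
# -2 maps to 253
# -3 maps to 252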
Example #31
def _build_raw_repo_objects(raw_dirs, **kwds):
    """
    From specific directories with .shed.yml files or specified directly from
    the comman-line build abstract description of directories that should be
    expanded out into shed repositories.
    """
    multiple = len(raw_dirs) > 1
    name = kwds.get("name", None)
    skip_errors = kwds.get("skip_errors", False)

    raw_repo_objects = []
    for raw_dir in raw_dirs:
        try:
            config = shed_repo_config(raw_dir, name=name)
        except Exception as e:
            if skip_errors:
                error_message = PARSING_PROBLEM % (raw_dir, e)
                error(error_message)
                continue
            else:
                raise
        raw_repo_object = RawRepositoryDirectory(raw_dir, config, multiple)
        raw_repo_objects.append(raw_repo_object)
    return raw_repo_objects
Example #32
def cli(ctx, path, **kwds):
    """Activate a conda environment for tool.

    Source the output of this command to activate a conda environment for this
    tool.

    \b
        $ . <(planemo conda_env seqtk_seq.xml)
        Deactivate environment with conda_env_deactivate
        (seqtk_seq_v6) $ which seqtk
        /home/planemo/miniconda2/envs/jobdepsDkzcjjfecc6d406196737781ff4456ec60975c137e04884e4f4b05dc68192f7cec4656/bin/seqtk
        (seqtk_seq_v6) $ conda_env_deactivate
        $

    """
    conda_context = build_conda_context(ctx,
                                        use_planemo_shell_exec=False,
                                        **kwds)
    conda_targets = collect_conda_targets(ctx, [path],
                                          conda_context=conda_context)
    installed_conda_targets = conda_util.filter_installed_targets(
        conda_targets, conda_context=conda_context)
    env_name, exit_code = conda_util.build_isolated_environment(
        installed_conda_targets, conda_context=conda_context, quiet=True)
    if exit_code:
        error("Failed to build environmnt for request.")
        return 1

    ps1 = ps1_for_path(path, base="PRE_CONDA_PS1")
    remove_env = "%s env remove -y --name '%s'" % (conda_context.conda_exec,
                                                   env_name)
    deactivate = conda_context.deactivate
    activate = conda_context.activate
    command = SOURCE_COMMAND % (activate, env_name, ps1, deactivate,
                                remove_env)
    print(command)
Example #33
 def __init__(self, json_path):
     self.json_path = json_path
     if not json_path or not os.path.exists(json_path):
         error(NO_STRUCTURED_FILE % json_path)
         structured_data = {}
         structured_data_tests = {}
     else:
         try:
             with open(json_path, "r") as output_json_f:
                 structured_data = json.load(output_json_f)
                 structured_data_tests = structured_data["tests"]
         except Exception:
             error(
                 "Galaxy produced invalid JSON for structured data - summary "
                 "information and planemo reports will be incorrect.")
             structured_data = {}
             structured_data_tests = {}
     self.structured_data = structured_data
     self.structured_data_tests = structured_data_tests
     structured_data_by_id = {}
     for test in self.structured_data_tests:
         structured_data_by_id[test["id"]] = test["data"]
     self.structured_data_by_id = structured_data_by_id
     self.has_details = "summary" in structured_data
     if self.has_details:
         self.read_summary()
Example #34
def __handle_upload(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed.
    """
    path = realized_repository.path
    tar_path = kwds.get("tar", None)
    if not tar_path:
        tar_path = shed.build_tarball(path, **kwds)
    if kwds["tar_only"]:
        suffix = ""
        if realized_repository.multiple:
            name = realized_repository.config["name"]
            suffix = "_%s" % name.replace("-", "_")
        shell("cp %s shed_upload%s.tar.gz" % (tar_path, suffix))
        return 0
    tsi = shed.tool_shed_client(ctx, **kwds)
    update_kwds = {}
    message = kwds.get("message", None)
    if message:
        update_kwds["commit_message"] = message

    # TODO: this needs to use realized repository
    repo_id = realized_repository.find_repository_id(ctx, tsi)
    if repo_id is None and kwds["force_repository_creation"]:
        repo_id = realized_repository.create(ctx, tsi)
    # failing to create the repo, give up
    if repo_id is None:
        return -1
    # TODO: support updating repo information if it changes in the config file

    try:
        tsi.repositories.update_repository(repo_id, tar_path, **update_kwds)
    except Exception as e:
        exception_content = e.read()
        try:
            # Galaxy passes nice JSON messages as their errors, which bioblend
            # blindly returns. Attempt to parse those.
            upstream_error = json.loads(exception_content)
            error(upstream_error['err_msg'])
        except Exception as e2:
            error("Could not update %s" % realized_repository.name)
            error(exception_content)
            # e2 is the JSON parse failure and has no .read() method.
            error(str(e2))
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
Example #35
def _check_test_outputs(xunit_report_file_tracker,
                        structured_report_file_tracker):
    if not os.path.exists(xunit_report_file_tracker.path):
        message = NO_XUNIT_REPORT_MESSAGE % xunit_report_file_tracker.path
        error(message)
        raise Exception(message)

    if not os.path.exists(structured_report_file_tracker.path):
        message = NO_JSON_REPORT_MESSAGE % structured_report_file_tracker.path
        error(message)
        raise Exception(message)

    if not xunit_report_file_tracker.changed():
        message = REPORT_NOT_CHANGED % xunit_report_file_tracker.path
        error(message)
        raise Exception(message)

    if not structured_report_file_tracker.changed():
        message = REPORT_NOT_CHANGED % structured_report_file_tracker.path
        error(message)
        raise Exception(message)
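
_check_test_outputs only assumes tracker objects exposing a path attribute and a changed() predicate. A minimal mtime-based tracker with that interface (a sketch under the assumption that "changed" means "modified since the tracker was created"; not planemo's actual implementation):

import os

class ReportFileTracker:
    """Track a report file and detect later modification via its mtime."""

    def __init__(self, path):
        self.path = path
        self._mtime = os.path.getmtime(path) if os.path.exists(path) else None

    def changed(self):
        # Treat a newly created file, or a newer mtime, as a change.
        if not os.path.exists(self.path):
            return False
        if self._mtime is None:
            return True
        return os.path.getmtime(self.path) > self._mtime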
Example #36
 def find_repository_id(self, ctx, tsi):
     try:
         repo_id = _find_repository_id(
             ctx,
             tsi,
             name=self.name,
             repo_config=self.config,
             allow_none=True,
         )
         return repo_id
     except Exception as e:
         error("Could not update %s" % self.name)
         try:
             error(e.read())
         except AttributeError:
             # I've seen a case where the error couldn't be read, so now
             # wrapped in try/except
             error("Could not query for repository in toolshed")
     return None
Example #37
def upload_repository(ctx, realized_repository, **kwds):
    """Upload a tool directory as a tarball to a tool shed.
    """
    path = realized_repository.path
    tar_path = kwds.get("tar", None)
    if not tar_path:
        tar_path = build_tarball(path, **kwds)
    if kwds.get("tar_only", False):
        name = realized_repository.pattern_to_file_name("shed_upload.tar.gz")
        shell("cp '%s' '%s'" % (tar_path, name))
        return 0
    shed_context = get_shed_context(ctx, **kwds)
    update_kwds = {}
    _update_commit_message(ctx, realized_repository, update_kwds, **kwds)

    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    if repo_id is None and kwds["force_repository_creation"]:
        repo_id = realized_repository.create(ctx, shed_context)
    # failing to create the repo, give up
    if repo_id is None:
        name = realized_repository.name
        error("Repository [%s] does not exist in the targeted Tool Shed." % name)
        return 2

    if kwds.get("check_diff", False):
        is_diff = diff_repo(ctx, realized_repository, **kwds)
        if not is_diff:
            name = realized_repository.name
            info("Repository [%s] not different, skipping upload." % name)
            return 0

    # TODO: support updating repo information if it changes in the config file
    try:
        shed_context.tsi.repositories.update_repository(
            repo_id, tar_path, **update_kwds
        )
    except Exception as e:
        message = api_exception_to_message(e)
        error("Could not update %s" % realized_repository.name)
        error(message)
        return -1
    info("Repository %s updated successfully." % realized_repository.name)
    return 0
Example #38
def build_conda_context(ctx, **kwds):
    """Build a galaxy-lib CondaContext tailored to planemo use.

    Using planemo's common command-line/global config options.
    """
    condarc_override_default = os.path.join(ctx.workspace, "condarc")
    conda_prefix = kwds.get("conda_prefix", None)
    use_planemo_shell = kwds.get("use_planemo_shell_exec", True)
    ensure_channels = kwds.get("conda_ensure_channels", "")
    condarc_override = kwds.get("condarc", condarc_override_default)
    use_local = kwds.get("conda_use_local", False)
    shell_exec = shell if use_planemo_shell else None
    conda_context = conda_util.CondaContext(conda_prefix=conda_prefix,
                                            ensure_channels=ensure_channels,
                                            condarc_override=condarc_override,
                                            use_local=use_local,
                                            shell_exec=shell_exec)
    handle_auto_init = kwds.get("handle_auto_init", False)
    if handle_auto_init and not conda_context.is_installed():
        auto_init = kwds.get("conda_auto_init", True)
        failed = True
        if auto_init:
            if conda_context.can_install_conda():
                if conda_util.install_conda(conda_context):
                    error(MESSAGE_ERROR_FAILED_INSTALL)
                else:
                    failed = False
            else:
                error(MESSAGE_ERROR_CANNOT_INSTALL)
        else:
            error(MESSAGE_ERROR_NOT_INSTALLING)

        if failed:
            raise ExitCodeException(EXIT_CODE_FAILED_DEPENDENCIES)
    if handle_auto_init:
        conda_context.ensure_conda_build_installed_if_needed()
    return conda_context
Example #39
 def data_error():
     error("An invalid JSON for structured test result data - "
           "summary information and planemo reports will be "
           "incorrect.")
Example #40
def _load_exception_handler(path, exc_info):
    error(LOAD_ERROR_MESSAGE % path)
    traceback.print_exception(*exc_info, limit=1, file=sys.stderr)
Example #41
def cli(ctx, paths, **kwds):  # noqa C901
    """Auto-update tool requirements by checking against Conda and updating if newer versions are available."""
    assert_tools = kwds.get("assert_tools", True)
    recursive = kwds.get("recursive", False)
    exit_codes = []
    modified_files = set()
    tools_to_skip = [line.rstrip() for line in open(kwds['skiplist'])
                     ] if kwds['skiplist'] else []
    runnables = for_paths(paths)

    if any(r.type in {RunnableType.galaxy_tool, RunnableType.directory}
           for r in runnables):
        # update Galaxy tools
        for (tool_path,
             tool_xml) in yield_tool_sources_on_paths(ctx, paths, recursive):
            if tool_path.split('/')[-1] in tools_to_skip:
                info("Skipping tool %s" % tool_path)
                continue
            info("Auto-updating tool %s" % tool_path)
            try:
                updated = autoupdate.autoupdate_tool(
                    ctx, tool_path, modified_files=modified_files, **kwds)
                if updated:
                    modified_files.update(updated)
            except Exception as e:
                error(
                    f"{tool_path} could not be updated - the following error was raised: {e.__str__()}"
                )
            if handle_tool_load_error(tool_path, tool_xml):
                exit_codes.append(EXIT_CODE_GENERIC_FAILURE)
                continue
            else:
                exit_codes.append(EXIT_CODE_OK)

    workflows = [
        r for r in runnables if r.type == RunnableType.galaxy_workflow
    ]
    modified_workflows = []
    if workflows:
        assert is_galaxy_engine(**kwds)
        if kwds.get("engine") != "external_galaxy":
            kwds["install_most_recent_revision"] = True
            kwds["install_resolver_dependencies"] = False
            kwds["install_repository_dependencies"] = False
            kwds['shed_install'] = True

        with engine_context(ctx, **kwds) as galaxy_engine:
            with galaxy_engine.ensure_runnables_served(workflows) as config:
                for workflow in workflows:
                    if config.updated_repos.get(workflow.path) or kwds.get(
                            "engine") == "external_galaxy":
                        info("Auto-updating workflow %s" % workflow.path)
                        updated_workflow = autoupdate.autoupdate_wf(
                            ctx, config, workflow)
                        if workflow.path.endswith(".ga"):
                            with open(workflow.path, 'w') as f:
                                json.dump(updated_workflow,
                                          f,
                                          indent=4,
                                          sort_keys=True)
                        else:
                            format2_wrapper = from_galaxy_native(
                                updated_workflow, json_wrapper=True)
                            with open(workflow.path, "w") as f:
                                f.write(format2_wrapper["yaml_content"])
                        modified_workflows.append(workflow.path)
                    else:
                        info(
                            "No newer tool versions were found, so the workflow was not updated."
                        )

    if kwds['test']:
        if not modified_files:
            info("No tools were updated, so no tests were run.")
        else:
            with temp_directory(dir=ctx.planemo_directory) as temp_path:
                # only test tools in updated directories
                modified_paths = [
                    path for path, tool_xml in yield_tool_sources_on_paths(
                        ctx, paths, recursive) if path in modified_files
                ]
                info(
                    f"Running tests for the following auto-updated tools: {', '.join(modified_paths)}"
                )
                runnables = for_paths(modified_paths + modified_workflows,
                                      temp_path=temp_path)
                kwds["engine"] = "galaxy"
                return_value = test_runnables(ctx,
                                              runnables,
                                              original_paths=paths,
                                              **kwds)
                exit_codes.append(return_value)
    return coalesce_return_codes(exit_codes, assert_at_least_one=assert_tools)
Example #42
def _diff_in(ctx, working, realized_repository, **kwds):
    path = realized_repository.path
    shed_target_source = kwds.get("shed_target_source", None)

    label_a = "_%s_" % (shed_target_source
                        if shed_target_source else "workingdir")
    shed_target = kwds.get("shed_target", "B")
    if "/" in shed_target:
        shed_target = "custom_shed"
    label_b = "_%s_" % shed_target

    mine = os.path.join(working, label_a)
    other = os.path.join(working, label_b)

    shed_context = get_shed_context(ctx, read_only=True, **kwds)
    # In order to download the tarball, require repository ID...
    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    if repo_id is None:
        error(
            "shed_diff: Repository [%s] does not exist in the targeted Tool Shed."
            % realized_repository.name)
        # $ diff README.rst not_a_file > /dev/null 2>&1; echo $?
        # 2
        return 2
    info("Diffing repository [%s]" % realized_repository.name)
    download_tarball(ctx,
                     shed_context,
                     realized_repository,
                     destination=other,
                     clean=True,
                     destination_is_pattern=False,
                     **kwds)
    if shed_target_source:
        new_kwds = kwds.copy()
        new_kwds["shed_target"] = shed_target_source
        shed_context = get_shed_context(ctx, read_only=True, **new_kwds)
        download_tarball(ctx,
                         shed_context,
                         realized_repository,
                         destination=mine,
                         clean=True,
                         destination_is_pattern=False,
                         **new_kwds)
    else:
        tar_path = build_tarball(path)
        cmd_template = 'mkdir "%s"; tar -xzf "%s" -C "%s"; rm -rf %s'
        shell(cmd_template % (mine, tar_path, mine, tar_path))

    output = kwds.get("output", None)
    raw = kwds.get("raw", False)
    xml_diff = 0
    if not raw:
        if output:
            with open(output, "w") as f:
                xml_diff = diff_and_remove(working, label_a, label_b, f)
        else:
            xml_diff = diff_and_remove(working, label_a, label_b, sys.stdout)

    cmd = 'cd "%s"; diff -r %s %s' % (working, label_a, label_b)
    if output:
        cmd += " >> '%s'" % output
    raw_diff = shell(cmd)
    exit = raw_diff or xml_diff
    if not raw:
        if xml_diff:
            ctx.vlog("One or more shed XML file(s) different!")
        if raw_diff:
            ctx.vlog("One or more non-shed XML file(s) different.")
        if not xml_diff and not raw_diff:
            ctx.vlog("No differences.")
    return exit
Example #43
 def __init__(self, json_path):
     if not json_path or not os.path.exists(json_path):
         error(NO_STRUCTURED_FILE % json_path)
     super(StructuredData, self).__init__(json_path)
Example #44
def lint_repository(ctx, realized_repository, **kwds):
    # TODO: this really needs to start working with realized path.
    failed = False
    path = realized_repository.real_path
    info("Linting repository %s" % path)
    lint_args = build_lint_args(ctx, **kwds)
    lint_ctx = LintContext(lint_args["level"])
    lint_ctx.lint(
        "lint_expansion",
        lint_expansion,
        realized_repository,
    )
    lint_ctx.lint(
        "lint_expected_files",
        lint_expected_files,
        realized_repository,
    )
    lint_ctx.lint(
        "lint_tool_dependencies_xsd",
        lint_tool_dependencies_xsd,
        path,
    )
    lint_ctx.lint(
        "lint_tool_dependencies_actions",
        lint_tool_dependencies_actions,
        path,
    )
    lint_ctx.lint(
        "lint_repository_dependencies",
        lint_repository_dependencies,
        path,
    )
    lint_ctx.lint(
        "lint_shed_yaml",
        lint_shed_yaml,
        realized_repository,
    )
    lint_ctx.lint(
        "lint_readme",
        lint_readme,
        path,
    )
    if kwds["tools"]:
        for (tool_path, tool_xml) in yield_tool_xmls(ctx, path,
                                                     recursive=True):
            info("+Linting tool %s" % tool_path)
            if handle_tool_load_error(tool_path, tool_xml):
                failed = True
                continue
            lint_xml_with(lint_ctx,
                          tool_xml,
                          extra_modules=lint_args["extra_modules"])
    if kwds["ensure_metadata"]:
        lint_ctx.lint(
            "lint_shed_metadata",
            lint_shed_metadata,
            realized_repository,
        )
    if not failed:
        failed = lint_ctx.failed(lint_args["fail_level"])
    if failed:
        error("Failed linting")
    return 1 if failed else 0
Example #45
def cli(ctx, paths, recursive=False, fail_fast=True, download_cache=None):
    """Compile tool_dependencies.xml to bash script.

    An experimental approach to parsing tool_dependencies.xml files into
    bash shell scripts, intended initially for use within Continuous
    Integration testing setups like TravisCI.

    Parses the ``tool_dependencies.xml`` files from the specified projects,
    and converts them into an installation bash script (``dep_install.sh``),
    and a shell script (``env.sh``) defining any new/edited environment
    variables.

    These are intended to be used via ``bash dep_install.sh`` (once), and as
    ``source env.sh`` prior to running any of the dependencies to set the
    environment variables within the current shell session.

    Both ``dep_install.sh`` and ``env.sh`` require ``$INSTALL_DIR`` be defined
    before running them, set to an existing directory with write permissions.
    Beware that if run on multiple tools, they can overwrite each other (for
    example if you have packages for different versions of the same tool). In
    this case make separate calls to ``planemo dependency_script`` and call
    the scripts with different installation directories.

    This command will download (and cache) any URLs specified via Galaxy
    download actions. This is in order to decompress them and determine the
    relevant sub-folder to change into as per the Tool Shed install mechanism,
    so that this can be recorded as a ``cd`` command in the bash script.

    The download cache used by ``planemo dependency_script`` and the resulting
    output script ``dep_install.sh`` defaults to ``./download_cache`` (under
    the current working directory), and can be set with ``$DOWNLOAD_CACHE``.

    If the ``tool_dependencies.xml`` file includes SHA256 checksums for
    downloads, these will be verified after downloading to the cache (by
    either ``planemo dependency_script`` or ``bash dep_install.sh``).

    This is experimental, and is initially intended for use within continuous
    integration testing setups like TravisCI to both verify that the dependency
    installation recipe works, and to run functional tests.
    """
    # TODO: Command line API for bash output filenames & install dir, cache.
    if download_cache:
        assert os.path.isdir(download_cache), download_cache
        # Effectively using this as a global variable, refactor this
        # once using a visitor pattern instead of action.to_bash()
        os.environ["DOWNLOAD_CACHE"] = os.path.abspath(download_cache)
        print("Using $DOWNLOAD_CACHE=%r" % os.environ["DOWNLOAD_CACHE"])
    failed = False
    with open("env.sh", "w") as env_sh_handle:
        with open("dep_install.sh", "w") as install_handle:
            install_handle.write(preamble_dep_install)
            env_sh_handle.write(preamble_env_sh)
            for path in paths:
                # ctx.log("Checking: %r" % path)
                if failed and fail_fast:
                    break
                for tool_dep in find_tool_dependencis_xml(path, recursive):
                    passed = process_tool_dependencies_xml(
                        tool_dep, install_handle, env_sh_handle)
                    if passed:
                        info('Processed %s' % tool_dep)
                    else:
                        failed = True
                        if fail_fast:
                            for line in [
                                    '#' + '*' * 60,
                                    'echo "WARNING: Skipping %s"' % tool_dep,
                                    '#' + '*' * 60
                            ]:
                                install_handle.write(line + "\n")
                            break
                        # error("%s failed" % tool_dep)
            install_handle.write(final_dep_install)
    ctx.log("The End")
    if failed:
        error('Error processing one or more tool_dependencies.xml files.')
        sys.exit(1)
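
The docstring above notes that SHA256 checksums in tool_dependencies.xml are verified after downloading to the cache. The verification itself is a few lines of standard-library code; a sketch of the idea (not the exact planemo helper):

import hashlib

def sha256_matches(path, expected_hex):
    """Return True if the file at path hashes to the expected SHA256 hex digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Hash in chunks so large downloads don't need to fit in memory.
        for chunk in iter(lambda: f.read(65536), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_hex.lower()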
Example #46
def _diff_in(ctx, working, realized_repository, **kwds):
    path = realized_repository.path
    shed_target_source = kwds.get("shed_target_source")

    label_a = "_%s_" % (shed_target_source if shed_target_source else "workingdir")
    shed_target = kwds.get("shed_target", "B")
    if "/" in shed_target:
        shed_target = "custom_shed"
    label_b = "_%s_" % shed_target

    mine = os.path.join(working, label_a)
    other = os.path.join(working, label_b)

    shed_context = get_shed_context(ctx, read_only=True, **kwds)
    # In order to download the tarball, require repository ID...
    repo_id = realized_repository.find_repository_id(ctx, shed_context)
    if repo_id is None:
        error("shed_diff: Repository [%s] does not exist in the targeted Tool Shed."
              % realized_repository.name)
        # $ diff README.rst not_a_file > /dev/null 2>&1; echo $?
        # 2
        return 2
    info("Diffing repository [%s]" % realized_repository.name)
    download_tarball(
        ctx,
        shed_context,
        realized_repository,
        destination=other,
        clean=True,
        destination_is_pattern=False,
        **kwds
    )
    if shed_target_source:
        new_kwds = kwds.copy()
        new_kwds["shed_target"] = shed_target_source
        shed_context = get_shed_context(ctx, read_only=True, **new_kwds)
        download_tarball(
            ctx,
            shed_context,
            realized_repository,
            destination=mine,
            clean=True,
            destination_is_pattern=False,
            **new_kwds
        )
    else:
        tar_path = build_tarball(path)
        os.mkdir(mine)
        shell(['tar', '-xzf', tar_path, '-C', mine])
        # build_tarball returns a tarball file, so remove it directly.
        os.remove(tar_path)

    output = kwds.get("output")
    raw = kwds.get("raw", False)
    xml_diff = 0
    if not raw:
        if output:
            with open(output, "w") as f:
                xml_diff = diff_and_remove(working, label_a, label_b, f)
        else:
            xml_diff = diff_and_remove(working, label_a, label_b, sys.stdout)

    cmd = ['diff', '-r', label_a, label_b]
    if output:
        with open(output, 'ab') as fh:
            raw_diff = shell(cmd, cwd=working, stdout=fh)
    else:
        raw_diff = shell(cmd, cwd=working)
    exit = raw_diff or xml_diff
    if not raw:
        if xml_diff:
            ctx.vlog("One or more shed XML file(s) different!")
        if raw_diff:
            ctx.vlog("One or more non-shed XML file(s) different.")
        if not xml_diff and not raw_diff:
            ctx.vlog("No differences.")
    return exit
Example #47
def _handle_realization_error(exception, **kwds):
    fail_fast = kwds.get("fail_fast", False)
    if fail_fast:
        raise exception
    else:
        error(unicodify(exception))
Example #48
 def not_specifing_dependent_option(x, y):
     if kwds.get(x) and not kwds.get(y):
         template = "Can only use the --%s option if also specifying --%s"
         message = template % (x, y)
         io.error(message)
         return True
Example #49
 def not_exclusive(x, y):
     if kwds.get(x) and kwds.get(y):
         io.error("Can only specify one of --%s and --%s" % (x, y))
         return True
Example #50
def report_non_existent_repository(realized_repository):
    name = realized_repository.name
    error("Repository [%s] does not exist in the targeted Tool Shed." % name)
    return 2
Example #51
 def _check_can_run(self, runnable):
     if not self.can_run(runnable):
         template = "Engine type [%s] cannot execute [%s]s"
         message = template % (self.__class__, runnable.type)
         error(message)
         self._ctx.exit(EXIT_CODE_UNSUPPORTED_FILE_TYPE)
Example #52
    def update(realized_repository):
        collected_data['results']['total'] += 1
        skip_upload = kwds["skip_upload"]
        skip_metadata = kwds["skip_metadata"]
        upload_ret_code = 0
        upload_ok = True

        captured_io = {}
        if not skip_upload:
            with captured_io_for_xunit(kwds, captured_io):
                upload_ret_code = shed.upload_repository(
                    ctx, realized_repository, **kwds)
                upload_ok = not upload_ret_code

        repo_result = {
            'classname': realized_repository.name,
            'time': captured_io.get("time", None),
            'name': 'shed-update',
            'stdout': captured_io.get("stdout", None),
            'stderr': captured_io.get("stderr", None),
        }

        # Now that we've uploaded (or skipped appropriately), collect results.
        if upload_ret_code == 2:
            collected_data['results']['failures'] += 1
            repo_result.update({
                'errorType': 'FailedUpdate',
                'errorMessage': 'Failed to update repository as it does not exist in target ToolShed',
            })
            collected_data['tests'].append(repo_result)
            error("Failed to update repository it does not exist "
                  "in target ToolShed.")
            return upload_ret_code

        exit = 0
        metadata_ok = True
        if not skip_metadata:
            repo_id = shed.handle_force_create(realized_repository, ctx,
                                               shed_context, **kwds)
            # failing to create the repo, give up
            if repo_id is None:
                exit = shed.report_non_existent_repository(realized_repository)
                metadata_ok = False
            else:
                metadata_ok = realized_repository.update(
                    ctx, shed_context, repo_id)
        else:
            info("Skipping repository metadata update.")

        if not metadata_ok:
            error("Failed to update repository metadata.")

        if metadata_ok and upload_ok:
            pass
        elif upload_ok:
            collected_data['results']['skips'] += 1
            repo_result.update({
                'errorType': 'FailedMetadata',
                'errorMessage': 'Failed to update repository metadata',
            })
            if not skip_upload:
                error("Repo updated but metadata was not.")
            exit = exit or 1
        else:
            collected_data['results']['failures'] += 1
            repo_result.update({
                'errorType': 'FailedUpdate',
                'errorMessage': 'Failed to update repository',
            })
            error("Failed to update a repository.")
            exit = exit or 1
        collected_data['tests'].append(repo_result)
        return exit
Example #53
def _load_exception_handler(path, exc_info):
    error("Error loading tool with path %s" % path)
    traceback.print_exception(*exc_info, limit=1, file=sys.stderr)