def group_list(ctx, number: int) -> None:
    """Get a list of analysis groups.

    Pages through the API's group list endpoint until at least *number*
    groups have been collected (or the server returns an empty page), then
    trims and prints the result with the configured formatter.

    \f

    :param ctx: Click context holding group-level parameters
    :param number: The number of analysis groups to display
    :return:
    """

    client: Client = ctx["client"]
    result = GroupListResponse(groups=[], total=0)
    offset = 0
    while True:
        LOGGER.debug(f"Fetching groups with offset {offset}")
        resp = client.group_list(offset=offset)
        if not resp.groups:
            LOGGER.debug("Received empty group list response")
            break
        offset += len(resp.groups)
        result.groups.extend(resp.groups)
        if len(result.groups) >= number:
            LOGGER.debug(f"Received {len(result.groups)} groups")
            break

    # trim result to desired result number - slice the accumulated
    # .groups list; the response object itself is not subscriptable
    LOGGER.debug(f"Got {len(result.groups)} analyses, trimming to {number}")
    result = GroupListResponse(groups=result.groups[:number], total=resp.total)
    write_or_print(FORMAT_RESOLVER[ctx["fmt"]].format_group_list(result))
예제 #2
0
def version(ctx) -> None:
    """Display API version information.

    \f

    :param ctx: Click context holding group-level parameters
    :return:
    """

    LOGGER.debug("Fetching version information")
    response = ctx["client"].version()
    formatter = FORMAT_RESOLVER[ctx["fmt"]]
    write_or_print(formatter.format_version(response))
예제 #3
0
def group_open(ctx, name: str) -> None:
    """Create a new group to assign future analyses to.

    \f

    :param ctx: Click context holding group-level parameters
    :param name: The name of the group to be created (autogenerated if empty)
    """

    LOGGER.debug(f"Opening group with name {name}")
    response: GroupCreationResponse = ctx["client"].create_group(group_name=name)
    message = "Opened group with ID {} and name '{}'".format(
        response.group.identifier, response.group.name)
    write_or_print(message)
예제 #4
0
def group_close(ctx, identifiers: List[str]) -> None:
    """Close/seal an existing group.

    \f

    :param ctx: Click context holding group-level parameters
    :param identifiers: The group ID(s) to seal
    """

    client = ctx["client"]
    for group_id in identifiers:
        LOGGER.debug(f"Closing group for ID {group_id}")
        response: GroupCreationResponse = client.seal_group(group_id=group_id)
        write_or_print("Closed group with ID {} and name '{}'".format(
            response.group.identifier, response.group.name))
예제 #5
0
def version(ctx, remote_flag) -> None:
    """Display API version information.

    \f

    :param ctx: Click context holding group-level parameters
    :param remote_flag: Boolean to switch between local CLI and remote API version
    :return:
    """

    if not remote_flag:
        # local mode: just report the CLI's own version string
        click.echo(f"MythX CLI v{__version__}: https://github.com/dmuhs/mythx-cli")
        return

    LOGGER.debug("Fetching version information")
    resp = ctx["client"].version()
    write_or_print(FORMAT_RESOLVER[ctx["fmt"]].format_version(resp))
예제 #6
0
File: report.py  Project: dmuhs/mythx-cli
def analysis_report(
    ctx,
    uuids: List[str],
    min_severity: Optional[str],
    swc_blacklist: Optional[List[str]],
    swc_whitelist: Optional[List[str]],
) -> None:
    """Fetch the report for a single or multiple job UUIDs.

    The table sort key and the process exit code are taken from the Click
    context (``ctx["table_sort_key"]`` and ``ctx["retval"]``).

    \f

    :param ctx: Click context holding group-level parameters
    :param uuids: List of UUIDs to display the report for
    :param min_severity: Ignore SWC IDs below the designated level
    :param swc_blacklist: A comma-separated list of SWC IDs to ignore
    :param swc_whitelist: A comma-separated list of SWC IDs to include
    :return:
    """

    issues_list: List[Tuple[DetectedIssuesResponse,
                            Optional[AnalysisInputResponse]]] = []
    formatter: BaseFormatter = FORMAT_RESOLVER[ctx["fmt"]]
    for uuid in uuids:
        LOGGER.debug(f"{uuid}: Fetching report")
        resp = ctx["client"].report(uuid)
        LOGGER.debug(f"{uuid}: Fetching input")
        # only fetch the (potentially large) analysis input if the chosen
        # formatter actually renders it
        inp = (ctx["client"].request_by_uuid(uuid)
               if formatter.report_requires_input else None)

        LOGGER.debug(f"{uuid}: Applying SWC filters")
        util.filter_report(
            resp,
            min_severity=min_severity,
            swc_blacklist=swc_blacklist,
            swc_whitelist=swc_whitelist,
        )
        # extend response with job UUID to keep formatter logic isolated
        resp.uuid = uuid
        issues_list.append((resp, inp))

    # summary line - must not reference the loop variable, which is
    # unbound (NameError) when no UUIDs were passed
    LOGGER.debug(
        f"Printing report for {len(issues_list)} issue items with sort key \"{ctx['table_sort_key']}\""
    )
    write_or_print(
        formatter.format_detected_issues(issues_list,
                                         table_sort_key=ctx["table_sort_key"]))
    sys.exit(ctx["retval"])
예제 #7
0
def render(
    ctx,
    target: str,
    user_template: str,
    aesthetic: bool,
    markdown: bool,
    min_severity: Optional[str],
    swc_blacklist: Optional[List[str]],
    swc_whitelist: Optional[List[str]],
) -> None:
    """Render an analysis job or group report as HTML.

    \f
    :param ctx: Click context holding group-level parameters
    :param target: Group or analysis ID to fetch the data for
    :param user_template: User-defined template string
    :param aesthetic: DO NOT TOUCH IF YOU'RE BORING
    :param markdown: Flag to render a markdown report
    :param min_severity: Ignore SWC IDs below the designated level
    :param swc_blacklist: A comma-separated list of SWC IDs to ignore
    :param swc_whitelist: A comma-separated list of SWC IDs to include
    """

    client: Client = ctx["client"]
    # normalize target
    target = target.lower()
    default_template = DEFAULT_MD_TEMPLATE if markdown else DEFAULT_HTML_TEMPLATE
    # enables user to include library templates in their own
    template_dirs = [default_template.parent]

    if user_template:
        LOGGER.debug(f"Received user-defined template at {user_template}")
        user_template = Path(user_template)
        template_name = user_template.name
        template_dirs.append(user_template.parent)
    else:
        LOGGER.debug(f"Using default template {default_template.name}")
        template_name = default_template.name

    # identical Jinja kwargs apply to HTML and markdown rendering, so the
    # dict is built once (previously it was redundantly rebuilt for HTML)
    env_kwargs = {
        "trim_blocks": True,
        "lstrip_blocks": True,
        "keep_trailing_newline": True,
    }
    if not markdown and aesthetic:
        LOGGER.debug(f"Overwriting template to go A E S T H E T I C")
        template_name = "aesthetic.html"

    LOGGER.debug("Initializing Jinja environment")
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs),
                             **env_kwargs)
    template = env.get_template(template_name)

    issues_list: List[Tuple[AnalysisStatusResponse, DetectedIssuesResponse,
                            Optional[AnalysisInputResponse], ]] = []
    if len(target) == 24:
        # a 24-character ID denotes a group target
        LOGGER.debug(f"Identified group target {target}")
        list_resp = client.analysis_list(group_id=target)
        offset = 0

        LOGGER.debug(f"Fetching analyses in group {target}")
        while len(list_resp.analyses) < list_resp.total:
            offset += len(list_resp.analyses)
            # extend with the response's .analyses list - extending with
            # the raw response object would not add analysis entries
            list_resp.analyses.extend(
                client.analysis_list(group_id=target, offset=offset).analyses)

        for analysis in list_resp.analyses:
            click.echo("Fetching report for analysis {}".format(analysis.uuid),
                       err=True)
            status, resp, inp = get_analysis_info(
                client=client,
                uuid=analysis.uuid,
                min_severity=min_severity,
                swc_blacklist=swc_blacklist,
                swc_whitelist=swc_whitelist,
            )
            issues_list.append((status, resp, inp))
    elif len(target) == 36:
        # a 36-character ID denotes a single analysis job UUID
        LOGGER.debug(f"Identified analysis target {target}")
        click.echo("Fetching report for analysis {}".format(target), err=True)
        status, resp, inp = get_analysis_info(
            client=client,
            uuid=target,
            min_severity=min_severity,
            swc_blacklist=swc_blacklist,
            swc_whitelist=swc_whitelist,
        )
        issues_list.append((status, resp, inp))
    else:
        LOGGER.debug(f"Could not identify target with length {len(target)}")
        raise click.UsageError(
            "Invalid target. Please provide a valid group or analysis job ID.")

    LOGGER.debug(f"Rendering template for {len(issues_list)} issues")
    rendered = template.render(issues_list=issues_list, target=target)
    if not markdown:
        LOGGER.debug(f"Minifying HTML report")
        rendered = htmlmin.minify(rendered, remove_comments=True)

    write_or_print(rendered, mode="w+")
예제 #8
0
def analyze(ctx, target: List[str], async_flag: bool, mode: str,
            create_group: bool, group_id: str, group_name: str,
            min_severity: str, swc_blacklist: str, swc_whitelist: str,
            solc_version: str, solc_path: str, include: Tuple[str],
            remap_import: Tuple[str], check_properties: bool,
            enable_scribble: bool, scribble_path: str, scenario: str,
            project_id: str) -> None:
    """Analyze the given directory or arguments with MythX.

    \f

    :param ctx: Click context holding group-level parameters
    :param target: Arguments passed to the `analyze` subcommand
    :param async_flag: Whether to execute the analysis asynchronously
    :param mode: Full or quick analysis mode
    :param create_group: Create a new group for the analysis
    :param group_id: The group ID to add the analysis to
    :param group_name: The group name to attach to the analysis
    :param min_severity: Ignore SWC IDs below the designated level
    :param swc_blacklist: A comma-separated list of SWC IDs to ignore
    :param swc_whitelist: A comma-separated list of SWC IDs to include
    :param solc_version: The solc version to use for Solidity compilation
    :param solc_path: The path to a custom solc executable
    :param include: List of contract names to send - exclude everything else
    :param remap_import: List of import remappings to pass on to solc
    :param check_properties: Enable property verification mode
    :param enable_scribble: Enable instrumentation with scribble
    :param scribble_path: Optional path to the scribble executable
    :param scenario: Force an analysis scenario
    :param project_id: Id of project to add this analysis to
    :return:
    """

    # fall back to an empty mapping so a missing "analyze" config section
    # does not raise AttributeError on the .get calls below
    analyze_config = ctx.get("analyze") or {}
    if async_flag is None:
        async_flag = analyze_config.get("async", False)
    if create_group is None:
        create_group = analyze_config.get("create-group", False)

    # CLI arguments take precedence over YAML config values throughout
    mode = mode or analyze_config.get("mode") or "quick"
    project_id = project_id or analyze_config.get("project-id") or None
    group_id = group_id or analyze_config.get("group-id") or None
    group_name = group_name or analyze_config.get("group-name") or ""
    min_severity = min_severity or analyze_config.get("min-severity") or None
    swc_blacklist = swc_blacklist or analyze_config.get("blacklist") or None
    swc_whitelist = swc_whitelist or analyze_config.get("whitelist") or None
    solc_version = solc_version or analyze_config.get("solc") or None
    include = include or analyze_config.get("contracts") or []
    remap_import = remap_import or analyze_config.get("remappings") or []
    check_properties = (check_properties
                        or analyze_config.get("check-properties") or False)
    enable_scribble = enable_scribble or analyze_config.get(
        "enable-scribble") or False
    scribble_path = scribble_path or analyze_config.get(
        "scribble-path") or "scribble"
    target = target or analyze_config.get("targets") or None
    scenario = scenario or analyze_config.get("scenario") or None

    if project_id and not create_group:
        LOGGER.debug(f"Only use project-id when create_group is enabled.")
        sys.exit(1)

    # enable property checking if explicitly requested or implicitly when
    # scribble instrumentation is requested
    ctx["client"].handler.middlewares.append(
        PropertyCheckingMiddleware(check_properties or enable_scribble))

    if create_group:
        resp: GroupCreationResponse = ctx["client"].create_group(
            group_name=group_name)
        group_id = resp.group.identifier
        group_name = resp.group.name or ""

    if group_id:
        # associate all following analyses to the passed or newly created group
        group_mw = GroupDataMiddleware(group_id=group_id,
                                       group_name=group_name)
        ctx["client"].handler.middlewares.append(group_mw)

    if project_id and group_id:
        resp: GroupOperationResponse = ctx["client"].add_group_to_project(
            group_id=group_id, project_id=project_id)
        if not resp.project_id == project_id:
            LOGGER.debug(
                f"Failed to add group to project with id {project_id}.")

    jobs: List[Dict[str, Any]] = []
    include = list(include)
    mode_list = determine_analysis_targets(target, forced_scenario=scenario)

    # build submission payloads per detected (or forced) scenario
    for scenario, element in mode_list:
        if scenario == ScenarioMode.TRUFFLE:
            job = TruffleJob(element)
            job.generate_payloads(
                enable_scribble=enable_scribble,
                remappings=remap_import,
                scribble_path=scribble_path,
            )
            jobs.extend(job.payloads)
        elif scenario == ScenarioMode.SOLIDITY_DIR:
            # recursively enumerate sol files if not a truffle project
            LOGGER.debug(
                f"Identified {element} as directory containing Solidity files")
            jobs.extend(
                SolidityJob.walk_solidity_files(
                    solc_version=solc_version,
                    solc_path=solc_path,
                    base_path=element,
                    remappings=remap_import,
                    enable_scribble=enable_scribble,
                    scribble_path=scribble_path,
                ))
        elif scenario == ScenarioMode.SOLIDITY_FILE:
            LOGGER.debug(f"Trying to interpret {element} as a solidity file")
            # targets may carry a contract suffix: "path/file.sol:MyContract"
            target_split = element.split(":")
            file_path, contract = target_split[0], target_split[1:]
            if contract:
                include += contract  # e.g. ["MyContract"] or []
                contract = contract[0]
            job = SolidityJob(Path(file_path))
            job.generate_payloads(
                version=solc_version,
                solc_path=solc_path,
                contract=contract or None,
                remappings=remap_import,
                enable_scribble=enable_scribble,
                scribble_path=scribble_path,
            )
            jobs.extend(job.payloads)

    # reduce to whitelisted contract names
    if include:
        LOGGER.debug(
            f"Filtering {len(jobs)} job(s) for contracts to be included")
        found_contracts = {job["contract_name"] for job in jobs}
        overlap = set(include).difference(found_contracts)
        if overlap:
            raise click.UsageError(
                f"The following contracts could not be found: {', '.join(overlap)}"
            )
        jobs = [job for job in jobs if job["contract_name"] in include]

    # filter jobs where no bytecode was produced
    LOGGER.debug(f"Filtering {len(jobs)} job(s) for empty bytecode")
    jobs = [job for job in jobs if is_valid_job(job)]

    # sanitize local paths
    LOGGER.debug(f"Sanitizing {len(jobs)} jobs")
    jobs = [sanitize_paths(job) for job in jobs]

    LOGGER.debug(f"Submitting {len(jobs)} analysis jobs to the MythX API")

    if not jobs:
        raise click.UsageError(
            ("No jobs were generated. Please make sure your Solidity files "
             "compile correctly or your Truffle project has been compiled."))

    consent = ctx["yes"] or click.confirm(f"Found {len(jobs)} job(s). Submit?")
    if not consent:
        LOGGER.debug("User consent not given - exiting")
        sys.exit(0)

    uuids = []
    with click.progressbar(jobs) as bar:
        for job in bar:
            # attach execution mode, submit, poll
            job.update({"analysis_mode": mode})
            resp = ctx["client"].analyze(**job)
            uuids.append(resp.uuid)

    if async_flag:
        LOGGER.debug(
            f"Asynchronous submission enabled - printing {len(uuids)} UUIDs and exiting"
        )
        write_or_print("\n".join(uuids))
        return

    issues_list: List[Tuple[DetectedIssuesResponse,
                            Optional[AnalysisInputResponse]]] = []
    formatter: BaseFormatter = FORMAT_RESOLVER[ctx["fmt"]]
    for uuid in uuids:
        while not ctx["client"].analysis_ready(uuid):
            # TODO: Add poll interval option
            LOGGER.debug(f"Analysis {uuid} not ready yet - waiting")
            time.sleep(3)
        LOGGER.debug(f"{uuid}: Fetching report")
        resp: DetectedIssuesResponse = ctx["client"].report(uuid)
        LOGGER.debug(f"{uuid}: Fetching input")
        inp: Optional[AnalysisInputResponse] = ctx["client"].request_by_uuid(
            uuid) if formatter.report_requires_input else None

        LOGGER.debug(f"{uuid}: Applying SWC filters")
        util.filter_report(
            resp,
            min_severity=min_severity,
            swc_blacklist=swc_blacklist,
            swc_whitelist=swc_whitelist,
        )
        # extend response with job UUID to keep formatter logic isolated
        resp.uuid = uuid
        issues_list.append((resp, inp))

    LOGGER.debug(
        f"Printing report for {len(issues_list)} issue items with sort key \"{ctx['table_sort_key']}\""
    )
    write_or_print(
        formatter.format_detected_issues(issues_list,
                                         table_sort_key=ctx["table_sort_key"]))
    sys.exit(ctx["retval"])
예제 #9
0
def analyze(
    ctx,
    target: List[str],
    async_flag: bool,
    mode: str,
    create_group: bool,
    group_id: str,
    group_name: str,
    min_severity: str,
    swc_blacklist: str,
    swc_whitelist: str,
    solc_version: str,
    include: Tuple[str],
    remap_import: Tuple[str],
) -> None:
    """Analyze the given directory or arguments with MythX.

    \f

    :param ctx: Click context holding group-level parameters
    :param target: Arguments passed to the `analyze` subcommand
    :param async_flag: Whether to execute the analysis asynchronously
    :param mode: Full or quick analysis mode
    :param create_group: Create a new group for the analysis
    :param group_id: The group ID to add the analysis to
    :param group_name: The group name to attach to the analysis
    :param min_severity: Ignore SWC IDs below the designated level
    :param swc_blacklist: A comma-separated list of SWC IDs to ignore
    :param swc_whitelist: A comma-separated list of SWC IDs to include
    :param solc_version: The solc version to use for Solidity compilation
    :param include: List of contract names to send - exclude everything else
    :param remap_import: List of import remappings to pass on to solc
    :return:
    """

    # NOTE: in this variant YAML config values take precedence over the
    # CLI arguments for every option below (value = config or cli)
    analyze_config = ctx.get("analyze")
    if analyze_config is not None:
        LOGGER.debug("Detected additional yaml config keys - applying")
        config_async = analyze_config.get("async")
        async_flag = config_async if config_async is not None else async_flag
        mode = analyze_config.get("mode") or mode
        config_create_group = analyze_config.get("create-group")
        create_group = (config_create_group
                        if config_create_group is not None else create_group)
        group_id = analyze_config.get("group-id") or group_id
        group_name = analyze_config.get("group-name") or group_name
        min_severity = analyze_config.get("min-severity") or min_severity
        swc_blacklist = analyze_config.get("blacklist") or swc_blacklist
        swc_whitelist = analyze_config.get("whitelist") or swc_whitelist
        solc_version = analyze_config.get("solc") or solc_version
        include = analyze_config.get("contracts") or include
        remap_import = analyze_config.get("remappings") or remap_import
        target = analyze_config.get("targets") or target

    group_name = group_name or ""
    if create_group:
        # open a fresh group and let subsequent analyses attach to it
        resp: GroupCreationResponse = ctx["client"].create_group(
            group_name=group_name)
        group_id = resp.group.identifier
        group_name = resp.group.name or ""

    if group_id:
        # associate all following analyses to the passed or newly created group
        group_mw = GroupDataMiddleware(group_id=group_id,
                                       group_name=group_name)
        ctx["client"].handler.middlewares.append(group_mw)

    jobs: List[Dict[str, Any]] = []
    include = list(include)

    if not target:
        # no explicit targets: auto-detect a Truffle project or loose
        # Solidity files in the current working directory
        if Path("truffle-config.js").exists() or Path("truffle.js").exists():
            files = find_truffle_artifacts(Path.cwd())
            if not files:
                raise click.exceptions.UsageError((
                    "Could not find any truffle artifacts. Are you in the project root? "
                    "Did you run truffle compile?"))
            LOGGER.debug(
                f"Detected Truffle project with files:{', '.join(files)}")
            for file in files:
                jobs.append(generate_truffle_payload(file))

        elif list(glob("*.sol")):
            LOGGER.debug(f"Detected Solidity files in directory")
            jobs = walk_solidity_files(ctx=ctx,
                                       solc_version=solc_version,
                                       remappings=remap_import)
        else:
            raise click.exceptions.UsageError(
                "No argument given and unable to detect Truffle project or Solidity files"
            )
    else:
        # explicit targets: each may be bytecode (0x...), a .sol file
        # (optionally "file.sol:Contract"), or a directory
        for target_elem in target:
            target_split = target_elem.split(":")
            element, suffix = target_split[0], target_split[1:]
            # a contract-name suffix implicitly whitelists that contract
            include += suffix
            if element.startswith("0x"):
                LOGGER.debug(f"Identified target {element} as bytecode")
                jobs.append(generate_bytecode_payload(element))
            elif Path(element).is_file() and Path(element).suffix == ".sol":
                LOGGER.debug(
                    f"Trying to interpret {element} as a solidity file")
                jobs.append(
                    generate_solidity_payload(
                        file=element,
                        version=solc_version,
                        contracts=suffix,
                        remappings=remap_import,
                    ))
            elif Path(element).is_dir():
                LOGGER.debug(f"Identified target {element} as directory")
                files = find_truffle_artifacts(Path(element))
                if files:
                    # extract truffle artifacts if config found in target
                    LOGGER.debug(
                        f"Identified {element} directory as truffle project")
                    jobs.extend(
                        [generate_truffle_payload(file) for file in files])
                else:
                    # recursively enumerate sol files if not a truffle project
                    LOGGER.debug(
                        f"Identified {element} as directory containing Solidity files"
                    )
                    jobs.extend(
                        walk_solidity_files(
                            ctx,
                            solc_version,
                            base_path=element,
                            remappings=remap_import,
                        ))
            else:
                raise click.exceptions.UsageError(
                    f"Could not interpret argument {element} as bytecode, Solidity file, or Truffle project"
                )

    # sanitize local paths
    LOGGER.debug(f"Sanitizing {len(jobs)} jobs")
    jobs = [sanitize_paths(job) for job in jobs]
    # filter jobs where no bytecode was produced
    LOGGER.debug(f"Filtering {len(jobs)} jobs for empty bytecode")
    jobs = [job for job in jobs if is_valid_job(job)]

    # reduce to whitelisted contract names
    if include:
        LOGGER.debug(f"Filtering {len(jobs)} for contracts to be included")
        found_contracts = {job["contract_name"] for job in jobs}
        overlap = set(include).difference(found_contracts)
        if overlap:
            raise click.UsageError(
                f"The following contracts could not be found: {', '.join(overlap)}"
            )
        jobs = [job for job in jobs if job["contract_name"] in include]

    LOGGER.debug(f"Submitting {len(jobs)} analysis jobs to the MythX API")
    uuids = []
    with click.progressbar(jobs) as bar:
        for job in bar:
            # attach execution mode, submit, poll
            job.update({"analysis_mode": mode})
            resp = ctx["client"].analyze(**job)
            uuids.append(resp.uuid)

    if async_flag:
        # async mode: print the job UUIDs and return without waiting
        LOGGER.debug(
            f"Asynchronous submission enabled - printing {len(uuids)} UUIDs and exiting"
        )
        write_or_print("\n".join(uuids))
        return

    issues_list: List[Tuple[DetectedIssuesResponse,
                            Optional[AnalysisInputResponse]]] = []
    formatter: BaseFormatter = FORMAT_RESOLVER[ctx["fmt"]]
    for uuid in uuids:
        # block until the job is done, polling every 3 seconds
        while not ctx["client"].analysis_ready(uuid):
            # TODO: Add poll interval option
            LOGGER.debug(f"Analysis {uuid} not ready yet - waiting")
            time.sleep(3)
        LOGGER.debug(f"{uuid}: Fetching report")
        resp: DetectedIssuesResponse = ctx["client"].report(uuid)
        LOGGER.debug(f"{uuid}: Fetching input")
        # only fetch the analysis input if the formatter renders it
        inp: Optional[AnalysisInputResponse] = ctx["client"].request_by_uuid(
            uuid) if formatter.report_requires_input else None

        LOGGER.debug(f"{uuid}: Applying SWC filters")
        util.filter_report(
            resp,
            min_severity=min_severity,
            swc_blacklist=swc_blacklist,
            swc_whitelist=swc_whitelist,
        )
        # extend response with job UUID to keep formatter logic isolated
        resp.uuid = uuid
        issues_list.append((resp, inp))

    LOGGER.debug(f"Printing report for {len(issues_list)} issue items")
    write_or_print(formatter.format_detected_issues(issues_list))
    # exit code is set elsewhere (e.g. by a CI middleware) via ctx["retval"]
    sys.exit(ctx["retval"])