Example No. 1
def test_add_manifest_force():
    registry_data = RegistryData.from_json(VALID_DATA)
    LOCAL = ANALYZER_B.copy()
    LOCAL["version"] = UNIQUE_VERSION

    registry_data = registry_data.add_pending_manifest(
        AnalyzerManifest.from_json(LOCAL), force=True)
    registry_data = registry_data.add_pending_manifest(
        AnalyzerManifest.from_json(LINKED_ANALYZER_A), force=True)

    assert registry_data._resolve("test-org-name/a", "0.0.1")
    sorted_deps = registry_data.sorted_deps(
        SpecifiedAnalyzer(
            VersionedAnalyzer(AnalyzerName("test-org-name/a"),
                              Version("0.0.1"))))
    assert (SpecifiedAnalyzer(
        VersionedAnalyzer(AnalyzerName("test-org-name/b"),
                          Version(UNIQUE_VERSION))) in sorted_deps)
    assert (SpecifiedAnalyzer(
        VersionedAnalyzer(AnalyzerName("test-org-name/c"), Version("0.0.1")))
            in sorted_deps)
    # test that linking is local: overriding the A->B edge does not override the C->B edge
    sorted_deps = registry_data.sorted_deps(
        SpecifiedAnalyzer(
            VersionedAnalyzer(AnalyzerName("test-org-name/c"),
                              Version("0.0.1"))))
    assert (SpecifiedAnalyzer(
        VersionedAnalyzer(AnalyzerName("test-org-name/b"), Version("0.0.2")))
            in sorted_deps)
Example No. 2
def build_docker(
    analyzer_name: AnalyzerName,
    version: Version,
    docker_context: str,
    dockerfile_path: Optional[str] = None,
    env_args_dict: Optional[Dict] = None,
    verbose: bool = False,
) -> int:
    # Avoid a mutable default argument; merge caller overrides onto the defaults
    env_args_dict = {**DEFAULT_ENV_ARGS_TO_DOCKER, **(env_args_dict or {})}
    docker_image = VersionedAnalyzer(analyzer_name, version).image_id
    if not dockerfile_path:
        dockerfile_path = f"{docker_context}/Dockerfile"
    extra_build_args = [
        f"--build-arg {k}={v}" for (k, v) in env_args_dict.items()
    ]
    build_cmd = (
        f"docker build -t {docker_image} -f {dockerfile_path} {docker_context} "
        + " ".join(extra_build_args))
    if verbose:
        build_cmd += " 1>&2"
    else:
        build_cmd += " >/dev/null"

    logger.debug(f"building with build command: {build_cmd}")
    status = subprocess.call(build_cmd, shell=True)
    return status
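Because the build command above is assembled by string interpolation and executed with `shell=True`, a context path or build-arg value containing spaces or shell metacharacters would break it. A minimal sketch of the same invocation using an argument list instead (assuming the same docker CLI flags; the shell redirections are replaced by `subprocess`'s own stream options):

import subprocess
import sys
from typing import Dict

def build_docker_argv(
    docker_image: str,
    dockerfile_path: str,
    docker_context: str,
    env_args_dict: Dict[str, str],
    verbose: bool = False,
) -> int:
    # Argument-list form: no shell, so no quoting pitfalls
    cmd = ["docker", "build", "-t", docker_image, "-f", dockerfile_path]
    for k, v in env_args_dict.items():
        cmd += ["--build-arg", f"{k}={v}"]
    cmd.append(docker_context)
    # Mirror the original redirection: build output goes to stderr when
    # verbose, and is discarded otherwise
    stdout = sys.stderr if verbose else subprocess.DEVNULL
    return subprocess.call(cmd, stdout=stdout)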
Example No. 3
 @classmethod
 def from_json_str(cls, json_str: str) -> "SpecifiedAnalyzer":
     obj = json.loads(json_str)
     # "parameters" is optional in the payload; default to an empty parameter set
     parameters = AnalyzerParameters(obj.get("parameters", {}))
     va = VersionedAnalyzer.from_json(obj["versioned_analyzer"])
     return cls(va, parameters)
Example No. 4
 def container_log_key(git_url: str, commit_hash: str, image_id: str) -> str:
     """
         Returns the key under which the docker container log is stored
     """
     analyzer = VersionedAnalyzer.from_image_id(image_id)
     repo_id = url_to_repo_id(git_url)
     return (
         f"{analyzer.name}/{analyzer.version}/{repo_id}/{commit_hash}/container.log"
     )
Example No. 5
def test_equality():
    sa = SpecifiedAnalyzer(VERSIONED_ANALYZER, PARAMETERS)

    # Constructing from new objects
    sa2 = SpecifiedAnalyzer(
        VersionedAnalyzer(AnalyzerName("r2c/test-analyzer"), Version("1.2.3")),
        AnalyzerParameters({}),
    )

    assert sa == sa2
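The equality between separately constructed instances only holds if `SpecifiedAnalyzer` implements value-based `__eq__` (and `__hash__`, since instances are also placed in sets in other examples on this page). A minimal sketch of that contract, assuming the class carries exactly the two fields shown; the real implementation is not reproduced here:

from dataclasses import dataclass
from typing import Any

# Hypothetical stand-in: a frozen dataclass gets field-wise __eq__ and
# __hash__ for free, which is the behavior test_equality relies on
# (the field values themselves must also be comparable/hashable)
@dataclass(frozen=True)
class SpecifiedAnalyzerSketch:
    versioned_analyzer: Any
    parameters: Any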
Example No. 6
def prepull_analyzers(analyzer_name: str, version: Version) -> None:
    """
        Pulls all analyzers needed to run the specified analyzer (i.e., its dependencies)
    """

    specified_analyzer = SpecifiedAnalyzer(
        VersionedAnalyzer(AnalyzerName(analyzer_name), version))
    registry = RegistryData.from_json(REGISTRY)

    deps = registry.sorted_deps(specified_analyzer)
    client = get_docker_client()
    for dep in deps:
        if _should_pull_analyzer(dep):
            client.images.pull(dep.versioned_analyzer.image_id)
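`_should_pull_analyzer` is not shown in this example. A plausible sketch (hypothetical, not the repo's actual helper) is a local-image existence check via docker-py:

import docker
import docker.errors

def _should_pull_analyzer_sketch(dep) -> bool:
    # Hypothetical: pull only when the dependency's image is absent locally
    client = docker.from_env()
    try:
        client.images.get(dep.versioned_analyzer.image_id)
        return False  # image already present
    except docker.errors.ImageNotFound:
        return True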
Example No. 7
    def _dependency_graph(
        self, subgraph_from_node: Optional[SpecifiedAnalyzer] = None
    ) -> Graph[SpecifiedAnalyzer]:
        edges = set()
        nodes = set()

        if subgraph_from_node:
            to_explore = {subgraph_from_node}
        else:
            to_explore = set(SpecifiedAnalyzer(va) for va in self.versioned_analyzers)

        # this loop terminates after at most sum(len(deps)) iterations because we always pop values off
        # `to_explore` and add them to `nodes`, and only add values to `to_explore` if
        # they're not in `nodes`.
        while to_explore:
            sa = to_explore.pop()
            next_manifest = self.manifest_for(sa.versioned_analyzer)
            if not next_manifest:
                raise ManifestNotFoundException(
                    f"manifest not found for analyzer {sa.versioned_analyzer.name} at version {sa.versioned_analyzer.version}. Registry data: {self.to_json()}"
                )

            deps = next_manifest.dependencies

            nodes.add(sa)
            for dep in deps:
                resolved_version = self._resolve(
                    AnalyzerName(dep.name), dep.wildcard_version
                )
                if resolved_version is None:
                    raise Exception(f"Can't resolve dependency {dep} of {sa}")

                resolved_dep = VersionedAnalyzer(dep.name, resolved_version)
                specified_dep = SpecifiedAnalyzer(resolved_dep, dep.parameters)

                edges.add((sa, specified_dep))
                if specified_dep not in nodes:
                    to_explore.add(specified_dep)

        return Graph(list(nodes), list(edges))
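The termination comment above is the standard worklist invariant. Stripped of the registry types, the same pattern over a plain adjacency dict looks like this (a generic sketch, not the repo's Graph class):

from typing import Dict, List, Set

def reachable(graph: Dict[str, List[str]], start: str) -> Set[str]:
    """Worklist traversal: terminates because a node enters `done` the
    moment it is popped and is never re-enqueued afterwards, so each
    node is popped at most once."""
    done: Set[str] = set()
    to_explore = {start}
    while to_explore:
        node = to_explore.pop()
        done.add(node)
        for nbr in graph.get(node, []):
            if nbr not in done:
                to_explore.add(nbr)
    return done

# reachable({"a": ["b"], "b": ["c"], "c": ["b"]}, "a") == {"a", "b", "c"}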
Example No. 8
def unittest(ctx, analyzer_directory, env_args_string):
    """
    Locally run unit tests for the current analyzer directory.

    You can define how to run your unit tests in `src/unittest.sh`.

    You may have to log in if your analyzer depends on privately
    published analyzers.
    """
    debug = ctx.obj["DEBUG"]
    env_args_dict = parse_remaining(env_args_string)

    manifest, analyzer_directory = find_and_open_analyzer_manifest(
        analyzer_directory, ctx
    )

    abort_on_build_failure(
        build_docker(
            manifest.analyzer_name,
            manifest.version,
            os.path.relpath(analyzer_directory, os.getcwd()),
            env_args_dict={**DEFAULT_ENV_ARGS_TO_DOCKER, **env_args_dict},
            verbose=debug,
        )
    )

    image_id = VersionedAnalyzer(manifest.analyzer_name, manifest.version).image_id

    status = run_docker_unittest(
        analyzer_directory=analyzer_directory,
        analyzer_name=manifest.analyzer_name,
        docker_image=image_id,
        verbose=debug,
        env_args_dict={**DEFAULT_ENV_ARGS_TO_DOCKER, **env_args_dict},
    )
    if status == 0:
        print_success("Unit tests passed")
        sys.exit(0)
    else:
        print_error_exit(f"Unit tests failed with status {status}", status_code=status)
Example No. 9
    def get_direct_dependencies(self, va: VersionedAnalyzer) -> List[SpecifiedAnalyzer]:
        """
            Returns direct dependencies of an analyzer
        """
        manifest = self.manifest_for(va)
        if manifest is None:
            raise ManifestNotFoundException(f"manifest not found for {va}.")

        resolved_values = []
        for dep in manifest.dependencies:
            resolved_version = self._resolve(
                AnalyzerName(dep.name), dep.wildcard_version
            )
            if resolved_version is not None:
                resolved_values.append(
                    SpecifiedAnalyzer(
                        VersionedAnalyzer(AnalyzerName(dep.name), resolved_version),
                        dep.parameters,
                    )
                )

        return resolved_values
Example No. 10
def run_analyzer_on_local_code(
    registry_data: RegistryData,
    manifest: AnalyzerManifest,
    workdir: Optional[str],
    analyzer_dir: str,
    code_dir: str,
    output_path: Optional[str],
    show_output_on_stdout: bool,
    pass_analyzer_output: bool,
    no_preserve_workdir: bool,
    env_args_dict: dict,
    interactive_index: Optional[int] = None,
    interactive_name: Optional[str] = None,
    reset_cache: bool = False,
    validator: Optional[Callable[[str], bool]] = None,
    parameters: Optional[Dict[str, str]] = None,
) -> Optional[bool]:
    """Run an analyzer on a local folder. Returns the result of any validator, if
    present, or None if there was no validation performed.

    Args:
        output_path: if supplied, the analyzer output file (e.g. output.json, fs.tar.gz) will be written to this local path
        show_output_on_stdout: show the analyzer output file on stdout
        pass_analyzer_output: if false, analyzer stdout and stderr will be suppressed
        validator: a callable that takes the path to an analyzer's output.json as its argument and returns whether it is valid for the analyzer's schema
        interactive_index: don't start the container - just shell into the container (specified by index) before the analyzer executes, then exit
    """
    json_output_store = LocalJsonOutputStore()
    filesystem_output_store = LocalFilesystemOutputStore()
    log_store = LocalLogStore()

    if reset_cache:
        json_output_store.delete_all()
        filesystem_output_store.delete_all()
        log_store.delete_all()

    pathlib.Path(LOCAL_RUN_TMP_FOLDER).mkdir(parents=True, exist_ok=True)

    versioned_analyzer = VersionedAnalyzer(manifest.analyzer_name,
                                           manifest.version)

    if not manifest.is_locally_linked:
        # try adding the manifest of the current analyzer if it isn't already there
        if versioned_analyzer not in registry_data.versioned_analyzers:
            logger.info(
                "Analyzer manifest not present in registry. Adding it to the local copy of registry."
            )
            registry_data = registry_data.add_pending_manifest(manifest)
        else:
            logger.info("Analyzer manifest already present in registry")
    else:
        registry_data = setup_locally_linked_analyzer(manifest, registry_data,
                                                      analyzer_dir)

    url_placeholder, commit_placeholder = get_local_git_origin_and_commit(
        code_dir)

    # Add any parameters required to specified_analyzer
    if parameters is not None:
        parameters = AnalyzerParameters(parameters)
    specified_analyzer = SpecifiedAnalyzer(versioned_analyzer, parameters)

    # Delete cached result of top level analyzer if it exists
    json_output_store.delete(url_placeholder, commit_placeholder,
                             specified_analyzer)
    filesystem_output_store.delete(url_placeholder, commit_placeholder,
                                   specified_analyzer)

    # get all cloner versions from registry so we can copy the passed in code directory in place
    # of output for all versions of cloner
    versions = [
        sa.versioned_analyzer
        for sa in registry_data.sorted_deps(specified_analyzer)
        if sa.versioned_analyzer.name in SPECIAL_ANALYZERS
    ]
    logger.info(
        f'"Uploading" (moving) code directory as the output of all cloners. Cloner versions: {versions}'
    )

    # There's no good way to pass an undefined-like value to a function that has a default argument
    if workdir is not None and os.path.exists(os.path.abspath(workdir)):
        abs_workdir = os.path.abspath(workdir)
        logger.info(f"CLI-specified workdir: {abs_workdir}")

        if len(os.listdir(abs_workdir)) > 0:
            if not no_preserve_workdir:
                logger.error(
                    "CLI-specified workdir is not empty! This directory must be empty or you must pass the `--no-preserve-workdir` option."
                )
                raise WorkdirNotEmptyError(abs_workdir)
            else:
                logger.warning(
                    "CLI-specified workdir is not empty, but override flag used!"
                )
                logger.warning(
                    "RUNNING ANALYZERS MAY MODIFY OR CLEAR WORKDIR CONTENTS WITHOUT WARNING!"
                )
                logger.warning("THIS IS YOUR LAST CHANCE TO BAIL OUT!")

        analyzer = Analyzer(
            registry_data,
            json_output_store,
            filesystem_output_store,
            log_store,
            localrun=True,
            workdir=abs_workdir,
            timeout=0,
        )
    else:
        logger.info("Using default workdir")
        analyzer = Analyzer(
            registry_data,
            json_output_store,
            filesystem_output_store,
            log_store,
            localrun=True,
            timeout=0,
        )

    for va in versions:
        with tempfile.TemporaryDirectory(
                prefix=LOCAL_RUN_TMP_FOLDER) as mount_folder:
            logger.info(f"Created tempdir at {mount_folder}")
            os.mkdir(os.path.join(mount_folder, "output"))

            if not os.path.exists(code_dir):
                raise Exception(f"code directory {code_dir} doesn't exist")

            output_fs_path = os.path.join(mount_folder, "output", "fs")

            if os.name == "nt":
                try:
                    if not symlink_exists(code_dir):
                        shutil.copytree(code_dir, output_fs_path)
                    else:
                        shutil.copytree(
                            code_dir,
                            output_fs_path,
                            symlinks=True,
                            ignore_dangling_symlinks=True,
                        )
                except shutil.Error as e:
                    raise SymlinkNeedsElevationError(
                        "You may need admin privileges to operate on symlinks")
            else:
                shutil.copytree(
                    code_dir,
                    output_fs_path,
                    symlinks=True,
                    ignore_dangling_symlinks=True,
                )

            # "upload" output using our LocalDir infra (actually just a copy)
            analyzer.upload_output(SpecifiedAnalyzer(va), url_placeholder,
                                   commit_placeholder, mount_folder)

    start_ts = time.time()

    results = analyzer.full_analyze_request(
        git_url=url_placeholder,
        commit_string=commit_placeholder,
        specified_analyzer=specified_analyzer,
        force=False,
        interactive_index=interactive_index,
        interactive_name=interactive_name,
        pass_analyzer_output=pass_analyzer_output,
        memory_limit=CONTAINER_MEMORY_LIMIT,
        env_args_dict=env_args_dict,
    )
    analyzer_time = time.time() - start_ts

    # Can't use NamedTemporaryFile here because we are copying to the
    # file by name and not by the already opened file handle
    # Should wrap this in a context manager (https://github.com/returntocorp/echelon-backend/issues/2735)
    if not output_path:
        _, output_path_used = tempfile.mkstemp(dir=get_tmp_dir())
    else:
        output_path_used = output_path

    # Get Final Output
    if manifest.output_type == AnalyzerOutputType.json:
        json_output_store.get(url_placeholder, commit_placeholder,
                              specified_analyzer, output_path_used)
    elif manifest.output_type == AnalyzerOutputType.filesystem:
        filesystem_output_store.get(url_placeholder, commit_placeholder,
                                    specified_analyzer, output_path_used)

    if show_output_on_stdout:
        logger.info(
            f"Analyzer output (found in: {results['container_output_path']})")
        logger.info("=" * 60)
        if tarfile.is_tarfile(output_path_used):
            with tarfile.open(output_path_used, "r") as tar:
                tar.list(verbose=False)
        else:
            with open(output_path_used, "r") as f:
                print(f.read())  # explicitly send this to stdout

    if validator:
        return validator(output_path_used)

    if not output_path:
        os.remove(output_path_used)
    else:
        logger.info(f"Wrote analyzer output to: {output_path_used}")

    return None
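Given the contract in the docstring, a hedged usage sketch (`registry_data` and `manifest` are assumed to be loaded already, and the validator here is hypothetical; it just checks for a top-level "results" key):

import json

def results_validator(output_json_path: str) -> bool:
    # Hypothetical validator: path in, bool out, per the docstring contract
    with open(output_json_path) as f:
        return "results" in json.load(f)

ok = run_analyzer_on_local_code(
    registry_data=registry_data,   # assumed already loaded from the registry
    manifest=manifest,             # assumed already parsed from analyzer.json
    workdir=None,
    analyzer_dir=".",
    code_dir="./my-project",
    output_path="output.json",
    show_output_on_stdout=False,
    pass_analyzer_output=False,
    no_preserve_workdir=False,
    env_args_dict={},
    validator=results_validator,
)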
Example No. 11
 def versioned_analyzers(self):
     return [
         VersionedAnalyzer(analyzer_name, version)
         for analyzer_name, analyzer_data in self._data.items()
         for version in analyzer_data.versions.keys()
     ]
Example No. 12
    def add_pending_manifest(
        self, manifest: AnalyzerManifest, force: bool = False
    ) -> "RegistryData":
        """
            Add this manifest into the current registry data as pending upload.
            This method first verifies that:
            1. Name conforms to org/name
            2. This is not a duplicate versioned analyzer
            3. Its dependencies can be resolved
            4. It doesn't cause circular dependencies

            Arguments:
                manifest: The manifest of the analyzer we want to add to the registry
                force: Force overwrite into the registry if a manifest already exists with a matching name.
                    This flag suppresses the InvalidManifestException thrown for a manifest that already exists

            Returns:
                A new RegistryData object with manifest added in.

            Throws:
                An InvalidManifestException if the manifest can't be added
        """
        name = manifest.analyzer_name
        version = manifest.version
        va = VersionedAnalyzer(name, version)
        # check that name looks like org/name
        # don't do this check for now until we change analyzer naming everywhere else
        # TODO: Actually get the current org's name current_org
        # if not is_analyzer_of_org(name, current_org):
        #     raise Exception(f"Analyzer name must be of the form {org_name}/name")

        # create here and return at the end because it comes in handy
        new_reg = self.UNSAFE_add_manifest(manifest)
        # check that we can resolve its dependencies
        for dep in manifest.dependencies:
            # Check that it doesn't depend on itself
            if dep.name == name:
                raise InvalidManifestException(
                    f"Resolving this dependency: {dep} But analyzer can't depend on itself."
                )
            resolved_version = new_reg._resolve(
                AnalyzerName(dep.name), dep.wildcard_version
            )

            if dep.path:
                if not os.path.isdir(dep.path) or not os.path.exists(dep.path):
                    raise InvalidLocalPathException(
                        f"A dependency in this manifest cannot be resolved: {dep}"
                    )
            else:
                if resolved_version is None:
                    raise InvalidManifestException(
                        f"A dependency in this manifest cannot be resolved: {dep}"
                    )

        # Check that we don't already have a manifest for it.
        # i.e. don't allow a new manifest without changing analyzer version.
        # TODO: check that it's increased
        analyzer_data = self._data.get(name)
        if analyzer_data:
            if version in analyzer_data.versions.keys():
                if not force:
                    raise InvalidManifestException(
                        f"A manifest for this analyzer and version already exists: {va}"
                    )

        # and see if it can be topologically sorted
        deps_graph = new_reg._dependency_graph()
        try:
            sorted_deps = deps_graph.topo_sorted()
        except CircularDependencyError:
            raise InvalidManifestException(
                "This manifest would cause a cycle in the dependency graph"
            )

        # all is well? return the new registry
        return new_reg
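The cycle check delegates to `Graph.topo_sorted()`, which raises `CircularDependencyError` on a cycle. For reference, the same validation can be sketched with the standard library's `graphlib` (Python 3.9+), fed the (node, dependency) edge pairs that `_dependency_graph` produces; `CycleError` stands in for the repo's exception:

from graphlib import TopologicalSorter, CycleError

def topo_sorted_sketch(edges):
    """edges: iterable of (node, dependency) pairs. Returns a
    dependencies-first ordering, or raises on a cycle."""
    ts = TopologicalSorter()
    for node, dep in edges:
        ts.add(node, dep)  # `node` depends on `dep`
    try:
        return list(ts.static_order())
    except CycleError as e:
        raise ValueError(f"dependency cycle detected: {e.args[1]}") from e

# topo_sorted_sketch([("a", "b"), ("b", "c")]) == ["c", "b", "a"]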
Example No. 13
 def versioned_analyzers(self) -> List[VersionedAnalyzer]:
     return [
         VersionedAnalyzer(name, version)
         for name in self._data.keys()
         for version in self._data[name].versions.keys()
     ]
Example No. 14
def push(ctx, analyzer_directory, env_args_string):
    """
    Push the analyzer in the current directory to the R2C platform.

    You must log in to push analyzers.

    This command will validate your analyzer and privately publish your analyzer
    to your org with the name specified in analyzer.json.

    Your analyzer name must follow {org}/{name}.
    """
    debug = ctx.obj["DEBUG"]
    env_args_dict = parse_remaining(env_args_string)

    manifest, analyzer_directory = find_and_open_analyzer_manifest(
        analyzer_directory, ctx
    )
    analyzer_org = get_org_from_analyzer_name(manifest.analyzer_name)

    # TODO(ulzii): let's decide which source of truth we're using for analyzer_name above and/or check consistency.
    # can't have both dir name and what's in analyzer.json
    print_msg(f"Pushing analyzer in {analyzer_directory}...")

    default_org = get_default_org()
    if default_org != analyzer_org:
        print_error_exit(
            f"Attempting to push to organization: `{default_org}`. However, the org specified as the prefix of the analyzer name in `analyzer.json` does not match it. "
            + f"Replace `{analyzer_org}` with `{default_org}` and try again. "
            + "Please ask for help from R2C support"
        )

    try:
        # upload analyzer.json
        artifact_link = upload_analyzer_manifest(manifest)
    except Exception as e:
        message = getattr(e, "message", repr(e))
        print_error_exit(f"There was an error uploading your analyzer: {message}")
    if artifact_link is None:
        print_error_exit(
            "There was an error uploading your analyzer. Please ask for help from R2C support"
        )
    # get docker login creds
    creds = get_docker_creds(artifact_link)
    if creds is None:
        print_error_exit(
            "There was an error getting Docker credentials. Please ask for help from R2C support"
        )
    # docker login
    successful_login = docker_login(creds)
    if not successful_login:
        print_error_exit(
            "There was an error logging into Docker. Please ask for help from R2C support"
        )
    # docker build and tag
    abort_on_build_failure(
        build_docker(
            manifest.analyzer_name,
            manifest.version,
            os.path.relpath(analyzer_directory, os.getcwd()),
            env_args_dict={**DEFAULT_ENV_ARGS_TO_DOCKER, **env_args_dict},
            verbose=debug,
        )
    )
    # docker push
    image_id = VersionedAnalyzer(manifest.analyzer_name, manifest.version).image_id
    successful_push = docker_push(image_id)
    if not successful_push:
        print_error_exit(
            "There was an error pushing the Docker image. Please ask for help from R2C support"
        )
    # mark uploaded with API
    # TODO figure out how to determine org from analyzer.json
    try:
        uploaded_url = f"{get_base_url()}/api/v1/analyzers/{manifest.analyzer_name}/{manifest.version}/uploaded"
        r = auth_put(uploaded_url)
        data = handle_request_with_error_message(r)
        if data.get("status") == "uploaded":
            web_url = data["links"]["web_url"]
            # display status to user and give link to view in web UI
            print_success(f"Successfully uploaded analyzer! Visit: {web_url}")
        else:
            print_error_exit(
                "Error confirming analyzer was successfully uploaded. Please contact us with the following information: failed to confirm analyzer finished uploading."
            )
    except Exception as e:
        message = getattr(e, "message", repr(e))
        print_error_exit(
            f"Error confirming analyzer was successfully uploaded: {message}"
        )
Example No. 15
import json

from r2c.lib.specified_analyzer import SpecifiedAnalyzer, AnalyzerParameters
from r2c.lib.versioned_analyzer import AnalyzerName, VersionedAnalyzer

from semantic_version import Version


ANALYZER_NAME = AnalyzerName("r2c/test-analyzer")
VERSION = Version("1.2.3")
VERSIONED_ANALYZER = VersionedAnalyzer(ANALYZER_NAME, VERSION)
PARAMETERS = AnalyzerParameters({})


def test_constructor():
    sa = SpecifiedAnalyzer(VERSIONED_ANALYZER, PARAMETERS)
    assert ANALYZER_NAME == sa.versioned_analyzer.name
    assert VERSION == sa.versioned_analyzer.version
    assert sa.parameters is not None
    for parameter in sa.parameters:
        assert PARAMETERS[parameter] == sa.parameters[parameter]


def test_json_conversion():
    sa = SpecifiedAnalyzer(VERSIONED_ANALYZER, PARAMETERS)
    sa2 = SpecifiedAnalyzer.from_json_str(json.dumps(sa.to_json()))

    assert sa.versioned_analyzer.name == sa2.versioned_analyzer.name
    assert sa.versioned_analyzer.version == sa2.versioned_analyzer.version

    # Parameters Match
Example No. 16
def run_analyzer_on_local_code(
    analyzer_name: str,
    version: Version,
    base: Path,
    ignore_files: Set[Path],
    target_files: Iterable[str],
) -> JsonR:
    """Run an analyzer on a local folder.
    """
    get_docker_client()  # Ensures that docker is running

    specified_analyzer = SpecifiedAnalyzer(
        VersionedAnalyzer(AnalyzerName(analyzer_name), version))
    registry = RegistryData.from_json(REGISTRY)

    json_output_store = LocalJsonOutputStore()
    filesystem_output_store = LocalFilesystemOutputStore()
    log_store = LocalLogStore()
    stats_store = LocalStatsStore()

    # All caching should be handled by bento
    json_output_store.delete_all()  # type: ignore
    filesystem_output_store.delete_all()  # type: ignore
    log_store.delete_all()  # type: ignore
    stats_store.delete_all()  # type: ignore

    pathlib.Path(LOCAL_RUN_TMP_FOLDER).mkdir(parents=True, exist_ok=True)

    analyzer = Analyzer(
        registry,
        json_output_store,
        filesystem_output_store,
        log_store,
        stats_store,
        workdir=LOCAL_RUN_TMP_FOLDER,
        timeout=0,  # Note: timeout relies on signaling, which is not valid in a multithreaded world
        memory_limit=CONTAINER_MEMORY_LIMIT,
    )

    # get all cloner versions from registry so we can copy the passed in code directory in place
    # of output for all versions of cloner
    fetchers = [
        sa for sa in registry.sorted_deps(specified_analyzer)
        if sa.versioned_analyzer.name in SPECIAL_ANALYZERS
    ]

    analyzer_input = LocalCode(str(base))
    for fetcher in fetchers:
        _copy_local_input(
            analyzer,
            fetcher.versioned_analyzer,
            analyzer_input,
            ignore_files,
            set(target_files),
        )

    analyzer.full_analyze_request(
        analyzer_input=analyzer_input,
        specified_analyzer=specified_analyzer,
        force=False,
    )

    # Get Final Output
    output = json_output_store.read(analyzer_input, specified_analyzer)
    if output is None:
        output = "{}"  # json.loads("") would raise, so fall back to an empty JSON object
    output_json = json.loads(output).get("results", [])

    # Cleanup state
    json_output_store.delete_all()  # type: ignore
    filesystem_output_store.delete_all()  # type: ignore
    log_store.delete_all()  # type: ignore
    stats_store.delete_all()  # type: ignore

    return output_json
Example No. 17
def test(ctx, analyzer_directory, which, cache, env_args_string):
    """
    Locally run tests for the current analyzer.

    You can add integration test files to the `examples/` directory.
    For more information, refer to the integration test section of the README.

    For unittests, you can define how to run your unit tests in `src/unittest.sh`.

    You may have to log in if your analyzer depends on privately
    published analyzers.
    """

    verbose = ctx.obj["VERBOSE"]
    env_args_dict = parse_remaining(env_args_string)
    print_msg(
        f"Running integration tests for analyzer {'with debug mode' if ctx.obj['DEBUG'] else ''}"
    )

    manifest, analyzer_directory = find_and_open_analyzer_manifest(
        analyzer_directory, ctx
    )
    check_docker_is_running()
    print_msg("🔨 Building docker container")
    abort_on_build_failure(
        build_docker(
            manifest.analyzer_name,
            manifest.version,
            os.path.relpath(analyzer_directory, os.getcwd()),
            env_args_dict=env_args_dict,
        )
    )
    if which == "unit" or which == "all":
        image_id = VersionedAnalyzer(manifest.analyzer_name, manifest.version).image_id
        status = run_docker_unittest(
            analyzer_directory=analyzer_directory,
            analyzer_name=manifest.analyzer_name,
            docker_image=image_id,
            verbose=verbose,
            env_args_dict=env_args_dict,
        )
        if status == 0:
            print_success("Unit tests passed")
        else:
            print_error(f"Unit tests failed with status {status}")
    if which == "integration" or which == "all":
        try:
            passed_all = integration_test(
                manifest=manifest,
                analyzer_directory=analyzer_directory,
                workdir=None,
                verbose=verbose,
                env_args_dict=env_args_dict,
                registry_data=get_registry_data(),
                use_cache=cache,
            )
            if passed_all:
                print_success("All integration tests passed")
            else:
                print_error_exit("Some integration tests failed", status_code=-1)

        except SymlinkNeedsElevationError as sym:
            print_error(
                f"Error setting up integration tests. {sym}. Try again as an admin"
            )
Example No. 18
def test_should_pull() -> None:
    assert _should_pull_analyzer(
        SpecifiedAnalyzer(
            VersionedAnalyzer(AnalyzerName("doesnt/exist"), Version("9.1.1"))
        )
    )
Example No. 19
def push(ctx, analyzer_directory, force, squash, env_args_string):
    """
    Push the analyzer in the current directory to the R2C platform.

    You must log in to push analyzers.

    This command will validate your analyzer and privately publish your analyzer
    to your org with the name specified in analyzer.json.

    Your analyzer name must follow {org}/{name}.
    """
    env_args_dict = parse_remaining(env_args_string)

    manifest, analyzer_directory = find_and_open_analyzer_manifest(
        analyzer_directory, ctx
    )
    readme = find_and_open_analyzer_readme(analyzer_directory, ctx)
    analyzer_org = get_org_from_analyzer_name(manifest.analyzer_name)

    overwriting_message = (
        " and forcing overwrite if the analyzer version exists and is pending upload."
    )
    # TODO(ulzii): let's decide which source of truth we're using for analyzer_name above and/or check consistency.
    # can't have both dir name and what's in analyzer.json
    print_msg(
        f"📌 Pushing analyzer in {analyzer_directory}{overwriting_message if force else ''}..."
    )

    default_org = get_default_org()
    if default_org is None:
        print_error_exit(
            "You are not logged in. Please run `r2c login` to be able to push your analyzer"
        )

    if default_org != analyzer_org:
        if analyzer_org != PLATFORM_ANALYZER_PREFIX:
            print_error_exit(
                f"You're logged in to the common r2c platform. The org specified as the prefix of your analyzer name in `analyzer.json` must be `{PLATFORM_ANALYZER_PREFIX}`. "
                + f"Replace `{analyzer_org}` with `{PLATFORM_ANALYZER_PREFIX}` and try again. "
                + "Please ask for help from r2c support"
            )
    try:
        # upload analyzer.json
        artifact_link = _upload_analyzer_manifest(manifest, force, readme)
    except Exception as e:
        print_exception_exit("There was an error uploading your analyzer", e)
    if artifact_link is None:
        print_error_exit(
            "There was an error uploading your analyzer. Please ask for help from R2C support"
        )
    get_logger().info(f"using artifact link: {artifact_link}")
    # get docker login creds
    creds = get_docker_creds(artifact_link)
    if creds is None:
        print_error_exit(
            "There was an error getting Docker credentials. Please ask for help from R2C support"
        )
    else:
        print_success_step("Successfully fetched credentials.")

    # docker login
    successful_login = docker_login(creds)
    if not successful_login:
        print_error_exit(
            "There was an error logging into Docker. Please ask for help from R2C support"
        )
    else:
        print_success_step("Successfully logged in to docker.")

    print_msg("🔨 Building docker container")
    # docker build and tag
    abort_on_build_failure(
        build_docker(
            manifest.analyzer_name,
            manifest.version,
            os.path.relpath(analyzer_directory, os.getcwd()),
            env_args_dict=env_args_dict,
            squash=squash,
        )
    )
    # docker push
    image_id = VersionedAnalyzer(manifest.analyzer_name, manifest.version).image_id
    print_msg(f"Pushing docker container to `{analyzer_org}`")
    successful_push = _docker_push(image_id)
    if not successful_push:
        print_error_exit(
            "There was an error pushing the Docker image. Please ask for help from R2C support"
        )
    else:
        print_success_step("Successfully pushed to R2C.")
    # mark uploaded with API
    # TODO figure out how to determine org from analyzer.json
    try:
        uploaded_url = f"{get_base_url()}/api/v1/analyzers/{manifest.analyzer_name}/{manifest.version}/uploaded"
        r = auth_put(uploaded_url)
        data = handle_request_with_error_message(r)
        if data.get("status") == "uploaded":
            web_url = data["links"]["web_url"]
            # display status to user and give link to view in web UI
            print_success(
                f"Upload finished successfully for analyzer! Visit: {web_url}"
            )
        else:
            print_error_exit("Error confirming analyzer was successfully uploaded.")
    except Exception as e:
        print_exception_exit("Error confirming analyzer was successfully uploaded", e)