def get_anchorelock(lockId=None, driver=None):
    global anchorelock, anchorelocks
    ret = anchorelock

    # first, check if we need to update the anchore configs
    localconfig = anchore_engine.configuration.localconfig.get_config()

    if not driver or driver in ['localanchore']:
        if 'anchore_scanner_config' not in localconfig:
            localconfig['anchore_scanner_config'] = get_config()
        anchore_config = localconfig['anchore_scanner_config']
        anchore_data_dir = anchore_config['anchore_data_dir']
    else:
        #anchore_data_dir = "/root/.anchore"
        anchore_data_dir = "{}/.anchore".format(os.getenv("HOME", "/tmp/anchoretmp"))
        if not os.path.exists(os.path.join(anchore_data_dir, 'conf')):
            try:
                os.makedirs(os.path.join(anchore_data_dir, 'conf'))
            except:
                pass

    try:
        for src, dst in [
                (localconfig['anchore_scanner_analyzer_config_file'],
                 os.path.join(anchore_data_dir, 'conf', 'analyzer_config.yaml')),
                (os.path.join(localconfig['service_dir'], 'anchore_config.yaml'),
                 os.path.join(anchore_data_dir, 'conf', 'config.yaml'))]:
            logger.debug("checking defaults against installed: " + src + " : " + dst)
            if os.path.exists(src):
                default_file = src
                installed_file = dst

                do_copy = False
                try:
                    same = filecmp.cmp(default_file, installed_file)
                    if not same:
                        do_copy = True
                except:
                    do_copy = True

                #if not filecmp.cmp(default_file, installed_file):
                if do_copy:
                    logger.debug("checking source yaml ("+str(default_file)+")")
                    # check that it is at least valid yaml before copying in place
                    with open(default_file, 'r') as FH:
                        yaml.safe_load(FH)

                    logger.info("copying new config into place: " + str(src) + " -> " + str(dst))
                    shutil.copy(default_file, installed_file)

    except Exception as err:
        logger.warn("could not check/install analyzer anchore configurations (please check yaml format of your configuration files), continuing with default - exception: " + str(err))

    if lockId:
        lockId = base64.encodebytes(lockId.encode('utf-8'))
        if lockId not in anchorelocks:
            anchorelocks[lockId] = threading.Lock()
        ret = anchorelocks[lockId]
        logger.spew("all locks: " + str(anchorelocks))
    else:
        ret = anchorelock

    return ret
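
A minimal usage sketch (the pull string below is an illustrative placeholder, not from the source): the returned threading.Lock is held as a context manager for the duration of per-image work, which is how the analyzer functions further down use it.

lock = get_anchorelock(lockId="registry.example.com/repo@sha256:abcd")
with lock:
    pass  # perform per-image work (e.g. analysis) while holding the lock
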
Example #2
    def get_vulnerabilities_for_sbom(self, grype_sbom: str) -> dict:
        """
        Use grype to scan the provided sbom for vulnerabilities.
        """
        # Get the read lock
        with self.read_lock_access():
            # Get env variables to run the grype scan with
            env_variables = self._get_env_variables()

            # Format and run the command. Grype supports piping in an sbom string
            cmd = "{}".format(self.GRYPE_SUB_COMMAND)

            logger.spew("Running grype with command: {} | {}".format(
                grype_sbom, self.GRYPE_SUB_COMMAND))

            try:
                stdout, _ = run_check(
                    shlex.split(cmd),
                    input_data=grype_sbom,
                    log_level="spew",
                    env=env_variables,
                )
            except CommandException as exc:
                logger.error(
                    "Exception running command: %s, stderr: %s",
                    cmd,
                    exc.stderr,
                )
                raise exc

            # Return the output as json
            return json.loads(stdout)
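
A hedged usage sketch (the wrapper instance and the SBOM string are placeholders, not taken from the source): the caller pipes an SBOM JSON string in and receives grype's report back as a dict, typically reading its "matches" list.

sbom_json = '{"artifacts": [], "source": {}, "distro": {}}'   # placeholder SBOM
report = wrapper.get_vulnerabilities_for_sbom(sbom_json)
matches = report.get("matches", [])
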
Example #3
def perform_analyze_nodocker(userId, manifest, image_record, registry_creds):
    ret_analyze = {}
    ret_query = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    try:
        tmpdir = localconfig['tmp_dir']
    except Exception as err:
        logger.warn("could not get tmp_dir from localconfig - exception: " + str(err))
        tmpdir = "/tmp"

    # choose the first image_detail entry; TODO: possibly more complex selection here
    try:
        image_detail = image_record['image_detail'][0]
        registry_manifest = manifest
        pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['imageDigest']
        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
        logger.debug("using pullstring ("+str(pullstring)+") and fulltag ("+str(fulltag)+") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception("failed to extract requisite information from image_record - exception: " + str(err))
        
    timer = int(time.time())
    logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))
    logger.info("performing analysis on image: " + str([userId, pullstring, fulltag]))

    logger.debug("obtaining anchorelock..." + str(pullstring))
    with localanchore.get_anchorelock(lockId=pullstring):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))
        analyzed_image_report = localanchore_standalone.analyze_image(userId, registry_manifest, image_record, tmpdir, registry_creds=registry_creds)
        ret_analyze = analyzed_image_report

    logger.info("performing analysis on image complete: " + str(pullstring))

    return (ret_analyze)
def perform_analyze(
    account,
    manifest,
    image_record,
    registry_creds,
    layer_cache_enable=False,
    parent_manifest=None,
):
    ret_analyze = {}

    loaded_config = get_config()
    tmpdir = get_tempdir(loaded_config)

    use_cache_dir = None
    if layer_cache_enable:
        use_cache_dir = os.path.join(tmpdir, "anchore_layercache")

    # choose the first image_detail entry; TODO: possibly more complex selection here
    try:
        image_detail = image_record["image_detail"][0]
        registry_manifest = manifest
        registry_parent_manifest = parent_manifest
        pullstring = (image_detail["registry"] + "/" + image_detail["repo"] +
                      "@" + image_detail["imageDigest"])
        fulltag = (image_detail["registry"] + "/" + image_detail["repo"] +
                   ":" + image_detail["tag"])
        logger.debug("using pullstring (" + str(pullstring) +
                     ") and fulltag (" + str(fulltag) + ") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception(
            "failed to extract requisite information from image_record - exception: "
            + str(err))

    timer = int(time.time())
    logger.spew("timing: analyze start: " + str(int(time.time()) - timer))
    logger.info("performing analysis on image: " +
                str([account, pullstring, fulltag]))

    logger.debug("obtaining anchorelock..." + str(pullstring))
    with anchore_engine.clients.localanchore_standalone.get_anchorelock(
            lockId=pullstring, driver="nodocker"):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))
        logger.info("analyzing image: %s", pullstring)
        analyzed_image_report, manifest_raw = localanchore_standalone.analyze_image(
            account,
            registry_manifest,
            image_record,
            tmpdir,
            loaded_config,
            registry_creds=registry_creds,
            use_cache_dir=use_cache_dir,
            parent_manifest=registry_parent_manifest,
        )
        ret_analyze = analyzed_image_report

    logger.info("performing analysis on image complete: " + str(pullstring))

    return ret_analyze
Example #5
def run_check(cmd, input_data=None, log_level="debug", **kwargs):
    """
    Run a command (input required to be a list), log the output, and raise an
    exception if a non-zero exit status code is returned.
    """
    cmd = run_sanitize(cmd)

    try:
        if input_data is not None:
            logger.debug("running cmd: %s with piped input", " ".join(cmd))
            code, stdout, stderr = run_command_list_with_piped_input(
                cmd, input_data, **kwargs)
        else:
            logger.debug("running cmd: %s", " ".join(cmd))
            code, stdout, stderr = run_command_list(cmd, **kwargs)
    except FileNotFoundError:
        msg = "unable to run command. Executable does not exist or not availabe in path"
        raise CommandException(cmd, 1, "", "", msg=msg)

    try:
        stdout = stdout.decode("utf-8")
        stderr = stderr.decode("utf-8")
    except AttributeError:
        # it is a str already, no need to decode
        pass

    stdout_stream = stdout.splitlines()
    stderr_stream = stderr.splitlines()

    if log_level == "spew":
        # Some commands (like grype scanning) will generate enough output here that we
        # need to try to limit the impact of debug logging on system performance
        for line in stdout_stream:
            logger.spew("stdout: %s" %
                        line)  # safe formatting not available for spew
        for line in stderr_stream:
            logger.spew("stderr: %s" % line)
    else:  # Always log stdout and stderr as debug, unless spew is specified
        for line in stdout_stream:
            logger.debug("stdout: %s", line)
        for line in stderr_stream:
            logger.debug("stderr: %s", line)

    if code != 0:
        # When non-zero exit status returns, log stderr as error, but only when
        # the log level is higher (lower in Engine's interpretation) than debug.
        # XXX: engine mangles the logger, so this way of checking the level is
        # non-standard. This line should be:
        #     if logger.level > logging.debug:
        if logger.log_level < logger.log_level_map["DEBUG"]:
            for line in stderr_stream:
                logger.error(line)
        raise CommandException(cmd, code, stdout, stderr)

    return stdout, stderr
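
A minimal usage sketch, assuming run_check and CommandException are imported from the module above: the command must be passed as a list, and the raised CommandException carries the captured stderr (as used in Example #2).

try:
    stdout, stderr = run_check(["echo", "hello"])
except CommandException as exc:
    logger.error("command failed, stderr: %s", exc.stderr)
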
Example #6
def do_notify_webhook(user_record, notification):
    logger.spew("webhook notify user: " + str(user_record))
    logger.spew("webhook notify notification: " + str(notification))

    notification_type = notification['data']['notification_type']
    # substitution pairs applied to the configured webhook url template below
    subvars = [('<notification_type>', notification_type), ('<userId>', user_record['userId'])]

    try:
        payload = json.dumps(notification)
    except Exception as err:
        raise Exception("could not prepare notification as JSON - exception: " + str(err))

    webhooks = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    if 'webhooks' in localconfig:
        webhooks.update(localconfig['webhooks'])

    if webhooks:
        rootuser = webhooks.pop('webhook_user', None)
        rootpw = webhooks.pop('webhook_pass', None)
        rootverify = webhooks.pop('ssl_verify', None)
            
        for ntype in [notification_type, 'general']:
            if ntype in webhooks:
                webhook = webhooks[ntype]
                
                user = webhook.pop('webhook_user', rootuser)
                pw = webhook.pop('webhook_pass', rootpw)
                verify = webhook.pop('ssl_verify', rootverify)

                if not user and not pw:
                    auth=None
                else:
                    auth = (user, pw)

                url = webhook['url']
                for subkey,subval in subvars:
                    url = url.replace(subkey, subval)

                try:
                    logger.debug("webhook post: " + str(url) + " : " + str(notification))
                    #result = http.post(url, data=payload, auth=auth, timeout=2.0, verify=verify)
                    result = http.anchy_post(url, data=payload, auth=auth, timeout=2.0, verify=verify)
                    logger.debug("webhook response: " + str(result))
                    return(True)
                except Exception as err:
                    raise Exception("failed to post notification to webhook - exception: " + str(err))
            
    logger.debug("warning: notification generated, but no matching webhook could be found in config to send it to - dropping notification")
    return(False)
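
For context, a sketch of the shape this function expects under localconfig['webhooks'] (key names are taken from the lookups above; the URL, credentials, and template tokens are illustrative placeholders):

example_webhooks_config = {
    'webhook_user': 'svc-user',      # optional top-level credentials
    'webhook_pass': 'svc-pass',
    'ssl_verify': True,
    'general': {                     # fallback entry used when no type-specific hook exists
        'url': 'https://hooks.example.com/anchore/<notification_type>',
        'webhook_user': None,        # per-hook overrides fall back to the top-level values
        'webhook_pass': None,
        'ssl_verify': True,
    },
}
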
Example #7
def _db_query_wrapper(query: sqlalchemy.orm.Query, get_all=True):
    """
    Logging wrapper on the query call to simplify debugging
    :param query:
    :param get_all:
    :return:
    """
    logger.spew("executing query: %s", str(query))
    # If get_all, then caller will iterate over results
    if get_all:
        return query.all()

    return query
def get_anchorelock(lockId=None):
    global anchorelock, anchorelocks
    ret = anchorelock

    # first, check if we need to update the anchore configs
    localconfig = anchore_engine.configuration.localconfig.get_config()
    if 'anchore_scanner_config' not in localconfig:
        localconfig['anchore_scanner_config'] = get_config()
    anchore_config = localconfig['anchore_scanner_config']

    try:
        for src, dst in [(localconfig['anchore_scanner_analyzer_config_file'],
                          os.path.join(anchore_config['anchore_data_dir'],
                                       'conf', 'analyzer_config.yaml')),
                         (os.path.join(localconfig['service_dir'],
                                       'anchore_config.yaml'),
                          os.path.join(anchore_config['anchore_data_dir'],
                                       'conf', 'config.yaml'))]:
            logger.debug("checking defaults against installed: " + src +
                         " : " + dst)
            if os.path.exists(src) and os.path.exists(dst):
                default_file = src
                installed_file = dst
                if not filecmp.cmp(default_file, installed_file):
                    logger.debug("checking source yaml (" + str(default_file) +
                                 ")")
                    # check that it is at least valid yaml before copying in place
                    with open(default_file, 'r') as FH:
                        yaml.safe_load(FH)

                    logger.info("copying new config into place: " + str(src) +
                                " -> " + str(dst))
                    shutil.copy(default_file, installed_file)

    except Exception as err:
        logger.warn(
            "could not check/install analyzer anchore configurations (please check yaml format of your configuration files), continuing with default - exception: "
            + str(err))

    if lockId:
        lockId = base64.encodebytes(lockId.encode('utf-8'))
        if lockId not in anchorelocks:
            anchorelocks[lockId] = threading.Lock()
        ret = anchorelocks[lockId]
        logger.spew("all locks: " + str(anchorelocks))
    else:
        ret = anchorelock

    return (ret)
Example #9
def timer(label, log_level='debug'):
    t = time.time()
    try:
        yield
    finally:
        log_level = log_level.lower()
        if log_level == 'info':
            logger.info('Execution of {} took: {} seconds'.format(label, time.time() - t))
        elif log_level == 'warn':
            logger.warn('Execution of {} took: {} seconds'.format(label, time.time() - t))
        elif log_level == 'spew':
            logger.spew('Execution of {} took: {} seconds'.format(label, time.time() - t))
        else:
            logger.debug('Execution of {} took: {} seconds'.format(label, time.time() - t))
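
A usage sketch, assuming the definition above is decorated as a contextlib context manager (the decorator is not shown in the snippet):

with timer('vulnerability query', log_level='info'):
    do_work()  # placeholder for the timed operation
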
Example #10
def _load_config(config_option, validate_params=None):
    try:
        # config and init
        configfile = configdir = None
        if config_option:
            configdir = config_option
            configfile = os.path.join(config_option, 'config.yaml')

        localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
        my_config = localconfig.get_config()
        my_config['myservices'] = []
        logger.spew("localconfig=" + json.dumps(my_config, indent=4, sort_keys=True))
        return my_config
    except Exception as err:
        logger.error("cannot load configuration: exception - " + str(err))
        raise err
Example #11
def filter_secdb_entries(
    image_distro: DistroNamespace, matches: List[str], db_manager: CpeDBQueryManager
) -> List[str]:
    """
    Execute the filtering functionality itself on the sets

    :param image_distro:
    :param matches: match list to filter
    :return: filtered match list
    """

    secdb_matched_cves = db_manager.matched_records_for_namespace(image_distro, matches)

    logger.spew("Secdb matched cves %s", secdb_matched_cves)
    unmatched = set(matches).difference(secdb_matched_cves)
    return list(unmatched)
Example #12
def policy_engine_image_load(client, imageUserId, imageId, imageDigest):

    resp = None

    try:
        request = ImageIngressRequest()
        request.user_id = imageUserId
        request.image_id = imageId
        request.fetch_url='catalog://'+str(imageUserId)+'/analysis_data/'+str(imageDigest)
        logger.debug("policy engine request (image add): " + str(request))
        resp = client.ingress_image(request)
        logger.spew("policy engine response (image add): " + str(resp))
    except Exception as err:
        logger.error("failed to add/check image: " + str(err))
        raise err

    return(resp)
Example #13
def session_scope():
    """Provide a transactional scope around a series of operations."""
    global Session
    session = Session()

    # session.connection(execution_options={'isolation_level': 'SERIALIZABLE'})

    logger.spew("DB: opening session: " + str(session))
    logger.spew("DB: call stack: \n" + '\n'.join(traceback.format_stack()))
    try:
        yield session
        session.commit()
        logger.spew("DB: committing session: " + str(session))
    except:
        logger.spew("DB: rollbacking session: " + str(session))
        session.rollback()
        raise
    finally:
        logger.spew("DB: closing session: " + str(session))
        session.close()
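
A minimal usage sketch (assuming the definition above carries the @contextmanager decorator, which the snippet does not show): callers wrap their ORM work and rely on the commit-on-success / rollback-on-exception behavior.

with session_scope() as dbsession:
    dbsession.add(record)  # committed on clean exit, rolled back if an exception escapes
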
def perform_analyze_nodocker(userId, manifest, image_record, registry_creds, layer_cache_enable=False, parent_manifest=None):
    ret_analyze = {}
    ret_query = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    try:
        tmpdir = localconfig['tmp_dir']
    except Exception as err:
        logger.warn("could not get tmp_dir from localconfig - exception: " + str(err))
        tmpdir = "/tmp"

    use_cache_dir=None
    if layer_cache_enable:
        use_cache_dir = os.path.join(tmpdir, "anchore_layercache")

    # choose the first image_detail entry; TODO: possibly more complex selection here
    try:
        image_detail = image_record['image_detail'][0]
        registry_manifest = manifest
        registry_parent_manifest = parent_manifest
        pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['imageDigest']
        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
        logger.debug("using pullstring ("+str(pullstring)+") and fulltag ("+str(fulltag)+") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception("failed to extract requisite information from image_record - exception: " + str(err))

    timer = int(time.time())
    logger.spew("timing: analyze start: " + str(int(time.time()) - timer))
    logger.info("performing analysis on image: " + str([userId, pullstring, fulltag]))

    logger.debug("obtaining anchorelock..." + str(pullstring))
    with anchore_engine.clients.localanchore_standalone.get_anchorelock(lockId=pullstring, driver='nodocker'):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))
        analyzed_image_report, manifest_raw = localanchore_standalone.analyze_image(userId, registry_manifest, image_record, tmpdir, localconfig, registry_creds=registry_creds, use_cache_dir=use_cache_dir, parent_manifest=registry_parent_manifest)
        ret_analyze = analyzed_image_report

    logger.info("performing analysis on image complete: " + str(pullstring))

    return ret_analyze
Example #15
def timer(label, log_level="debug"):
    t = time.time()
    try:
        yield
    finally:
        log_level = log_level.lower()
        if log_level == "info":
            logger.info(
                "Execution of {} took: {} seconds".format(label, time.time() - t)
            )
        elif log_level == "warn":
            logger.warn(
                "Execution of {} took: {} seconds".format(label, time.time() - t)
            )
        elif log_level == "spew":
            logger.spew(
                "Execution of {} took: {} seconds".format(label, time.time() - t)
            )
        else:
            logger.debug(
                "Execution of {} took: {} seconds".format(label, time.time() - t)
            )
Example #16
    def lookup(self, key):
        found = self.cache.get(key)
        if found and found[0] >= datetime.datetime.now():
            logger.spew('TTLCache {} hit for {}'.format(self.__hash__(), key))
            return found[1]
        elif found:
            # entry exists but its TTL has passed; evict it
            self.cache.pop(key)
            logger.spew('TTLCache {} expired entry for {}'.format(self.__hash__(), key))
            return None
        else:
            logger.spew('TTLCache {} miss for {}'.format(self.__hash__(), key))
            return None
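
The lookup above implies that cache entries are (expiration_datetime, value) tuples. A hedged sketch of the companion insert path, assuming datetime is imported as in lookup; the method name cache_it and the ttl_seconds parameter are assumptions, not taken from the source:

    def cache_it(self, key, value, ttl_seconds=60):
        # store (expiration, value); lookup() returns value until the expiration passes
        self.cache[key] = (
            datetime.datetime.now() + datetime.timedelta(seconds=ttl_seconds),
            value,
        )
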
Example #17
    def _query_records_for_namespace(
            self, namespace: DistroNamespace,
            filter_ids: List[str]) -> List[Tuple[str]]:
        """
        Utility function to lookup all vulns for the namespace

        :param namespace_name: string namespace (e.g. "alpine:3.8")
        :param match_set: the list of cve ids to query against
        :return: iterable for Vulnerability records
        """

        # Chunk on length of match_set
        chunk_size = 100
        idx = 0
        vuln_ids = []
        match_count = len(filter_ids)

        logger.spew("Checking for sec db matches for %s", filter_ids)

        # Do a chunked query to ensure a long list of match_set doesn't break due to query length
        while idx < match_count:
            chunk = filter_ids[idx:idx + chunk_size]
            idx += chunk_size
            logger.spew(
                "Query chunk %s with namespace %s",
                chunk,
                namespace.like_namespace_names,
            )

            qry = self.db_session.query(Vulnerability.id).filter(
                Vulnerability.namespace_name.in_(
                    namespace.like_namespace_names),
                Vulnerability.id.in_(chunk),
            )

            result = _db_query_wrapper(qry, get_all=True)
            logger.spew("Raw result = %s", str(result))
            vuln_ids.extend(result)

        logger.spew("Found cve id matches for %s", vuln_ids)
        return vuln_ids
Example #18
    def _match_distro_packages_by_cpe(self, image: Image) -> List[CpeMatch]:
        """
        Returns list of tuples of (imagecpe, vulncpe) that are matches

        :param image:
        :return: list of tuples
        """
        logger.spew(
            "scanning os packages for cpe matches id=%s digest=%s",
            image.id,
            image.digest,
        )

        os_pkg_cpe_mappings = self._get_cpes_for_os_packages(image.packages)

        logger.spew("distro cpes: %s", os_pkg_cpe_mappings)

        os_cpes = [cpe for pkg, cpe in os_pkg_cpe_mappings]
        # Get the matches
        matches = self._query_cpe_matches(os_cpes)

        logger.spew(
            "pre-filter cpe distro findings: %s",
            [(match.image_cpe, match.vuln_cpe.vulnerability_id) for match in matches],
        )

        # Filter the matches if configured to do so
        if matches and self.exclude_distro_records:
            # Remove any matches that are for a CVE ID that is represented in the vendor vuln db, regardless of match status.
            matched_cve_ids = self.get_cve_ids(matches)
            filtered_matched_cve_ids = set(
                filter_secdb_entries(
                    image.distro_namespace_obj(), matched_cve_ids, self.db_manager
                )
            )

            matches = [
                match
                for match in matches
                if match.vuln_cpe.vulnerability_id in filtered_matched_cve_ids
            ]

        logger.debug(
            "post-filter cpe distro findings: %s",
            [
                (match.image_cpe.name, match.vuln_cpe.vulnerability_id)
                for match in matches
            ],
        )

        return matches
Example #19
def process_updated_vulnerability(db, vulnerability):
    """
    Update vulnerability matches for this vulnerability. This function will add objects to the db session but
    will not commit. The caller is expected to manage the session lifecycle.

    :param: item: The updated vulnerability object
    :param: db: The db session to use, should be valid and open
    :return: list of (user_id, image_id) that were affected
    """
    logger.spew("Processing CVE update for: {}".format(vulnerability.id))
    changed_images = []

    # Find any packages already matched with the CVE ID.
    current_affected = vulnerability.current_package_vulnerabilities(db)

    # May need to remove vuln from some packages.
    if vulnerability.is_empty():
        logger.spew(
            "Detected an empty CVE. Removing all existing matches on this CVE")

        # This is a flush, nothing can be vulnerable to this, so remove it from packages.
        if current_affected:
            logger.debug(
                "Detected {} existing matches on CVE {} to remove".format(
                    len(current_affected), vulnerability.id))

            for pkgVuln in current_affected:
                logger.debug("Removing match on image: {}/{}".format(
                    pkgVuln.pkg_user_id, pkgVuln.pkg_image_id))
                db.delete(pkgVuln)
                changed_images.append(
                    (pkgVuln.pkg_user_id, pkgVuln.pkg_image_id))
    else:
        # Find impacted images for the current vulnerability
        new_vulnerable_packages = [
            ImagePackageVulnerability.from_pair(x, vulnerability)
            for x in find_vulnerable_image_packages(vulnerability)
        ]
        unique_vuln_pkgs = set(new_vulnerable_packages)
        current_match = set(current_affected)

        if len(new_vulnerable_packages) > 0:
            logger.debug("Found {} packages vulnerable to cve {}".format(
                len(new_vulnerable_packages), vulnerability.id))
            logger.debug("Dedup matches from {} to {}".format(
                len(new_vulnerable_packages), len(unique_vuln_pkgs)))

        # Find the diffs of any packages that were vulnerable but are no longer.
        no_longer_affected = current_match.difference(unique_vuln_pkgs)
        possibly_updated = current_match.intersection(unique_vuln_pkgs)
        new_matches = unique_vuln_pkgs.difference(current_match)

        if len(no_longer_affected) > 0:
            logger.debug(
                "Found {} packages no longer vulnerable to cve {}".format(
                    len(no_longer_affected), vulnerability.id))
            for img_pkg_vuln in no_longer_affected:
                logger.debug(
                    "Removing old invalid match for pkg {} on cve {}".format(
                        img_pkg_vuln, vulnerability.id))
                db.delete(img_pkg_vuln)
            db.flush()

        for v in new_matches:
            logger.debug("Adding new vulnerability match: {}".format(v))
            db.add(v)
            changed_images.append((v.pkg_user_id, v.pkg_image_id))

        db.flush()

    logger.spew("Images changed for cve {}: {}".format(vulnerability.id,
                                                       changed_images))

    return changed_images
Example #20
def monitor(**kwargs):
    global monitor_thread
    try:
        donew = False
        if monitor_thread:
            if monitor_thread.is_alive():
                logger.spew("MON: thread still running")
            else:
                logger.spew("MON: thread stopped running")
                donew = True
                monitor_thread.join()
                logger.spew("MON: thread joined: " +
                            str(monitor_thread.isAlive()))
        else:
            logger.spew("MON: no thread")
            donew = True

        if donew:
            logger.spew("MON: starting")
            monitor_thread = threading.Thread(target=monitor_func,
                                              kwargs=kwargs)
            monitor_thread.start()
        else:
            logger.spew("MON: skipping")

    except Exception as err:
        logger.warn("MON thread start exception: " + str(err))
Example #21
def monitor_func(**kwargs):
    global click, running, last_run, queuename, system_user_auth

    timer = int(time.time())
    if click < 5:
        click = click + 1
        logger.debug("Analyzer starting in: " + str(5 - click))
        return (True)

    if round(time.time() - last_run) < kwargs['kick_timer']:
        logger.spew("timer hasn't kicked yet: " +
                    str(round(time.time() - last_run)) + " : " +
                    str(kwargs['kick_timer']))
        return (True)

    try:
        running = True
        last_run = time.time()
        logger.debug("FIRING: analyzer")

        localconfig = anchore_engine.configuration.localconfig.get_config()
        system_user_auth = localconfig['system_user_auth']

        if True:
            try:

                myconfig = localconfig['services']['analyzer']
                max_analyze_threads = int(myconfig.get('max_threads', 1))
                layer_cache_enable = myconfig.get('layer_cache_enable', False)
                layer_cache_dirty = False

                #try:
                #    max_analyze_threads = int(myconfig['max_threads'])
                #except:
                #    max_analyze_threads = 1

                logger.debug("max threads: " + str(max_analyze_threads))
                threads = []
                for i in range(0, max_analyze_threads):
                    qobj = simplequeue.dequeue(system_user_auth, queuename)
                    #if simplequeue.qlen(system_user_auth, queuename) > 0:
                    if qobj:
                        myqobj = copy.deepcopy(qobj)
                        logger.spew("incoming queue object: " + str(myqobj))
                        logger.debug("incoming queue task: " +
                                     str(myqobj.keys()))
                        logger.debug("starting thread")
                        athread = threading.Thread(target=process_analyzer_job,
                                                   args=(system_user_auth,
                                                         myqobj,
                                                         layer_cache_enable))
                        athread.start()
                        threads.append(athread)
                        logger.debug("thread started")
                        layer_cache_dirty = True
                    else:
                        logger.debug(
                            "analyzer queue is empty - no work this cycle")

                for athread in threads:
                    logger.debug("joining thread")
                    athread.join()
                    logger.debug("thread joined")

                # TODO - perform cache maint here, no analyzer threads running
                if layer_cache_enable and layer_cache_dirty:
                    logger.debug("running layer cache handler")
                    try:
                        handle_layer_cache()
                    except Exception as err:
                        logger.warn(
                            "layer cache management failed - exception: " +
                            str(err))

            except Exception as err:
                logger.error(str(err))
    except Exception as err:
        logger.error(str(err))
    finally:
        running = False
        logger.debug("FIRING DONE: analyzer: " + str(int(time.time()) - timer))

    return (True)
Example #22
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    global current_avg, current_avg_count

    timer = int(time.time())
    try:
        record = qobj['data']
        userId = record['userId']
        image_record = record['image_record']
        manifest = record['manifest']

        imageDigest = image_record['imageDigest']
        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth,
                                              imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " +
                    str(imageDigest))

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            last_analysis_status = image_record['analysis_status']
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.working_state(
                    'analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # disable the webhook call for image state transition to 'analyzing'
            #try:
            #    for image_detail in image_record['image_detail']:
            #        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
            #        npayload = {
            #            'last_eval': {'imageDigest': imageDigest, 'analysis_status': last_analysis_status},
            #            'curr_eval': {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status']},
            #        }
            #        rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
            #except Exception as err:
            #    logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))

            # actually do analysis
            registry_creds = catalog.get_registry(user_auth)
            image_data = perform_analyze(userId,
                                         manifest,
                                         image_record,
                                         registry_creds,
                                         layer_cache_enable=layer_cache_enable)

            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn(
                    "could not get imageId after analysis or from image record - exception: "
                    + str(err))

            logger.debug("archiving analysis data")
            rc = catalog.put_document(user_auth, 'analysis_data', imageDigest,
                                      image_data)

            if rc:
                try:
                    logger.debug("extracting image content data")
                    image_content_data = {}
                    for content_type in anchore_engine.services.common.image_content_types:
                        try:
                            image_content_data[
                                content_type] = anchore_engine.services.common.extract_analyzer_content(
                                    image_data, content_type)
                        except:
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog.put_document(user_auth,
                                                  'image_content_data',
                                                  imageDigest,
                                                  image_content_data)

                    try:
                        logger.debug(
                            "adding image analysis data to image_record")
                        anchore_engine.services.common.update_image_record_with_analysis_data(
                            image_record, image_data)

                    except Exception as err:
                        raise err

                except Exception as err:
                    logger.warn(
                        "could not store image content metadata to archive - exception: "
                        + str(err))

                logger.debug("adding image record to policy-engine service (" +
                             str(userId) + " : " + str(imageId) + ")")
                try:
                    if not imageId:
                        raise Exception(
                            "cannot add image to policy engine without an imageId"
                        )

                    localconfig = anchore_engine.configuration.localconfig.get_config(
                    )
                    verify = localconfig['internal_ssl_verify']

                    client = anchore_engine.clients.policy_engine.get_client(
                        user=system_user_auth[0],
                        password=system_user_auth[1],
                        verify_ssl=verify)

                    try:
                        logger.debug(
                            "clearing any existing record in policy engine for image: "
                            + str(imageId))
                        rc = client.delete_image(user_id=userId,
                                                 image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " +
                                    str(err))

                    request = ImageIngressRequest()
                    request.user_id = userId
                    request.image_id = imageId
                    request.fetch_url = 'catalog://' + str(
                        userId) + '/analysis_data/' + str(imageDigest)
                    logger.debug("policy engine request: " + str(request))
                    resp = client.ingress_image(request)
                    logger.debug("policy engine image add response: " +
                                 str(resp))

                    try:
                        # force a fresh CVE scan
                        resp = client.get_image_vulnerabilities(
                            user_id=userId,
                            image_id=imageId,
                            force_refresh=True)
                    except Exception as err:
                        logger.warn(
                            "post analysis CVE scan failed for image: " +
                            str(imageId))

                except Exception as err:
                    raise Exception(
                        "adding image to policy-engine failed - exception: " +
                        str(err))

                logger.debug("updating image catalog record analysis_status")

                last_analysis_status = image_record['analysis_status']
                image_record[
                    'analysis_status'] = anchore_engine.subsys.taskstate.complete_state(
                        'analyze')
                rc = catalog.update_image(user_auth, imageDigest, image_record)

                try:
                    annotations = {}
                    try:
                        annotations = json.loads(
                            image_record.get('annotations', {}))
                    except Exception as err:
                        logger.warn(
                            "could not marshal annotations from json - exception: "
                            + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail[
                            'repo'] + ":" + image_detail['tag']
                        last_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': last_analysis_status,
                            'annotations': annotations
                        }
                        curr_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': image_record['analysis_status'],
                            'annotations': annotations
                        }
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations

                        rc = anchore_engine.subsys.notifications.queue_notification(
                            userId, fulltag, 'analysis_update', npayload)
                except Exception as err:
                    logger.warn(
                        "failed to enqueue notification on image analysis state update - exception: "
                        + str(err))

            else:
                raise Exception("analysis archive failed to store")

            logger.info("analysis complete: " + str(userId) + " : " +
                        str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            try:
                run_time = float(time.time() - timer)
                current_avg_count = current_avg_count + 1.0
                new_avg = current_avg + (
                    (run_time - current_avg) / current_avg_count)
                current_avg = new_avg
            except:
                pass

        except Exception as err:
            logger.exception("problem analyzing image - exception: " +
                             str(err))
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.fault_state(
                    'analyze')
            image_record[
                'image_status'] = anchore_engine.subsys.taskstate.fault_state(
                    'image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
Example #23
def perform_analyze_localanchore(userId,
                                 manifest,
                                 image_record,
                                 registry_creds,
                                 layer_cache_enable=False):
    ret_analyze = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    do_docker_cleanup = localconfig['cleanup_images']

    try:
        image_detail = image_record['image_detail'][0]
        registry_manifest = manifest
        pullstring = image_detail['registry'] + "/" + image_detail[
            'repo'] + "@" + image_detail['imageDigest']
        fulltag = image_detail['registry'] + "/" + image_detail[
            'repo'] + ":" + image_detail['tag']
        logger.debug("using pullstring (" + str(pullstring) +
                     ") and fulltag (" + str(fulltag) + ") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception(
            "failed to extract requisite information from image_record - exception: "
            + str(err))

    timer = int(time.time())
    logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))
    logger.debug("obtaining anchorelock..." + str(pullstring))
    with localanchore.get_anchorelock(lockId=pullstring):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))

        logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))
        logger.info("performing analysis on image: " + str(pullstring))

        # pull the digest, but also any tags associated with the image (that we know of) in order to populate the local docker image
        try:
            rc = localanchore.pull(userId,
                                   pullstring,
                                   image_detail,
                                   pulltags=True,
                                   registry_creds=registry_creds)
            if not rc:
                raise Exception("anchore analyze failed:")
            pullstring = re.sub("sha256:", "", rc['Id'])
            image_detail['imageId'] = pullstring
        except Exception as err:
            logger.error("error on pull: " + str(err))
            raise err

        logger.spew("TIMING MARK2: " + str(int(time.time()) - timer))

        # analyze!
        try:
            rc = localanchore.analyze(pullstring, image_detail)
            if not rc:
                raise Exception("anchore analyze failed:")
        except Exception as err:
            logger.error("error on analyze: " + str(err))
            raise err

        logger.spew("TIMING MARK3: " + str(int(time.time()) - timer))

        # get the result from anchore
        logger.debug("retrieving image data from anchore")
        try:
            image_data = localanchore.get_image_export(pullstring,
                                                       image_detail)
            if not image_data:
                raise Exception("anchore image data export failed:")
        except Exception as err:
            logger.error("error on image export: " + str(err))
            raise err

        logger.spew("TIMING MARK5: " + str(int(time.time()) - timer))

        try:
            logger.debug("removing image: " + str(pullstring))
            rc = localanchore.remove_image(pullstring,
                                           docker_remove=do_docker_cleanup,
                                           anchore_remove=True)
            logger.debug("removing image complete: " + str(pullstring))
        except Exception as err:
            raise err

        logger.spew("TIMING MARK6: " + str(int(time.time()) - timer))

    ret_analyze = image_data

    logger.info("performing analysis on image complete: " + str(pullstring))
    return (ret_analyze)
Example #24
def handle_image_analyzer(*args, **kwargs):
    """
    Processor for image analysis requests coming from the work queue

    :param args:
    :param kwargs:
    :return:
    """
    global system_user_auth, queuename, servicename

    cycle_timer = kwargs['mythread']['cycle_timer']

    localconfig = anchore_engine.configuration.localconfig.get_config()
    system_user_auth = localconfig['system_user_auth']

    threads = []
    layer_cache_dirty = True
    while (True):
        logger.debug("analyzer thread cycle start")
        try:
            myconfig = localconfig['services']['analyzer']
            max_analyze_threads = int(myconfig.get('max_threads', 1))
            layer_cache_enable = myconfig.get('layer_cache_enable', False)

            logger.debug("max threads: " + str(max_analyze_threads))
            q_client = internal_client_for(SimpleQueueClient, userId=None)

            if len(threads) < max_analyze_threads:
                logger.debug("analyzer has free worker threads {} / {}".format(
                    len(threads), max_analyze_threads))
                qobj = q_client.dequeue(queuename)
                if qobj:
                    logger.debug("got work from queue task Id: {}".format(
                        qobj.get('queueId', 'unknown')))
                    myqobj = copy.deepcopy(qobj)
                    logger.spew("incoming queue object: " + str(myqobj))
                    logger.debug("incoming queue task: " +
                                 str(list(myqobj.keys())))
                    logger.debug("starting thread")
                    athread = threading.Thread(target=process_analyzer_job,
                                               args=(system_user_auth, myqobj,
                                                     layer_cache_enable))
                    athread.start()
                    threads.append(athread)
                    logger.debug("thread started")
                    layer_cache_dirty = True
                else:
                    logger.debug(
                        "analyzer queue is empty - no work this cycle")
            else:
                logger.debug("all workers are busy")

            alive_threads = []
            while (threads):
                athread = threads.pop()
                if not athread.is_alive():
                    try:
                        logger.debug("thread completed - joining")
                        athread.join()
                        logger.debug("thread joined")
                    except Exception as err:
                        logger.warn("cannot join thread - exception: " +
                                    str(err))
                else:
                    alive_threads.append(athread)
            threads = alive_threads

            if layer_cache_enable and layer_cache_dirty and len(threads) == 0:
                logger.debug("running layer cache handler")
                try:
                    handle_layer_cache()
                    layer_cache_dirty = False
                except Exception as err:
                    logger.warn("layer cache management failed - exception: " +
                                str(err))

        except Exception as err:
            logger.exception('Failure in image analysis loop')

        logger.debug("analyzer thread cycle complete: next in " +
                     str(cycle_timer))
        time.sleep(cycle_timer)
    return (True)
Example #25
def monitor_func(**kwargs):
    global click, running, last_run, queuename, system_user_auth

    timer = int(time.time())
    if click < 5:
        click = click + 1
        logger.debug("Analyzer starting in: " + str(5 - click))
        return (True)

    if round(time.time() - last_run) < kwargs['kick_timer']:
        logger.spew("timer hasn't kicked yet: " +
                    str(round(time.time() - last_run)) + " : " +
                    str(kwargs['kick_timer']))
        return (True)

    try:
        running = True
        last_run = time.time()
        logger.debug("FIRING: analyzer")

        localconfig = anchore_engine.configuration.localconfig.get_config()
        system_user_auth = localconfig['system_user_auth']

        queues = simplequeue.get_queues(system_user_auth)
        if not queues:
            logger.warn(
                "could not get any queues from simplequeue client, cannot do any work"
            )
        elif queuename not in queues:
            logger.error(
                "connected to simplequeue, but could not find queue (" +
                queuename + "), cannot do any work")
        else:
            try:

                try:
                    myconfig = localconfig['services']['analyzer']
                    max_analyze_threads = int(myconfig['max_threads'])
                except:
                    max_analyze_threads = 1

                logger.debug("max threads: " + str(max_analyze_threads))
                threads = []
                for i in range(0, max_analyze_threads):
                    if simplequeue.qlen(system_user_auth, queuename) > 0:
                        qobj = simplequeue.dequeue(system_user_auth, queuename)
                        if qobj:
                            myqobj = copy.deepcopy(qobj)
                            logger.spew("incoming queue object: " +
                                        str(myqobj))
                            logger.debug("incoming queue task: " +
                                         str(myqobj.keys()))
                            logger.debug("starting thread")
                            athread = threading.Thread(
                                target=process_analyzer_job,
                                args=(
                                    system_user_auth,
                                    myqobj,
                                ))
                            athread.start()
                            threads.append(athread)
                            logger.debug("thread started")

                            # rc = process_analyzer_job(system_user_auth, myqobj)

                    else:
                        logger.debug(
                            "analyzer queue is empty - no work this cycle")

                for athread in threads:
                    logger.debug("joining thread")
                    athread.join()
                    logger.debug("thread joined")

            except Exception as err:
                logger.error(str(err))
    except Exception as err:
        logger.error(str(err))
    finally:
        running = False
        logger.debug("FIRING DONE: analyzer: " + str(int(time.time()) - timer))

    return (True)
Example #26
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    global servicename  #current_avg, current_avg_count

    timer = int(time.time())
    event = None
    try:
        logger.debug('dequeued object: {}'.format(qobj))

        record = qobj['data']
        userId = record['userId']
        imageDigest = record['imageDigest']
        manifest = record['manifest']

        # check to make sure image is still in DB
        catalog_client = internal_client_for(CatalogClient, userId)
        try:
            image_records = catalog_client.get_image(imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " +
                    str(imageDigest))
        if image_record[
                'analysis_status'] != anchore_engine.subsys.taskstate.base_state(
                    'analyze'):
            logger.debug(
                "dequeued image is not in base state - skipping analysis")
            return (True)

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            last_analysis_status = image_record['analysis_status']
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.working_state(
                    'analyze')
            rc = catalog_client.update_image(imageDigest, image_record)

            # disable the webhook call for image state transition to 'analyzing'
            #try:
            #    for image_detail in image_record['image_detail']:
            #        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
            #        npayload = {
            #            'last_eval': {'imageDigest': imageDigest, 'analysis_status': last_analysis_status},
            #            'curr_eval': {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status']},
            #        }
            #        rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
            #except Exception as err:
            #    logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))

            # actually do analysis
            registry_creds = catalog_client.get_registry()
            try:
                image_data = perform_analyze(
                    userId,
                    manifest,
                    image_record,
                    registry_creds,
                    layer_cache_enable=layer_cache_enable)
            except AnchoreException as e:
                event = events.AnalyzeImageFail(user_id=userId,
                                                image_digest=imageDigest,
                                                error=e.to_dict())
                raise

            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn(
                    "could not get imageId after analysis or from image record - exception: "
                    + str(err))

            try:
                logger.debug("archiving analysis data")
                rc = catalog_client.put_document('analysis_data', imageDigest,
                                                 image_data)
            except Exception as e:
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog', cause=e)
                event = events.ArchiveAnalysisFail(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=err.to_dict())
                raise err

            if rc:
                try:
                    logger.debug("extracting image content data")
                    image_content_data = {}
                    for content_type in anchore_engine.common.image_content_types + anchore_engine.common.image_metadata_types:
                        try:
                            image_content_data[content_type] = anchore_engine.common.helpers.extract_analyzer_content(
                                image_data, content_type, manifest=manifest)
                        except:
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog_client.put_document(
                            'image_content_data', imageDigest,
                            image_content_data)

                    try:
                        logger.debug(
                            "adding image analysis data to image_record")
                        anchore_engine.common.helpers.update_image_record_with_analysis_data(
                            image_record, image_data)

                    except Exception as err:
                        raise err

                except Exception as err:
                    import traceback
                    traceback.print_exc()
                    logger.warn(
                        "could not store image content metadata to archive - exception: "
                        + str(err))

                logger.debug("adding image record to policy-engine service (" +
                             str(userId) + " : " + str(imageId) + ")")
                try:
                    if not imageId:
                        raise Exception(
                            "cannot add image to policy engine without an imageId"
                        )

                    localconfig = anchore_engine.configuration.localconfig.get_config()
                    verify = localconfig['internal_ssl_verify']

                    pe_client = internal_client_for(PolicyEngineClient, userId)

                    try:
                        logger.debug(
                            "clearing any existing record in policy engine for image: "
                            + str(imageId))
                        rc = pe_client.delete_image(user_id=userId,
                                                    image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " +
                                    str(err))

                    logger.info(
                        'Loading image into policy engine: {} {}'.format(
                            userId, imageId))
                    image_analysis_fetch_url = 'catalog://' + str(
                        userId) + '/analysis_data/' + str(imageDigest)
                    logger.debug("policy engine request: " +
                                 image_analysis_fetch_url)
                    resp = pe_client.ingress_image(userId, imageId,
                                                   image_analysis_fetch_url)
                    logger.debug("policy engine image add response: " +
                                 str(resp))

                except Exception as err:
                    newerr = PolicyEngineClientError(
                        msg='Adding image to policy-engine failed',
                        cause=str(err))
                    event = events.LoadAnalysisFail(user_id=userId,
                                                    image_digest=imageDigest,
                                                    error=newerr.to_dict())
                    raise newerr

                logger.debug("updating image catalog record analysis_status")

                last_analysis_status = image_record['analysis_status']
                image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
                image_record['analyzed_at'] = int(time.time())
                rc = catalog_client.update_image(imageDigest, image_record)

                try:
                    annotations = {}
                    try:
                        if image_record.get('annotations', '{}'):
                            annotations = json.loads(
                                image_record.get('annotations', '{}'))
                    except Exception as err:
                        logger.warn(
                            "could not marshal annotations from json - exception: "
                            + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
                        last_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': last_analysis_status,
                            'annotations': annotations
                        }
                        curr_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': image_record['analysis_status'],
                            'annotations': annotations
                        }
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations

                        rc = anchore_engine.subsys.notifications.queue_notification(
                            userId, fulltag, 'analysis_update', npayload)
                except Exception as err:
                    logger.warn(
                        "failed to enqueue notification on image analysis state update - exception: "
                        + str(err))

            else:
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog',
                    cause='Invalid response from catalog API - {}'.format(
                        str(rc)))
                event = events.ArchiveAnalysisFail(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=err.to_dict())
                raise err

            logger.info("analysis complete: " + str(userId) + " : " +
                        str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            try:
                run_time = float(time.time() - timer)
                #current_avg_count = current_avg_count + 1.0
                #new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                #current_avg = new_avg

                anchore_engine.subsys.metrics.histogram_observe(
                    'anchore_analysis_time_seconds',
                    run_time,
                    buckets=[
                        1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0,
                        1800.0, 3600.0
                    ],
                    status="success")
                #anchore_engine.subsys.metrics.counter_inc('anchore_images_analyzed_total')

                #localconfig = anchore_engine.configuration.localconfig.get_config()
                #service_record = {'hostid': localconfig['host_id'], 'servicename': servicename}
                #anchore_engine.subsys.servicestatus.set_status(service_record, up=True, available=True, detail={'avg_analysis_time_sec': current_avg, 'total_analysis_count': current_avg_count}, update_db=True)

            except Exception as err:
                logger.warn(str(err))
                pass

        except Exception as err:
            run_time = float(time.time() - timer)
            logger.exception("problem analyzing image - exception: " +
                             str(err))
            anchore_engine.subsys.metrics.histogram_observe(
                'anchore_analysis_time_seconds',
                run_time,
                buckets=[
                    1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0,
                    3600.0
                ],
                status="fail")
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog_client.update_image(imageDigest, image_record)
        finally:
            if event:
                try:
                    catalog_client.add_event(event)
                except:
                    logger.error(
                        'Ignoring error creating analysis failure event')

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
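
# A minimal sketch, not taken from the original source, of the queue payload shape
# that process_analyzer_job() above expects: qobj['data'] must carry the userId,
# imageDigest and manifest fields read at the top of the function. The digest and
# manifest values below are placeholders.
example_qobj = {
    "data": {
        "userId": "admin",
        "imageDigest": "sha256:<image-digest>",
        "manifest": "<raw image manifest as a JSON string>",
    }
}
# process_analyzer_job(system_user_auth, example_qobj, layer_cache_enable=False)
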
def perform_analyze(userId, pullstring, fulltag, image_detail, registry_creds):
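    """
    Pull the image referenced by pullstring, run the local anchore analyzers and
    queries against it, export the resulting image data and remove the local image,
    all while holding the per-image anchore lock. Returns (analysis_data, query_data).
    """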
    ret_analyze = {}
    ret_query = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    do_docker_cleanup = localconfig['cleanup_images']

    timer = int(time.time())
    logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))
    logger.debug("obtaining anchorelock..." + str(pullstring))
    with localanchore.get_anchorelock(lockId=pullstring):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))

        logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))
        logger.info("performing analysis on image: " + str(pullstring))

        # pull the digest, but also any tags associated with the image (that we know of) in order to populate the local docker image
        try:
            rc = localanchore.pull(userId, pullstring, image_detail, pulltags=True,
                                                          registry_creds=registry_creds)
            if not rc:
                raise Exception("anchore analyze failed:")
            pullstring = re.sub("sha256:", "", rc['Id'])
            image_detail['imageId'] = pullstring
        except Exception as err:
            logger.error("error on pull: " + str(err))
            raise err

        logger.spew("TIMING MARK2: " + str(int(time.time()) - timer))

        # analyze!
        try:
            rc = localanchore.analyze(pullstring, image_detail)
            if not rc:
                raise Exception("anchore analyze failed:")
        except Exception as err:
            logger.error("error on analyze: " + str(err))
            raise err

        logger.spew("TIMING MARK3: " + str(int(time.time()) - timer))

        # query!
        try:
            query_data = localanchore.run_queries(pullstring, image_detail)
            if not query_data:
                raise Exception("anchore queries failed:")
        except Exception as err:
            logger.error("error on run_queries: " + str(err))
            raise err

        logger.spew("TIMING MARK4: " + str(int(time.time()) - timer))

        # get the result from anchore
        logger.debug("retrieving image data from anchore")
        try:
            image_data = localanchore.get_image_export(pullstring, image_detail)
            if not image_data:
                raise Exception("anchore image data export failed:")
        except Exception as err:
            logger.error("error on image export: " + str(err))
            raise err

        logger.spew("TIMING MARK5: " + str(int(time.time()) - timer))

        try:
            logger.debug("removing image: " + str(pullstring))
            rc = localanchore.remove_image(pullstring, docker_remove=do_docker_cleanup,
                                                                  anchore_remove=True)
            logger.debug("removing image complete: " + str(pullstring))
        except Exception as err:
            raise err

        logger.spew("TIMING MARK6: " + str(int(time.time()) - timer))

    ret_analyze = image_data
    ret_query = query_data

    logger.info("performing analysis on image complete: " + str(pullstring))
    return (ret_analyze, ret_query)
def process_analyzer_job(system_user_auth, qobj):
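    """
    Dequeue an image analysis task, verify the image record in the catalog, then for
    each image_detail (tag) pull and analyze the image via perform_analyze(), archive
    the query/analysis/content/summary data and load the result into the policy
    engine, updating the image record's analysis_status along the way.
    """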
    global current_avg, current_avg_count

    timer = int(time.time())
    try:
        record = qobj['data']
        userId = record['userId']
        image_record = record['image_record']
        imageDigest = image_record['imageDigest']
        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn("dequeued image cannot be fetched from catalog - skipping analysis (" + str(
                imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " + str(imageDigest))

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.working_state('analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # actually do analysis

            # for pullstring in pullstrings.keys():
            for image_detail in image_record['image_detail']:
                pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['digest']
                fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']

                imageId = None
                if 'imageId' in image_detail and image_detail['imageId']:
                    imageId = image_detail['imageId']

                logger.info("analysis starting: " + str(userId) + " : " + str(imageDigest) + " : " + str(fulltag) + " : " + str(imageId))

                logger.spew("TIMING MARKX: " + str(int(time.time()) - timer))

                registry_creds = catalog.get_registry(user_auth)

                image_data, query_data = perform_analyze(userId, pullstring, fulltag, image_detail, registry_creds)
                logger.spew("TIMING MARKY: " + str(int(time.time()) - timer))

                logger.debug("archiving query data")
                rc = catalog.put_document(user_auth, 'query_data', imageDigest, query_data)
                if rc:
                    logger.debug("storing image query data to catalog")
                else:
                    raise Exception("query archive failed to store")

                if not imageId:
                    try:
                        imageId = image_data[0]['image']['imageId']
                    except Exception as err:
                        logger.warn("could not get imageId after analysis or from image record - exception: " + str(err))

                logger.debug("archiving analysis data")
                rc = catalog.put_document(user_auth, 'analysis_data', imageDigest, image_data)
                if rc:
                    try:
                        logger.debug("extracting image content data")
                        image_content_data = {}
                        for content_type in anchore_engine.services.common.image_content_types:
                            try:
                                image_content_data[content_type] = anchore_engine.services.common.extract_analyzer_content(image_data, content_type)
                            except:
                                image_content_data[content_type] = {}

                        if image_content_data:
                            logger.debug("adding image content data to archive")
                            rc = catalog.put_document(user_auth, 'image_content_data', imageDigest, image_content_data)

                        image_summary_data = {}
                        try:
                            image_summary_data = anchore_engine.services.common.extract_analyzer_content(image_data, 'metadata')
                        except:
                            image_summary_data = {}
                        if image_summary_data:
                            logger.debug("adding image summary data to archive")
                            rc = catalog.put_document(user_auth, 'image_summary_data', imageDigest, image_summary_data)

                    except Exception as err:
                        logger.warn("could not store image content metadata to archive - exception: " + str(err))

                    logger.debug("adding image record to policy-engine service (" + str(userId) + " : " + str(imageId) + ")")
                    try:
                        if not imageId:
                            raise Exception("cannot add image to policy engine without an imageId")

                        localconfig = anchore_engine.configuration.localconfig.get_config()
                        verify = localconfig['internal_ssl_verify']

                        client = anchore_engine.clients.policy_engine.get_client(user=system_user_auth[0], password=system_user_auth[1], verify_ssl=verify)

                        try:
                            logger.debug("clearing any existing record in policy engine for image: " + str(imageId))
                            rc = client.delete_image(user_id=userId, image_id=imageId)
                        except Exception as err:
                            logger.warn("exception on pre-delete - exception: " + str(err))

                        request = ImageIngressRequest()
                        request.user_id = userId
                        request.image_id = imageId
                        request.fetch_url='catalog://'+str(userId)+'/analysis_data/'+str(imageDigest)
                        logger.debug("policy engine request: " + str(request))
                        resp = client.ingress_image(request)
                        logger.debug("policy engine image add response: " + str(resp))
                        try:
                            # force a fresh CVE scan
                            resp = client.get_image_vulnerabilities(user_id=userId, image_id=imageId, force_refresh=True)
                        except Exception as err:
                            logger.warn("post analysis CVE scan failed for image: " + str(imageId))

                    except Exception as err:
                        raise Exception("adding image to policy-engine failed - exception: " + str(err))

                    logger.debug("updating image catalog record analysis_status")
                    image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
                    rc = catalog.update_image(user_auth, imageDigest, image_record)
                else:
                    raise Exception("analysis archive failed to store")

                logger.info("analysis complete: " + str(userId) + " : " + str(imageDigest) + " : " + str(fulltag))
            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            try:
                run_time = float(time.time() - timer)
                current_avg_count = current_avg_count + 1.0
                new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                current_avg = new_avg
            except:
                pass

        except Exception as err:
            logger.exception("problem analyzing image - exception: " + str(err))
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
Exemple #29
def default_monitor_func(**kwargs):
    """
    Generic monitor thread function for invoking tasks defined in a monitor dict

    :param kwargs:
    :return:
    """
    global click, running, last_run

    my_monitors = kwargs["monitors"]
    monitor_threads = kwargs["monitor_threads"]
    servicename = kwargs["servicename"]

    timer = int(time.time())
    if click < 5:
        click = click + 1
        logger.debug(
            "service (" + str(servicename) + ") starting in: " + str(5 - click)
        )
        return True

    if round(time.time() - last_run) < kwargs["kick_timer"]:
        logger.spew(
            "timer hasn't kicked yet: "
            + str(round(time.time() - last_run))
            + " : "
            + str(kwargs["kick_timer"])
        )
        return True

    try:
        running = True
        last_run = time.time()

        # handle setting the cycle timers based on configuration
        for monitor_name in list(my_monitors.keys()):
            if not my_monitors[monitor_name]["initialized"]:
                # first time
                if "cycle_timers" in kwargs and monitor_name in kwargs["cycle_timers"]:
                    try:
                        the_cycle_timer = my_monitors[monitor_name]["cycle_timer"]
                        min_cycle_timer = my_monitors[monitor_name]["min_cycle_timer"]
                        max_cycle_timer = my_monitors[monitor_name]["max_cycle_timer"]

                        config_cycle_timer = int(kwargs["cycle_timers"][monitor_name])
                        if config_cycle_timer < 0:
                            the_cycle_timer = abs(int(config_cycle_timer))
                        elif config_cycle_timer == 0:
                            my_monitors[monitor_name]["enabled"] = False
                            logger.debug(
                                "monitor '{}' has been explicitly disabled in config".format(
                                    monitor_name
                                )
                            )
                        elif config_cycle_timer < min_cycle_timer:
                            logger.warn(
                                "configured cycle timer for handler ("
                                + str(monitor_name)
                                + ") is less than the allowed min ("
                                + str(min_cycle_timer)
                                + ") - using allowed min"
                            )
                            the_cycle_timer = min_cycle_timer
                        elif config_cycle_timer > max_cycle_timer:
                            logger.warn(
                                "configured cycle timer for handler ("
                                + str(monitor_name)
                                + ") is greater than the allowed max ("
                                + str(max_cycle_timer)
                                + ") - using allowed max"
                            )
                            the_cycle_timer = max_cycle_timer
                        else:
                            the_cycle_timer = config_cycle_timer

                        my_monitors[monitor_name]["cycle_timer"] = the_cycle_timer
                    except Exception as err:
                        logger.warn(
                            "exception setting custom cycle timer for handler ("
                            + str(monitor_name)
                            + ") - using default"
                        )

                my_monitors[monitor_name]["initialized"] = True

        # handle the thread (re)starters here
        for monitor_name in list(my_monitors.keys()):
            if my_monitors[monitor_name].get("enabled", True):
                start_thread = False
                if monitor_name not in monitor_threads:
                    start_thread = True
                else:
                    # Thread.isAlive() was removed in Python 3.9; use is_alive()
                    if not monitor_threads[monitor_name].is_alive():
                        logger.debug(
                            "thread stopped - restarting: " + str(monitor_name)
                        )
                        monitor_threads[monitor_name].join()
                        start_thread = True

                if start_thread:
                    monitor_threads[monitor_name] = threading.Thread(
                        target=my_monitors[monitor_name]["handler"],
                        args=my_monitors[monitor_name]["args"],
                        kwargs={"mythread": my_monitors[monitor_name]},
                    )
                    logger.debug("starting up monitor_thread: " + str(monitor_name))
                    monitor_threads[monitor_name].start()

    except Exception as err:
        logger.error(str(err))
    finally:
        running = False

    return True
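
# A minimal sketch, assuming names, of the structures default_monitor_func() above
# expects: each monitor entry carries a handler/args pair plus the cycle-timer bounds
# and the initialized/enabled flags the function reads, and the kwargs must include
# monitors, monitor_threads, servicename and kick_timer (cycle_timers is optional).
# The handler below is a placeholder, not a real anchore-engine handler.
def example_monitor_handler(*args, **kwargs):
    logger.debug("example monitor tick: " + str(kwargs.get("mythread")))

example_monitors = {
    "example_monitor": {
        "handler": example_monitor_handler,
        "args": [],
        "cycle_timer": 60,
        "min_cycle_timer": 10,
        "max_cycle_timer": 3600,
        "initialized": False,
        "enabled": True,
    }
}
example_monitor_threads = {}
# default_monitor_func(
#     monitors=example_monitors,
#     monitor_threads=example_monitor_threads,
#     servicename="analyzer",
#     kick_timer=1,
#     cycle_timers={"example_monitor": 60},
# )
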
def anchore_login(anchore_user, anchore_pw):
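    """
    Log the local anchore CLI into anchore.io as anchore_user. If 'anchore whoami'
    reports that this user is already logged in, the login is skipped; otherwise the
    credentials are passed to the CLI via the ANCHOREUSER/ANCHOREPASS environment
    variables, which are cleared again afterwards.
    """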
    try:
        cmd = ['anchore', '--json', 'whoami']
        logger.debug("running anchore whoami check")

        sout = None
        current_loggedin_user = None
        try:
            sout = subprocess.check_output(cmd)
            logger.debug("whoami output: " + str(sout))
            whoami_output = json.loads(sout)
            if whoami_output and 'Current user' in whoami_output:
                current_loggedin_user = whoami_output['Current user']
        except subprocess.CalledProcessError as err:
            logger.warn("whoami failed: " + str(err))
        except Exception as err:
            logger.warn("whoami failed: " + str(err))

        if current_loggedin_user:
            skiplogin = False
            if current_loggedin_user == anchore_user:
                skiplogin = True
            elif "_id" in current_loggedin_user and current_loggedin_user[
                    '_id'] == anchore_user:
                skiplogin = True

            if skiplogin:
                logger.debug("already logged in as user (" +
                             str(anchore_user) + "), skipping login")
                return (True)

        #try:
        #    anchore_logout()
        #except:
        #    pass

        if anchore_user:
            os.environ['ANCHOREUSER'] = anchore_user
        else:
            try:
                del os.environ['ANCHOREUSER']
            except:
                pass

        if anchore_pw:
            os.environ['ANCHOREPASS'] = anchore_pw
        else:
            try:
                del os.environ['ANCHOREPASS']
            except:
                pass

        logger.spew("logging into anchore.io as user: "******" : " + str(anchore_pw))
        logger.debug("logging into anchore.io as user: "******"running login: "******"login output: " + str(sout))
        except subprocess.CalledProcessError as err:
            logger.debug("login failed: " + str(err))
            raise Exception("login failed: " + str(err.output))
        except Exception as err:
            logger.debug("login failed: " + str(err))
            raise err
    except Exception as err:
        logger.error("anchore login failed (" + str(anchore_user) +
                     ") - exception: " + str(err))
        raise err
    finally:
        try:
            del os.environ['ANCHOREUSER']
            del os.environ['ANCHOREPASS']
        except:
            pass
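
# A minimal usage sketch, assuming names: in practice the credentials would come from
# the local anchore configuration rather than literals; the values below are
# placeholders only.
# try:
#     anchore_login("someuser", "somepassword")
# except Exception:
#     logger.error("unable to log the local anchore CLI into anchore.io")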