Ejemplo n.º 1
0
def update_policy(bundle, policyId):
    """Update the policy bundle identified by policyId.

    Validates that the route policyId matches the payload and that an active
    policy is not being deactivated, then persists the merged record.
    Returns a (response_object, http_status) tuple; errors become a standard
    error response body.
    """
    request_inputs = anchore_engine.services.common.do_request_prep(request, default_params={})
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    userId, pw = user_auth
    httpcode = 500
    return_object = {}

    try:
        logger.debug("Updating policy")

        jsondata = json.loads(bodycontent)

        try:
            policy_records = catalog.get_policy(user_auth, policyId=policyId)
        except Exception as err:
            logger.warn("unable to get policy_records for user (" + str(userId) + ") - exception: " + str(err))
            policy_records = []

        if not policy_records:
            httpcode = 404
            raise Exception("cannot locate specified policyId")

        policy_record = policy_records[0]

        # an active policy may not be flipped inactive through this route
        if policy_record['active'] and not jsondata['active']:
            httpcode = 500
            raise Exception("cannot deactivate an active policy - can only activate an inactive policy")

        if policyId != jsondata['policyId']:
            httpcode = 500
            raise Exception("policyId in route is different from policyId in payload")

        policy_record.update(jsondata)
        policy_record['policyId'] = policyId
        return_policy_record = catalog.update_policy(user_auth, policyId, policy_record=policy_record)
        return_object = [make_response_policy(user_auth, return_policy_record, params)]
        httpcode = 200
    except Exception as err:
        return_object = anchore_engine.services.common.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']
    return (return_object, httpcode)
Ejemplo n.º 2
0
def make_image_content_response(content_type, content_data):
    """Format raw image content data into the API response for content_type.

    Unsupported content types and empty data both yield an empty list.
    """
    localconfig = anchore_engine.configuration.localconfig.get_config()
    supported_types = localconfig.get("image_content_types", []) + localconfig.get("image_metadata_types", [])

    if content_type not in supported_types:
        logger.warn("input content_type (%s) not supported (%s)", content_type,
                    supported_types)
        return []

    if not content_data:
        logger.warn(
            "empty content data given to format - returning empty result")
        return []

    # dispatch to a type-specific builder, falling back to the generic one
    build = CONTENT_RESPONSE_BUILDER_DISPATCH.get(content_type, _build_default_response)
    return build(content_data)
Ejemplo n.º 3
0
def summary_observe(name, observation, description="", **kwargs):
    """Record an observation on the named Summary metric.

    Creates the Summary on first use; kwargs become label names/values.
    Always returns True - metric failures are logged and swallowed so that
    metrics never break the caller.
    """
    global metrics, enabled

    if not enabled:
        return (True)

    try:
        if name not in metrics:
            # pass label names as a list, consistent with counter_inc/gauge_set
            metrics[name] = Summary(name, description, list(kwargs.keys()))

        if kwargs:
            metrics[name].labels(**kwargs).observe(observation)
        else:
            metrics[name].observe(observation)

    except Exception as err:
        logger.warn("adding metric failed - exception: " + str(err))

    return (True)
Ejemplo n.º 4
0
def get_registry_catalog_orig(userId, registry):
    """Return the repository catalog for a registry using the user's stored creds.

    Credentials are looked up from local config; missing or malformed creds
    fall back to anonymous access. dockerhub catalog listing is unsupported.
    """
    ret = {}
    user = pw = None

    localconfig = anchore_engine.configuration.localconfig.get_config()

    try:
        creds = localconfig['credentials']['users'][userId][
            'registry_service_auths']['docker'][registry]['auth']
        # split only on the first ':' so passwords containing ':' stay intact
        user, pw = creds.split(":", 1)
    except Exception:
        # no creds configured for this registry - proceed unauthenticated
        pass

    if registry == 'docker.io':
        logger.warn("cannot currently get catalog repo list from dockerhub")
    else:
        ret = get_registry_catalog_docker_orig(registry, user=user, pw=pw)

    return (ret)
Ejemplo n.º 5
0
def run_feed_sync(system_user):
    """Run one feed-sync cycle off the shared simplequeue.

    Raises if the simplequeue service is not yet available so the caller's
    retry machinery kicks in; task-level failures are only logged because the
    queue fetch already retries internally.
    """
    all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
    if not all_ready:
        logger.info("simplequeue service not yet ready, will retry")
        raise Exception('Simplequeue service not yet ready')

    try:
        # This has its own retry on the queue fetch, so wrap with catch block to ensure we don't double-retry on task exec
        simplequeue.run_target_with_queue_ttl(
            None,
            queue=feed_sync_queuename,
            target=do_feed_sync,
            max_wait_seconds=30,
            visibility_timeout=180,
            retries=FEED_SYNC_RETRIES,
            backoff_time=FEED_SYNC_RETRY_BACKOFF)
    except Exception as err:
        logger.warn("failed to process task this cycle: " + str(err))
Ejemplo n.º 6
0
def check_services_ready(servicelist):
    """Return True only when every named service has at least one enabled instance."""
    global scache

    ready = True
    try:
        for name in servicelist:
            logger.debug("checking service readiness: " + str(name))
            if not get_enabled_services(None, name):
                logger.warn("required service (" + str(name) +
                            ") is not (yet) available")
                ready = False
                break
    except Exception as err:
        logger.error("could not check service status - exception: " + str(err))
        ready = False

    return (ready)
Ejemplo n.º 7
0
def get_image_manifest_skopeo(url, registry, repo, intag=None, indigest=None, topdigest=None, user=None, pw=None, verify=True):
    """Fetch an image manifest and its digest from a registry via skopeo.

    Exactly one of intag/indigest must be supplied. When the fetched manifest
    is a v2 manifest *list*, recurses once into its amd64/linux entry while
    preserving the original (outermost) digest as topdigest.

    Returns a (manifest_dict, digest, topdigest) tuple; raises SkopeoError if
    no manifest/digest could be obtained.
    """
    manifest = {}
    digest = None
    testDigest = None

    # build the pull reference; an explicit digest takes precedence over a tag
    if indigest:
        pullstring = registry + "/" + repo + "@" + indigest
    elif intag:
        pullstring = registry + "/" + repo + ":" + intag
    else:
        raise Exception("invalid input - must supply either an intag or indigest")

    try:
        try:
            rawmanifest = get_image_manifest_skopeo_raw(pullstring, user=user, pw=pw, verify=verify)
            digest = manifest_to_digest(rawmanifest)
            manifest = json.loads(rawmanifest)
            # keep the first digest seen as topdigest across the recursion below
            if not topdigest:
                topdigest = digest

            if manifest.get('schemaVersion') == 2 and manifest.get('mediaType') == 'application/vnd.docker.distribution.manifest.list.v2+json':
                # Get the arch-specific version for amd64 and linux
                new_digest = None
                for entry in manifest.get('manifests'):
                    platform = entry.get('platform')
                    if platform and platform.get('architecture') in ['amd64'] and platform.get('os') == 'linux':
                        new_digest = entry.get('digest')
                        break

                # NOTE(review): if no amd64/linux entry exists, new_digest stays None
                # and the recursive call raises the generic "invalid input" error
                return get_image_manifest_skopeo(url=url, registry=registry, repo=repo, intag=None, indigest=new_digest, user=user, pw=pw, verify=verify, topdigest=topdigest)
        except Exception as err:
            logger.warn("CMD failed - exception: " + str(err))
            raise err

    except Exception as err:
        import traceback
        traceback.print_exc()
        raise err

    if not manifest or not digest:
        raise SkopeoError(msg="No digest/manifest from skopeo")

    return(manifest, digest, topdigest)
Ejemplo n.º 8
0
def counter_inc(name, step=1, description="", **kwargs):
    """Increment the named Counter metric by step, creating it on first use.

    kwargs become label names/values. Always returns True - metric failures
    are logged and swallowed so that metrics never break the caller.
    """
    global metrics

    if not enabled:
        return (True)

    try:
        if name not in metrics:
            metrics[name] = Counter(name, description, list(kwargs.keys()))

        metric = metrics[name]
        if kwargs:
            metric.labels(**kwargs).inc(step)
        else:
            metric.inc(step)
    except Exception as err:
        logger.warn("adding metric failed - exception: " + str(err))

    return (True)
Ejemplo n.º 9
0
def gauge_set(name, observation, description="", **kwargs):
    """Set the named Gauge metric to observation, creating it on first use.

    kwargs become label names/values. Always returns True - metric failures
    are logged and swallowed so that metrics never break the caller.
    """
    global metrics

    if not enabled:
        return (True)

    try:
        if name not in metrics:
            metrics[name] = Gauge(name, description, list(kwargs.keys()))

        metric = metrics[name]
        if kwargs:
            metric.labels(**kwargs).set(observation)
        else:
            metric.set(observation)
    except Exception as err:
        logger.warn("adding metric failed - exception: " + str(err))

    return (True)
Ejemplo n.º 10
0
    def bind_for_cpe22_uri(element):
        """Bind a single unbound CPE 2.3 component into its 2.2 URI form.

        '*' binds to the empty string, '-' and '' pass through unchanged,
        unescaped '?'/'*' become %01/%02, and backslash-escaped characters
        are percent encoded where a mapping exists.
        """
        if not isinstance(element, str):
            raise Exception("Value to be bound in URI format is not a string")

        if element == "*":
            return ""
        if element in ["-", ""]:
            return element

        pieces = []
        idx = 0
        end = len(element)
        while idx < end:
            char = element[idx]
            if char == "\\":
                if idx < end - 1:
                    # escaped character: percent encode it if possible
                    n_char = element[idx + 1]
                    encoded = CPE_SPECIAL_CHAR_ENCODER.get(n_char, None)
                    if encoded:
                        pieces.append(encoded)
                    else:
                        # no encoding found, let it go through as it is
                        logger.warn(
                            "No encoding found for {}{}".format(char, n_char)
                        )
                        pieces.append(char + n_char)
                    idx += 2
                    continue
                # trailing backslash: nothing left to percent encode
                logger.warn(
                    "{} is the last char, skipping percent encoded transformation".format(
                        char
                    )
                )
                pieces.append(char)
            elif char == "?":  # bind the unescaped ? to %01
                pieces.append("%01")
            elif char == "*":  # bind the unescaped * to %02
                pieces.append("%02")
            else:
                pieces.append(char)
            idx += 1

        return "".join(pieces)
Ejemplo n.º 11
0
def make_response_vulnerability(vulnerability_type, vulnerability_data):
    """Translate a vulnerability scan result into the API response format.

    'os' results are flattened from the legacy table layout (header + rows)
    into a list of dicts; any other type is passed through unchanged. Empty
    input yields an empty list.
    """
    ret = []

    if not vulnerability_data:
        logger.warn(
            "empty query data given to format - returning empty result")
        return (ret)

    if vulnerability_type != 'os':
        return (vulnerability_data)

    # response field -> legacy report column name
    keymap = {
        'vuln': 'CVE_ID',
        'severity': 'Severity',
        'package': 'Vulnerable_Package',
        'fix': 'Fix_Available',
        'url': 'URL'
    }
    scan_result = vulnerability_data['legacy_report']
    try:
        for imageId in scan_result.keys():
            header = scan_result[imageId]['result']['header']
            rows = scan_result[imageId]['result']['rows']
            for row in rows:
                entry = {}
                for field, column in keymap.items():
                    try:
                        value = row[header.index(column)]
                    except:
                        value = None
                    # normalize the legacy 'N/A' marker to a real null
                    entry[field] = None if value == 'N/A' else value
                ret.append(entry)
    except Exception as err:
        logger.warn("could not prepare query response - exception: " +
                    str(err))
        ret = []

    return (ret)
Ejemplo n.º 12
0
def delete_policy(policyId):
    """Delete the policy identified by policyId.

    An active policy cannot be deleted - a different policy must be activated
    first. Returns a (response_object, http_status) tuple.
    """
    request_inputs = anchore_engine.services.common.do_request_prep(request, default_params={})
    user_auth = request_inputs['auth']

    userId, pw = user_auth
    httpcode = 500
    return_object = {}

    try:
        logger.debug("Delete policy")

        try:
            policy_records = catalog.get_policy(user_auth, policyId=policyId)
        except Exception as err:
            logger.warn("unable to get policy_records for user (" + str(userId) + ") - exception: " + str(err))
            policy_records = []

        # refuse to remove the currently-active policy
        if policy_records and policy_records[0]['active']:
            httpcode = 500
            raise Exception(
                "cannot delete an active policy - activate a different policy then delete this one")

        rc = catalog.delete_policy(user_auth, policyId=policyId)

        if rc:
            httpcode = 200
            return_object = "deleted"
        else:
            httpcode = 500
            raise Exception('not deleted')
    except Exception as err:
        return_object = anchore_engine.services.common.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
Ejemplo n.º 13
0
def update_service_cache(user_auth, servicename, skipcache=False):
    """Refresh the in-memory service record cache for servicename if stale.

    Returns True when the existing cache entry was fresh enough to use as-is,
    False when a refresh from the services DB was attempted.
    """
    global scache, scache_template

    from_cache = True

    # (re)initialize the entry when told to skip the cache, or on first sight
    if skipcache or servicename not in scache:
        scache[servicename] = copy.deepcopy(scache_template)
        from_cache = False

    entry = scache[servicename]

    # no records at all, or any record marked down, forces a refresh
    if not entry['records']:
        from_cache = False
    elif any(not record['status'] for record in entry['records']):
        from_cache = False

    # TTL expiry also forces a refresh
    if (time.time() - entry['last_updated']) > entry['ttl']:
        from_cache = False

    if not from_cache:
        # refresh the cache for this service from catalog call
        try:
            logger.debug("fetching services (" + str(servicename) + ")")
            with db.session_scope() as dbsession:
                service_records = db.db_services.get_byname(servicename,
                                                            session=dbsession)
            logger.debug("services fetched: " + str(service_records))
        except Exception as err:
            logger.warn("cannot get service: " + str(err))
            service_records = []

        if service_records:
            # only keep records for healthy instances
            entry['records'] = [rec for rec in service_records if rec['status']]
            entry['last_updated'] = time.time()

    return (from_cache)
def _rmtree_error_handler(infunc, inpath, inerr):
    (cls, exc, trace) = inerr
    try:
        # attempt to change the permissions and then retry removal
        os.chmod(inpath, 0o777)
    except Exception as err:
        logger.warn(
            "unable to change permissions in error handler for path {} in shutil.rmtree".format(
                inpath
            )
        )
    finally:
        try:
            infunc(inpath)
        except Exception as err:
            logger.debug(
                "unable to remove in error handler for path {} - this will be retried".format(
                    err
                )
            )
Ejemplo n.º 15
0
def handle_metrics(*args, **kwargs):
    """Monitor loop: periodically publish a queue-length gauge for every queue.

    Runs forever, sleeping cycle_timer seconds between passes.
    """
    cycle_timer = kwargs['mythread']['cycle_timer']

    while True:
        try:
            for qname in anchore_engine.subsys.simplequeue.get_queuenames():
                try:
                    depth = anchore_engine.subsys.simplequeue.qlen(qname)
                    anchore_engine.subsys.metrics.gauge_set(
                        "anchore_queue_length", depth, queuename=qname)
                except:
                    logger.warn(
                        "could not get/set queue length metric for queue (" +
                        str(qname) + ")")
        except Exception as err:
            logger.warn("handler failed - exception: " + str(err))

        time.sleep(cycle_timer)

    return (True)
Ejemplo n.º 16
0
def handle_metrics(*args, **kwargs):
    """Monitor loop: periodically publish a gauge of free bytes under tmp_dir.

    Runs forever, sleeping cycle_timer seconds between passes.
    """
    cycle_timer = kwargs['mythread']['cycle_timer']

    while True:
        try:
            localconfig = anchore_engine.configuration.localconfig.get_config()
            try:
                stats = os.statvfs(localconfig['tmp_dir'])
                free_bytes = stats.f_bsize * stats.f_bavail
                anchore_engine.subsys.metrics.gauge_set("anchore_tmpspace_available_bytes", free_bytes)
            except Exception as err:
                logger.warn("unable to detect available bytes probe - exception: " + str(err))
        except Exception as err:
            logger.warn("handler failed - exception: " + str(err))

        time.sleep(cycle_timer)

    return True
Ejemplo n.º 17
0
def list_policies(active=None):
    """
    GET /policies?active=true|false
    :return:
    """
    try:
        request_inputs = anchore_engine.apis.do_request_prep(connexion.request, default_params={})
        user_id = request_inputs['userId']

        # only filter on the 'active' flag when the caller supplied one
        filter_kwargs = {} if active is None else {'active': active}
        with db.session_scope() as dbsession:
            records = db_policybundle.get_byfilter(user_id, session=dbsession, **filter_kwargs)

        if not records:
            return [], 200

        # attach the bundle document (and its metadata) to each record
        for record in records:
            record['policybundle'] = {}
            try:
                policybundle = archive.get_document(user_id, 'policy_bundles', record['policyId'])
                if policybundle:
                    record['policybundle'] = policybundle

                    record['policybundlemeta'] = {}
                    meta = archive.get_document_meta(user_id, 'policy_bundles', record['policyId'])
                    if meta:
                        record['policybundlemeta'] = meta

            except Exception as err:
                logger.warn("failed to fetch policy bundle from archive - exception: " + str(err))
                raise anchore_engine.common.helpers.make_anchore_exception(err,
                                                                           input_message="failed to fetch policy bundle from archive",
                                                                           input_httpcode=500)

        return records, 200
    except Exception as err:
        return str(err), 500
Ejemplo n.º 18
0
    def makeService(self, options):
        """Twisted entry point: configure and assemble this anchore API service.

        Initializes config and logging, instantiates the service class,
        optionally starts its monitor thread, builds the API handlers, and
        returns the MultiService parented to a new twisted Application.
        Raises a generic Exception if any step of the bootstrap fails.
        """
        try:
            logger.info('Initializing configuration')
            self._init_config(options)

            logger.info('Initializing logging')
            self._init_logging()

            # bail out early if this service is disabled in configuration
            self._check_enabled()

            #logger.enable_bootstrap_logging(self.tapname)

            assert (issubclass(self.service_cls, ApiService))
            self.anchore_service = self.service_cls(options=options)
            self.anchore_service.initialize(self.configuration)

            # application object
            # NOTE(review): '-'.join(...) iterates name - if name is a plain
            # string this dash-separates its characters; confirm intended
            application = service.Application("Service-" + '-'.join(self.anchore_service.name))
            self.twistd_service = service.MultiService()
            self.twistd_service.setServiceParent(application)

            if not os.environ.get('ANCHORE_ENGINE_DISABLE_MONITORS'):
                logger.info('Starting monitor thread')
                lc = self._get_api_monitor(self.anchore_service)
                lc.start(1)  # 1-second monitor interval
            else:
                logger.warn('Skipped start of monitor threads due to ANCHORE_ENGINE_DISABLE_MONITORS set in environment')

            logger.info('Building api handlers')
            s = self._build_api_service()
            s.setServiceParent(self.twistd_service)

            return self.twistd_service

        except Exception as err:
            logger.error("cannot create/init/register service: " + self.service_cls.__service_name__ + " - exception: " + str(err))
            traceback.print_exc('Service init failure')
            raise Exception("cannot start service (see above for information)")
        finally:
            pass
Ejemplo n.º 19
0
    def map(self, record_json):
        """Map a raw NVD v2 feed record (dict) onto an NvdV2Metadata ORM row,
        converting each vulnerable CPE string into a CpeV2Vulnerability child."""
        # Copy it to ensure no lingering refs to source json doc
        record_json = copy.deepcopy(record_json)

        cve = record_json.get('cve', {})

        db_rec = NvdV2Metadata()
        db_rec.name = cve.get('CVE_data_meta', {}).get('ID', None)
        db_rec.namespace_name = self.group
        db_rec.description = cve.get('description', {}).get('description_data', [{}])[0].get('value', "")
        db_rec.cvss_v2 = record_json.get('cvss_v2', None)
        db_rec.cvss_v3 = record_json.get('cvss_v3', None)
        db_rec.severity = record_json.get('severity') if record_json.get('severity', None) else 'Unknown'
        db_rec.link = "https://nvd.nist.gov/vuln/detail/{}".format(db_rec.name)
        db_rec.references = record_json.get('external_references', [])

        db_rec.vulnerable_cpes = []
        for input_cpe in record_json.get('vulnerable_cpes', []):
            try:
                # "cpe:2.3:a:openssl:openssl:-:*:*:*:*:*:*:*",
                # TODO - handle cpe inputs with escaped characters
                cpe_obj = CPE.from_cpe23_fs(input_cpe)
                newcpe = CpeV2Vulnerability()
                newcpe.feed_name = self.feed
                # copy each CPE component field onto the ORM record
                for attr in ('part', 'vendor', 'product', 'version', 'update',
                             'edition', 'language', 'sw_edition', 'target_sw',
                             'target_hw', 'other'):
                    setattr(newcpe, attr, getattr(cpe_obj, attr))
                db_rec.vulnerable_cpes.append(newcpe)
            except Exception as err:
                logger.warn("failed to convert vulnerable-software-list into database CPEV2 record - exception: " + str(err))

        return db_rec
Ejemplo n.º 20
0
    def update_grype_db(
            self, archive_checksum: str) -> Optional[GrypeDBEngineMetadata]:
        """
        Checks to ensure a new grype_db has been staged, and raises a ValueError if it has not. Otherwise
        this promotes the staged grype_db to the production grype_db, and unstages the previously-staged
        grype_db.

        To ensure that the caller is promoting the correct staged grype-db (ie the one it think it is
        promoting to production, this method is parameterized with archive_checksum, which must be supplied and
        match the archive_checksum stored in the staging engine metadata.
        """
        # all staging/production state transitions happen under the write lock
        with self.write_lock_access():
            # Ensure a grype_db has been staged, and raise an error if not.
            # NOTE(review): this only trips when ALL three staging fields are
            # unset - a partially-staged state would fall through; confirm intended
            if (not self._staging_grype_db_dir
                    and not self._staging_grype_db_version
                    and not self._staging_grype_db_session_maker):
                raise ValueError(
                    self.STAGED_GRYPE_DB_NOT_FOUND_ERROR_MESSAGE.format(
                        archive_checksum))
            else:
                staging_engine_metadata = self.get_grype_db_engine_metadata(
                    use_staging=True)

                # checksum mismatch: refuse to promote, hand back the staged metadata
                if staging_engine_metadata.archive_checksum != archive_checksum:
                    logger.warn(
                        "Staged grype_db does not match the provide archive checksum: %s. "
                        + "Returning engine metadata for the staged grype_db",
                        archive_checksum,
                    )
                    return staging_engine_metadata
                else:
                    # Promote the staged grype_db to production
                    self._grype_db_dir = self._staging_grype_db_dir
                    self._grype_db_version = self._staging_grype_db_version
                    self._grype_db_session_maker = self._staging_grype_db_session_maker

                    # Unstage the previously-staged grype_db
                    self.unstage_grype_db()

        # Return the new production engine metadata as a data object
        return self.get_grype_db_engine_metadata(use_staging=False)
Ejemplo n.º 21
0
    def healthcheck(self):
        """
        Raises an exception on failure or returns True on success

        :return:
        """
        internal_ok = False
        external_ok = False

        # native/admin authz check via the parent implementation
        try:
            internal_ok = super().healthcheck()
        except Exception as e:
            logger.error(
                'Caught exception from admin/native authz check: {}'.format(
                    str(e)))
            internal_ok = False

        # external authz client check - requires a configured client
        try:
            if not self.__external_authorizer__:
                logger.warn(
                    'Attempted health check for external authz handler but no client configured yet'
                )
                return False
            external_ok = self.__external_authorizer__.client.healthcheck()
        except Exception as e:
            logger.error(
                'Healthcheck for external authz handler caught exception: {}'.
                format(e))
            external_ok = False

        logger.debug(
            'External authz healthcheck result: internal handler {}, external handler {}'
            .format(internal_ok, external_ok))

        if internal_ok and external_ok:
            return True

        raise Exception(
            'Internal authz check returned {}, External authz check returned {}'
            .format(internal_ok, external_ok))
Ejemplo n.º 22
0
def delete_policy(policyId):
    """Delete the policy identified by policyId via the catalog client.

    An active policy cannot be deleted - a different policy must be activated
    first. Returns a (response_object, http_status) tuple.
    """
    request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
    user_auth = request_inputs['auth']

    userId, pw = user_auth
    httpcode = 500
    return_object = {}

    try:
        logger.debug("Delete policy {}".format(policyId))
        client = internal_client_for(CatalogClient, request_inputs['userId'])

        try:
            policy_record = client.get_policy(policyId=policyId)
        except Exception as err:
            logger.warn("unable to get policy_records for user (" + str(userId) + ") - exception: " + str(err))
            raise err

        # refuse to remove the currently-active policy
        if policy_record and policy_record['active']:
            httpcode = 500
            raise Exception(
                "cannot delete an active policy - activate a different policy then delete this one")

        rc = client.delete_policy(policyId=policyId)

        if rc:
            httpcode = 200
            return_object = "deleted"
        else:
            httpcode = 500
            raise Exception('not deleted')
    except Exception as err:
        return_object = anchore_engine.common.helpers.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
Ejemplo n.º 23
0
def queue_notification(userId, subscription_key, subscription_type, payload):
    """Enqueue a notification for the given subscription, skipping duplicates.

    Returns the enqueue result (False when an identical notification was
    already pending); re-raises any queue failure after logging it.
    """
    localconfig = anchore_engine.configuration.localconfig.get_config()
    system_user_auth = localconfig['system_user_auth']

    rc = False
    try:
        notification = {
            'userId': userId,
            'subscription_key': subscription_key,
            'notificationId': str(uuid.uuid4()),
        }
        if payload:
            notification.update(payload)
        # only enqueue if an identical notification is not already in the queue
        if not simplequeue.is_inqueue(system_user_auth, subscription_type, notification):
            rc = simplequeue.enqueue(system_user_auth, subscription_type, notification)
    except Exception as err:
        logger.warn("failed to create/enqueue notification")
        raise err

    return (rc)
    def _should_evaluate(self, cache_entry: CachedPolicyEvaluation):
        """Classify a cached policy evaluation as missing, invalid, stale, or valid."""
        if cache_entry is None:
            metrics.counter_inc(name="anchore_policy_evaluation_cache_misses_notfound")
            return EvaluationCacheManager.CacheStatus.missing

        # The cached result is not for this exact bundle content, so result is invalid
        if cache_entry.bundle_id != self.bundle_id:
            log.warn("Unexpectedly got a cached evaluation for a different bundle id")
            metrics.counter_inc(name="anchore_policy_evaluation_cache_misses_notfound")
            return EvaluationCacheManager.CacheStatus.missing

        if cache_entry.bundle_digest != self.bundle_digest:
            metrics.counter_inc(name="anchore_policy_evaluation_cache_misses_invalid")
            return EvaluationCacheManager.CacheStatus.invalid

        # Same bundle digest: a feed sync or image reload since the eval means
        # the inputs may have changed, so the entry is stale
        if self._inputs_changed(cache_entry.last_modified):
            metrics.counter_inc(name="anchore_policy_evaluation_cache_misses_stale")
            return EvaluationCacheManager.CacheStatus.stale

        return EvaluationCacheManager.CacheStatus.valid
Ejemplo n.º 25
0
def do_feed_sync(msg):
    """Execute one feed-sync task and record its runtime as a success/fail metric."""
    # deferred imports to avoid import cycles at module load time
    if 'FeedsUpdateTask' not in locals():
        from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask

    if 'get_selected_feeds_to_sync' not in locals():
        from anchore_engine.services.policy_engine.engine.feeds import get_selected_feeds_to_sync

    handler_success = False
    start_ts = time.time()
    logger.info("FIRING: feed syncer")
    try:
        feeds = get_selected_feeds_to_sync(localconfig.get_config())
        logger.info('Syncing configured feeds: {}'.format(feeds))
        result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get('data'))

        # a None result signals the sync was administratively disabled
        if result is not None:
            handler_success = True
        else:
            logger.warn('Feed sync task marked as disabled, so skipping')
    except ValueError as e:
        logger.warn('Received msg of wrong type')
    except Exception as err:
        logger.warn("failure in feed sync handler - exception: " + str(err))

    status = "success" if handler_success else "fail"
    anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - start_ts, function='do_feed_sync', status=status)
Ejemplo n.º 26
0
def perform_analyze_nodocker(userId, manifest, image_record, registry_creds, layer_cache_enable=False, parent_manifest=None):
    """Analyze an image without a docker daemon via localanchore_standalone.

    Pulls and analyzes the image described by the first image_detail entry of
    image_record, serialized under a per-pullstring anchore lock, and returns
    the analysis report. Raises if image_record lacks the registry/repo/
    digest/tag fields needed to build the pull reference.
    """
    ret_analyze = {}
    ret_query = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    try:
        tmpdir = localconfig['tmp_dir']
    except Exception as err:
        # fall back to /tmp when the config provides no tmp_dir
        logger.warn("could not get tmp_dir from localconfig - exception: " + str(err))
        tmpdir = "/tmp"

    # optional shared layer cache location under the tmp dir
    use_cache_dir=None
    if layer_cache_enable:
        use_cache_dir = os.path.join(tmpdir, "anchore_layercache")

    # choose the first TODO possible more complex selection here
    try:
        image_detail = image_record['image_detail'][0]
        registry_manifest = manifest
        registry_parent_manifest = parent_manifest
        pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['imageDigest']
        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
        logger.debug("using pullstring ("+str(pullstring)+") and fulltag ("+str(fulltag)+") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception("failed to extract requisite information from image_record - exception: " + str(err))

    timer = int(time.time())
    logger.spew("timing: analyze start: " + str(int(time.time()) - timer))
    logger.info("performing analysis on image: " + str([userId, pullstring, fulltag]))

    # serialize analysis per pullstring across workers
    logger.debug("obtaining anchorelock..." + str(pullstring))
    with anchore_engine.clients.localanchore_standalone.get_anchorelock(lockId=pullstring, driver='nodocker'):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))
        analyzed_image_report, manifest_raw = localanchore_standalone.analyze_image(userId, registry_manifest, image_record, tmpdir, localconfig, registry_creds=registry_creds, use_cache_dir=use_cache_dir, parent_manifest=registry_parent_manifest)
        ret_analyze = analyzed_image_report

    logger.info("performing analysis on image complete: " + str(pullstring))

    return ret_analyze
Ejemplo n.º 27
0
def make_response_error(errmsg, in_httpcode=None, details=None):
    """
    Build the standard error-response dict for API handlers.

    :param errmsg: error message string or an Exception; if the exception
        carries an 'anchore_error_json' attribute, its JSON content is parsed
        and merged into the response
    :param in_httpcode: HTTP status code to report (falsy/None -> 500)
    :param details: optional dict placed under the 'detail' key
    :return: dict with 'message' (str), 'httpcode' (int) and 'detail' (dict,
        always containing an 'error_codes' list)
    """
    if details is None:
        details = {}

    httpcode = in_httpcode if in_httpcode else 500

    ret = {'message': str(errmsg), 'httpcode': int(httpcode), 'detail': details}
    if 'error_codes' not in ret['detail']:
        ret['detail']['error_codes'] = []

    if isinstance(errmsg, Exception):
        anchore_error_json = errmsg.__dict__.get('anchore_error_json')
        # Bug fix: only attempt the parse when the attribute holds a value.
        # The original membership test let a present-but-None value reach
        # json.loads(None), which raises TypeError (not the caught ValueError)
        # and escaped this handler. TypeError is also caught now for safety.
        if anchore_error_json is not None:
            try:
                err_json = json.loads(anchore_error_json)
            except (ValueError, TypeError):
                # Then it may just be a string, we cannot do anything with it
                logger.debug('Failed to parse anchore_error_json as json')
                return ret

            if {'message', 'httpcode', 'detail'}.issubset(err_json):
                ret.update(err_json)

            try:
                # get() already handles a missing key, so no subset pre-check
                if err_json.get('error_code', None):
                    if 'error_codes' not in ret['detail']:
                        ret['detail']['error_codes'] = []
                    ret['detail']['error_codes'].append(err_json.get('error_code'))
            except KeyError:
                logger.warn(
                    "unable to marshal error details: source error {}".format(
                        errmsg.__dict__))
    return ret
Ejemplo n.º 28
0
def do_feed_sync(msg):
    """
    Handler for a feed-sync queue message: runs the feeds update task and
    records the handler runtime in the monitor metrics summary.

    :param msg: message dict; its 'data' member is passed to the sync task
    :return: None
    """
    # Imported here to avoid a circular import at module load time. The
    # original guarded this with `if "FeedsUpdateTask" not in locals():`,
    # which is always true at function entry, so import unconditionally.
    from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask

    handler_success = False
    timer = time.time()
    logger.info("FIRING: feed syncer")
    try:
        result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get("data"))

        # A None result signals the task is disabled, not a failure
        if result is not None:
            handler_success = True
        else:
            logger.warn("Feed sync task marked as disabled, so skipping")
    except ValueError:
        logger.warn("Received msg of wrong type")
    except Exception as err:
        logger.warn("failure in feed sync handler - exception: " + str(err))

    # Single metrics call; only the status label differs between outcomes
    anchore_engine.subsys.metrics.summary_observe(
        "anchore_monitor_runtime_seconds",
        time.time() - timer,
        function="do_feed_sync",
        status="success" if handler_success else "fail",
    )
Ejemplo n.º 29
0
def load_policy_bundle_paths(src_dirs=None):
    """
    Copy policy bundle files from the source dir(s) into the configured
    policy bundles dir and record them in localconfig['policy_bundles'].

    Each recorded entry is {'active': bool, 'bundle_path': str}; only the
    default bundle (anchore_default_bundle.json) is marked active. On any
    failure, localconfig['policy_bundles'] is set to None.

    :param src_dirs: optional list of source dirs (typically only supplied
        by automated tests); defaults to policy_bundle_source_dirs()
    :return: None (result is stored in localconfig)
    """
    global localconfig

    default_bundle_name = "anchore_default_bundle.json"

    # Get the dir containing policy bundles to put in the config
    policy_bundles_dir = localconfig["policy_bundles_dir"]

    # This value will typically == None, outside of automated tests
    if src_dirs is None:
        src_dirs = policy_bundle_source_dirs()

    try:
        if policy_bundles_dir and src_dirs:
            policy_bundles_dir_full_path = os.path.join(
                localconfig["service_dir"], policy_bundles_dir)
            if not os.path.exists(policy_bundles_dir_full_path):
                os.mkdir(policy_bundles_dir_full_path)

            policy_bundles = []
            for src_dir in src_dirs:
                for file_name in os.listdir(src_dir):
                    # Destination path for the copied bundle (renamed from
                    # 'file' to avoid shadowing the builtin name)
                    dest_path = os.path.join(policy_bundles_dir_full_path,
                                             file_name)
                    policy_bundles.append({
                        "active": file_name == default_bundle_name,
                        "bundle_path": dest_path,
                    })
                    copy_config_file(dest_path, file_name, src_dir)
            localconfig["policy_bundles"] = policy_bundles
            return
        else:
            logger.warn(
                "No configured policy bundle dir was found, unable to load.")
            localconfig["policy_bundles"] = None
    except Exception as e:
        logger.warn(
            "Configured policy bundle dir at {} not found, unable to load. Exception: {}"
            .format(policy_bundles_dir, e))
        localconfig["policy_bundles"] = None
Ejemplo n.º 30
0
    def _configure(self):
        """
        Load service configuration, resolve whether task handlers are enabled,
        and build the kwargs used to launch the service monitor thread.

        :return: None (mutates self.configuration, self.monitor_kwargs, etc.)
        """
        logger.info('Loading and initializing global configuration')
        self._init_versions()

        self.configuration = self._get_service_configuration(self.global_configuration)
        self.instance_id = localconfig.get_host_id()
        self.fq_name = (self.name, self.instance_id)

        # Task handlers run only if neither the config file nor the
        # ANCHORE_ENGINE_DISABLE_MONITORS env var disables them.
        # env_setting is True when the env var does NOT disable monitors.
        self.task_handlers_enabled = self.configuration.get('task_handlers_enabled', True)
        env_setting = not os.environ.get('ANCHORE_ENGINE_DISABLE_MONITORS', 'false').lower() == 'true'
        self.task_handlers_enabled = self.task_handlers_enabled and env_setting

        if not self.task_handlers_enabled:
            # Bug fix: the original branches were inverted — it logged the
            # env-var message when env_setting was True, i.e. when the env var
            # was NOT the cause. Report the actual source of the disablement.
            if not env_setting:
                logger.warn('Task handlers disabled by setting ANCHORE_ENGINE_DISABLE_MONITORS in environment')
            else:
                logger.warn('Task handlers disabled by configuration file value')

        try:
            kick_timer = int(self.configuration['cycle_timer_seconds'])
        except (KeyError, TypeError, ValueError):
            # Missing or malformed setting: fall back to a 1-second kick timer
            kick_timer = 1

        try:
            cycle_timers = {}
            cycle_timers.update(self.configuration['cycle_timers'])
        except (KeyError, TypeError, ValueError):
            cycle_timers = {}

        self.monitor_kwargs['kick_timer'] = kick_timer
        self.monitor_kwargs['cycle_timers'] = cycle_timers
        self.monitor_kwargs['monitors'] = copy.deepcopy(self.__monitors__)
        self.monitor_kwargs['monitor_threads'] = self.monitor_threads
        self.monitor_kwargs['servicename'] = self.name
        logger.info('Configuration complete')