Example no. 1
0
    def run_feeds_update(cls, json_obj=None, force_flush=False):
        """
        Creates a FeedsUpdateTask and executes it, optionally under a
        distributed lease when locking is enabled.

        :param json_obj: optional serialized task, rehydrated via from_json()
        :param force_flush: passed through to FeedsUpdateTask as its flush flag
        :return: the result of task.execute(), or None if json_obj cannot be
            deserialized into a task
        """
        error = None
        feeds = None
        system_user = None

        try:
            system_user = get_system_user_auth()
            feeds = get_selected_feeds_to_sync(localconfig.get_config())
            if json_obj:
                task = cls.from_json(json_obj)
                if not task:
                    return None
                task.feeds = feeds
            else:
                task = FeedsUpdateTask(feeds_to_sync=feeds, flush=force_flush)

            # Create feed task begin event
            try:
                catalog.add_event(system_user, FeedSyncStart(groups=feeds if feeds else 'all'))
            except Exception:
                log.exception('Ignoring event generation error before feed sync')

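            # when locking is enabled, run the sync under a distributed lease
            # so that only one feed sync executes at a time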
            result = []
            if cls.locking_enabled:
                run_target_with_lease(user_auth=system_user, lease_id='feed_sync', ttl=90, target=lambda: result.append(task.execute()))
                # A bit of work-around for the lambda def to get result from thread execution
                if result:
                    result = result[0]
            else:
                result = task.execute()

            return result
        except LeaseAcquisitionFailedError as ex:
            error = ex
            log.exception('Could not acquire lock on feed sync, likely another sync already in progress')
            raise Exception('Cannot execute feed sync, lock is held by another feed sync in progress')
        except Exception as e:
            error = e
            log.exception('Error executing feeds update')
            raise
        finally:
            # log feed sync event
            try:
                if error:
                    catalog.add_event(system_user, FeedSyncFail(groups=feeds if feeds else 'all', error=error))
                else:
                    catalog.add_event(system_user, FeedSyncComplete(groups=feeds if feeds else 'all'))
            except Exception:
                log.exception('Ignoring event generation error after feed sync')
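
# A minimal, self-contained sketch (illustrative, not anchore_engine code) of
# the result-capture pattern used above: a thread target cannot return a
# value directly, so the result is appended to a closed-over list and read
# back after the thread joins.
import threading

def run_and_capture(target):
    result = []
    worker = threading.Thread(target=lambda: result.append(target()))
    worker.start()
    worker.join()
    return result[0] if result else None

# e.g. run_and_capture(lambda: 21 * 2) == 42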
Example no. 2
0
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    global servicename  #current_avg, current_avg_count

    timer = int(time.time())
    event = None
    try:
        logger.debug('dequeued object: {}'.format(qobj))

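        # unpack the queued message: the analysis request carries the account
        # id, the digest of the image to analyze and the image manifest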
        record = qobj['data']
        userId = record['userId']
        imageDigest = record['imageDigest']
        manifest = record['manifest']

        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth,
                                              imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(imageDigest) + ") - exception: " + str(err))
            return True

        logger.info("image dequeued for analysis: " + str(userId) + " : " +
                    str(imageDigest))
        if image_record['analysis_status'] != anchore_engine.subsys.taskstate.base_state('analyze'):
            logger.debug("dequeued image is not in base state - skipping analysis")
            return True

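        # move the image into the working state up front so that a re-dequeued
        # copy of the same image is skipped by the base-state check above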
        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            last_analysis_status = image_record['analysis_status']
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.working_state('analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # disable the webhook call for image state transition to 'analyzing'
            #try:
            #    for image_detail in image_record['image_detail']:
            #        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
            #        npayload = {
            #            'last_eval': {'imageDigest': imageDigest, 'analysis_status': last_analysis_status},
            #            'curr_eval': {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status']},
            #        }
            #        rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
            #except Exception as err:
            #    logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))

            # actually do analysis
            registry_creds = catalog.get_registry(user_auth)
            try:
                image_data = perform_analyze(
                    userId,
                    manifest,
                    image_record,
                    registry_creds,
                    layer_cache_enable=layer_cache_enable)
            except AnchoreException as e:
                event = events.AnalyzeImageFail(user_id=userId,
                                                image_digest=imageDigest,
                                                error=e.to_dict())
                raise

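            # the imageId (distinct from the digest) is required later to load
            # the image into the policy engine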
            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn(
                    "could not get imageId after analysis or from image record - exception: "
                    + str(err))

            try:
                logger.debug("archiving analysis data")
                rc = catalog.put_document(user_auth, 'analysis_data',
                                          imageDigest, image_data)
            except Exception as e:
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog', cause=e)
                event = events.ArchiveAnalysisFail(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=err.to_dict())
                raise err

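            # only proceed with content extraction and the policy-engine load
            # if the analysis archive upload succeeded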
            if rc:
                try:
                    logger.debug("extracting image content data")
                    image_content_data = {}
                    for content_type in anchore_engine.services.common.image_content_types + anchore_engine.services.common.image_metadata_types:
                        try:
                            image_content_data[content_type] = anchore_engine.services.common.extract_analyzer_content(
                                image_data, content_type, manifest=manifest)
                        except Exception:
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog.put_document(user_auth,
                                                  'image_content_data',
                                                  imageDigest,
                                                  image_content_data)

                    logger.debug("adding image analysis data to image_record")
                    anchore_engine.services.common.update_image_record_with_analysis_data(
                        image_record, image_data)

                except Exception as err:
                    logger.warn(
                        "could not store image content metadata to archive - exception: "
                        + str(err))

                logger.debug("adding image record to policy-engine service (" +
                             str(userId) + " : " + str(imageId) + ")")
                try:
                    if not imageId:
                        raise Exception("cannot add image to policy engine without an imageId")

                    localconfig = anchore_engine.configuration.localconfig.get_config()
                    verify = localconfig['internal_ssl_verify']

                    client = anchore_engine.clients.policy_engine.get_client(
                        user=system_user_auth[0],
                        password=system_user_auth[1],
                        verify_ssl=verify)

                    try:
                        logger.debug(
                            "clearing any existing record in policy engine for image: "
                            + str(imageId))
                        rc = client.delete_image(user_id=userId,
                                                 image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " +
                                    str(err))

                    logger.info('Loading image: {} {}'.format(userId, imageId))
                    request = ImageIngressRequest(
                        user_id=userId,
                        image_id=imageId,
                        fetch_url='catalog://' + str(userId) +
                        '/analysis_data/' + str(imageDigest))
                    logger.debug("policy engine request: " + str(request))
                    resp = client.ingress_image(request)
                    logger.debug("policy engine image add response: " +
                                 str(resp))

                except Exception as err:
                    logger.exception("failed to add image to policy engine")
                    newerr = PolicyEngineClientError(
                        msg='Adding image to policy-engine failed',
                        cause=str(err))
                    event = events.LoadAnalysisFail(user_id=userId,
                                                    image_digest=imageDigest,
                                                    error=newerr.to_dict())
                    raise newerr

                logger.debug("updating image catalog record analysis_status")

                last_analysis_status = image_record['analysis_status']
                image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
                image_record['analyzed_at'] = int(time.time())
                rc = catalog.update_image(user_auth, imageDigest, image_record)

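                # notify subscribers of the analysis_status transition for
                # each tag of the image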
                try:
                    annotations = {}
                    try:
                        if image_record.get('annotations', '{}'):
                            annotations = json.loads(
                                image_record.get('annotations', '{}'))
                    except Exception as err:
                        logger.warn(
                            "could not parse annotations from json - exception: "
                            + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail[
                            'repo'] + ":" + image_detail['tag']
                        last_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': last_analysis_status,
                            'annotations': annotations
                        }
                        curr_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': image_record['analysis_status'],
                            'annotations': annotations
                        }
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations

                        rc = anchore_engine.subsys.notifications.queue_notification(
                            userId, fulltag, 'analysis_update', npayload)
                except Exception as err:
                    logger.warn(
                        "failed to enqueue notification on image analysis state update - exception: "
                        + str(err))

            else:
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog',
                    cause='Invalid response from catalog API - {}'.format(
                        str(rc)))
                event = events.ArchiveAnalysisFail(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=err.to_dict())
                raise err

            logger.info("analysis complete: " + str(userId) + " : " +
                        str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

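            # record the analysis wall-clock time in the service metrics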
            try:
                run_time = float(time.time() - timer)
                #current_avg_count = current_avg_count + 1.0
                #new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                #current_avg = new_avg

                anchore_engine.subsys.metrics.histogram_observe(
                    'anchore_analysis_time_seconds',
                    run_time,
                    buckets=[
                        1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0,
                        1800.0, 3600.0
                    ],
                    status="success")
                #anchore_engine.subsys.metrics.counter_inc('anchore_images_analyzed_total')

                #localconfig = anchore_engine.configuration.localconfig.get_config()
                #service_record = {'hostid': localconfig['host_id'], 'servicename': servicename}
                #anchore_engine.subsys.servicestatus.set_status(service_record, up=True, available=True, detail={'avg_analysis_time_sec': current_avg, 'total_analysis_count': current_avg_count}, update_db=True)

            except Exception as err:
                logger.warn(str(err))

        except Exception as err:
            run_time = float(time.time() - timer)
            logger.exception("problem analyzing image - exception: " +
                             str(err))
            anchore_engine.subsys.metrics.histogram_observe(
                'anchore_analysis_time_seconds',
                run_time,
                buckets=[
                    1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0,
                    3600.0
                ],
                status="fail")
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)
        finally:
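            # persist any failure event recorded above; event creation errors
            # must not mask the original analysis failure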
            if event:
                try:
                    catalog.add_event(user_auth, event)
                except Exception:
                    logger.error(
                        'Ignoring error creating analysis failure event')

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return True
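
# A minimal sketch (illustrative; the state names are assumptions, not the
# real anchore_engine.subsys.taskstate implementation) of the state machine
# driven above: an image moves from a base state through a working state to
# either a complete or a fault state.
STATES = {
    'analyze': {
        'base': 'not_analyzed',
        'working': 'analyzing',
        'complete': 'analyzed',
        'fault': 'analysis_failed',
    },
}

def base_state(task_type):
    return STATES[task_type]['base']

def working_state(task_type):
    return STATES[task_type]['working']

def complete_state(task_type):
    return STATES[task_type]['complete']

def fault_state(task_type):
    return STATES[task_type]['fault']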