def policy_engine_image_load(client, imageUserId, imageId, imageDigest):
    resp = None

    try:
        request = ImageIngressRequest()
        request.user_id = imageUserId
        request.image_id = imageId
        request.fetch_url = 'catalog://' + str(imageUserId) + '/analysis_data/' + str(imageDigest)
        logger.debug("policy engine request (image add): " + str(request))
        resp = client.ingress_image(request)
        logger.spew("policy engine response (image add): " + str(resp))
    except Exception as err:
        logger.error("failed to add/check image: " + str(err))
        raise err

    return resp
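A minimal usage sketch for the helper above, assuming a client built with anchore_engine.clients.policy_engine.get_client (as in the analyzer worker below); the credentials, image id, and digest are hypothetical placeholders:

# Illustrative only: placeholder credentials and ids, not from the source.
client = anchore_engine.clients.policy_engine.get_client(
    user="admin", password="example-password", verify_ssl=False)

# On success, resp holds the policy engine's ingress response; on failure the
# helper logs the error and re-raises.
resp = policy_engine_image_load(
    client, imageUserId="admin", imageId="0123456789ab",
    imageDigest="sha256:deadbeef")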
def ingress_image(ingress_request):
    """
    :param ingress_request: json object specifying the identity of the image to sync
    :return: status result for image load
    """
    if not connexion.request.is_json:
        abort(400)

    req = ImageIngressRequest.from_dict(ingress_request)
    try:
        # Try this synchronously for now to see how slow it really is
        t = ImageLoadTask(req.user_id, req.image_id, url=req.fetch_url)
        result = t.execute()
        resp = ImageIngressResponse()
        if not result:
            resp.status = 'loaded'
        else:
            # We're doing a sync call above, so just send 'loaded'. It should be 'accepted' once async works.
            resp.status = 'loaded'
        return resp.to_dict(), 200
    except Exception as e:
        abort(500, 'Internal error processing image analysis import')
def ingress_image(ingress_request):
    """
    :param ingress_request: json object specifying the identity of the image to sync
    :return: status result for image load
    """
    req = ImageIngressRequest.from_json(ingress_request)
    if not req.user_id:
        raise ValueError("user_id")
    if not req.image_id:
        raise ValueError("image_id")

    try:
        # Try this synchronously for now to see how slow it really is
        conn_timeout = ApiRequestContextProxy.get_service().configuration.get(
            "catalog_client_conn_timeout", DEFAULT_CACHE_CONN_TIMEOUT
        )
        read_timeout = ApiRequestContextProxy.get_service().configuration.get(
            "catalog_client_read_timeout", DEFAULT_CACHE_READ_TIMEOUT
        )
        t = ImageLoadTask(
            req.user_id,
            req.image_id,
            url=req.fetch_url,
            content_conn_timeout=conn_timeout,
            content_read_timeout=read_timeout,
        )
        result = t.execute()
        resp = ImageIngressResponse()
        if not result:
            resp.status = "loaded"
        else:
            # We're doing a sync call above, so just send 'loaded'. It should be 'accepted' once async works.
            resp.status = "loaded"
        resp.problems = list()
        return resp.to_json(), 200
    except Exception as e:
        log.exception("Error loading image into policy engine")
        return make_response_error(e, in_httpcode=500), 500
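For reference, a sketch of the request body the updated handler consumes; the field names come from ImageIngressRequest above, while the concrete values and the direct call are hypothetical (it assumes to_json() yields a plain dict, as the serialization tests below assert):

# Hypothetical request body; field names match ImageIngressRequest above.
body = {
    "user_id": "admin",
    "image_id": "0123456789ab",
    "fetch_url": "catalog://admin/analysis_data/sha256:deadbeef",
}

# The handler returns (payload, http_code); a successful synchronous load
# reports 'loaded' (per the comment above, 'accepted' is reserved for a
# future async path).
payload, code = ingress_image(body)
assert code == 200 and payload["status"] == "loaded"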
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    global current_avg, current_avg_count

    timer = int(time.time())
    try:
        record = qobj['data']
        userId = record['userId']
        image_record = record['image_record']
        manifest = record['manifest']
        imageDigest = image_record['imageDigest']
        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn("dequeued image cannot be fetched from catalog - skipping analysis (" + str(imageDigest) + ") - exception: " + str(err))
            return True

        logger.info("image dequeued for analysis: " + str(userId) + " : " + str(imageDigest))

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            last_analysis_status = image_record['analysis_status']
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.working_state('analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # disable the webhook call for image state transition to 'analyzing'
            #try:
            #    for image_detail in image_record['image_detail']:
            #        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
            #        npayload = {
            #            'last_eval': {'imageDigest': imageDigest, 'analysis_status': last_analysis_status},
            #            'curr_eval': {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status']},
            #        }
            #        rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
            #except Exception as err:
            #    logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))

            # actually do analysis
            registry_creds = catalog.get_registry(user_auth)
            image_data = perform_analyze(userId, manifest, image_record, registry_creds, layer_cache_enable=layer_cache_enable)

            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn("could not get imageId after analysis or from image record - exception: " + str(err))

            logger.debug("archiving analysis data")
            rc = catalog.put_document(user_auth, 'analysis_data', imageDigest, image_data)

            if rc:
                try:
                    logger.debug("extracting image content data")
                    image_content_data = {}
                    for content_type in anchore_engine.services.common.image_content_types:
                        try:
                            image_content_data[content_type] = anchore_engine.services.common.extract_analyzer_content(image_data, content_type)
                        except:
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog.put_document(user_auth, 'image_content_data', imageDigest, image_content_data)

                    try:
                        logger.debug("adding image analysis data to image_record")
                        anchore_engine.services.common.update_image_record_with_analysis_data(image_record, image_data)
                    except Exception as err:
                        raise err

                except Exception as err:
                    logger.warn("could not store image content metadata to archive - exception: " + str(err))

                logger.debug("adding image record to policy-engine service (" + str(userId) + " : " + str(imageId) + ")")
                try:
                    if not imageId:
                        raise Exception("cannot add image to policy engine without an imageId")

                    localconfig = anchore_engine.configuration.localconfig.get_config()
                    verify = localconfig['internal_ssl_verify']

                    client = anchore_engine.clients.policy_engine.get_client(user=system_user_auth[0], password=system_user_auth[1], verify_ssl=verify)

                    try:
                        logger.debug("clearing any existing record in policy engine for image: " + str(imageId))
                        rc = client.delete_image(user_id=userId, image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " + str(err))

                    request = ImageIngressRequest()
                    request.user_id = userId
                    request.image_id = imageId
                    request.fetch_url = 'catalog://' + str(userId) + '/analysis_data/' + str(imageDigest)
                    logger.debug("policy engine request: " + str(request))
                    resp = client.ingress_image(request)
                    logger.debug("policy engine image add response: " + str(resp))

                    try:
                        # force a fresh CVE scan
                        resp = client.get_image_vulnerabilities(user_id=userId, image_id=imageId, force_refresh=True)
                    except Exception as err:
                        logger.warn("post analysis CVE scan failed for image: " + str(imageId))

                except Exception as err:
                    raise Exception("adding image to policy-engine failed - exception: " + str(err))

                logger.debug("updating image catalog record analysis_status")
                last_analysis_status = image_record['analysis_status']
                image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
                rc = catalog.update_image(user_auth, imageDigest, image_record)

                try:
                    annotations = {}
                    try:
                        # annotations are stored as a JSON string on the image record,
                        # so the default must be a parseable string, not a dict
                        annotations = json.loads(image_record.get('annotations', '{}'))
                    except Exception as err:
                        logger.warn("could not marshal annotations from json - exception: " + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
                        last_payload = {'imageDigest': imageDigest, 'analysis_status': last_analysis_status, 'annotations': annotations}
                        curr_payload = {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status'], 'annotations': annotations}
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations
                        rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
                except Exception as err:
                    logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))

            else:
                raise Exception("analysis archive failed to store")

            logger.info("analysis complete: " + str(userId) + " : " + str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            try:
                run_time = float(time.time() - timer)
                current_avg_count = current_avg_count + 1.0
                new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                current_avg = new_avg
            except:
                pass

        except Exception as err:
            logger.exception("problem analyzing image - exception: " + str(err))
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return True
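The timing block at the end of process_analyzer_job maintains an incremental running average of analysis run times; the same update rule in isolation (a standalone sketch, not from the source):

# Incremental mean: avg_n = avg_{n-1} + (x_n - avg_{n-1}) / n
# keeps a running average without storing individual samples.
current_avg = 0.0
current_avg_count = 0.0

def record_run_time(run_time):
    global current_avg, current_avg_count
    current_avg_count += 1.0
    current_avg += (run_time - current_avg) / current_avg_count
    return current_avg

assert record_run_time(10.0) == 10.0  # first sample: average is the sample
assert record_run_time(20.0) == 15.0  # (10 + 20) / 2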
def process_analyzer_job(system_user_auth, qobj):
    global current_avg, current_avg_count

    timer = int(time.time())
    try:
        record = qobj['data']
        userId = record['userId']
        image_record = record['image_record']
        imageDigest = image_record['imageDigest']
        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn("dequeued image cannot be fetched from catalog - skipping analysis (" + str(imageDigest) + ") - exception: " + str(err))
            return True

        logger.info("image dequeued for analysis: " + str(userId) + " : " + str(imageDigest))

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            image_record['analysis_status'] = anchore_engine.subsys.taskstate.working_state('analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # actually do analysis
            for image_detail in image_record['image_detail']:
                pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['digest']
                fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']

                imageId = None
                if 'imageId' in image_detail and image_detail['imageId']:
                    imageId = image_detail['imageId']

                logger.info("analysis starting: " + str(userId) + " : " + str(imageDigest) + " : " + str(fulltag) + " : " + str(imageId))

                logger.spew("TIMING MARKX: " + str(int(time.time()) - timer))

                registry_creds = catalog.get_registry(user_auth)
                image_data, query_data = perform_analyze(userId, pullstring, fulltag, image_detail, registry_creds)

                logger.spew("TIMING MARKY: " + str(int(time.time()) - timer))

                logger.debug("archiving query data")
                rc = catalog.put_document(user_auth, 'query_data', imageDigest, query_data)
                if rc:
                    logger.debug("storing image query data to catalog")
                else:
                    raise Exception("query archive failed to store")

                if not imageId:
                    try:
                        imageId = image_data[0]['image']['imageId']
                    except Exception as err:
                        logger.warn("could not get imageId after analysis or from image record - exception: " + str(err))

                logger.debug("archiving analysis data")
                rc = catalog.put_document(user_auth, 'analysis_data', imageDigest, image_data)
                if rc:
                    try:
                        logger.debug("extracting image content data")
                        image_content_data = {}
                        for content_type in anchore_engine.services.common.image_content_types:
                            try:
                                image_content_data[content_type] = anchore_engine.services.common.extract_analyzer_content(image_data, content_type)
                            except:
                                image_content_data[content_type] = {}

                        if image_content_data:
                            logger.debug("adding image content data to archive")
                            rc = catalog.put_document(user_auth, 'image_content_data', imageDigest, image_content_data)

                        image_summary_data = {}
                        try:
                            image_summary_data = anchore_engine.services.common.extract_analyzer_content(image_data, 'metadata')
                        except:
                            image_summary_data = {}
                        if image_summary_data:
                            logger.debug("adding image summary data to archive")
                            rc = catalog.put_document(user_auth, 'image_summary_data', imageDigest, image_summary_data)

                    except Exception as err:
                        logger.warn("could not store image content metadata to archive - exception: " + str(err))

                    logger.debug("adding image record to policy-engine service (" + str(userId) + " : " + str(imageId) + ")")
                    try:
                        if not imageId:
                            raise Exception("cannot add image to policy engine without an imageId")

                        localconfig = anchore_engine.configuration.localconfig.get_config()
                        verify = localconfig['internal_ssl_verify']

                        client = anchore_engine.clients.policy_engine.get_client(user=system_user_auth[0], password=system_user_auth[1], verify_ssl=verify)

                        try:
                            logger.debug("clearing any existing record in policy engine for image: " + str(imageId))
                            rc = client.delete_image(user_id=userId, image_id=imageId)
                        except Exception as err:
                            logger.warn("exception on pre-delete - exception: " + str(err))

                        request = ImageIngressRequest()
                        request.user_id = userId
                        request.image_id = imageId
                        request.fetch_url = 'catalog://' + str(userId) + '/analysis_data/' + str(imageDigest)
                        logger.debug("policy engine request: " + str(request))
                        resp = client.ingress_image(request)
                        logger.debug("policy engine image add response: " + str(resp))

                        try:
                            # force a fresh CVE scan
                            resp = client.get_image_vulnerabilities(user_id=userId, image_id=imageId, force_refresh=True)
                        except Exception as err:
                            logger.warn("post analysis CVE scan failed for image: " + str(imageId))

                    except Exception as err:
                        raise Exception("adding image to policy-engine failed - exception: " + str(err))

                    logger.debug("updating image catalog record analysis_status")
                    image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
                    rc = catalog.update_image(user_auth, imageDigest, image_record)
                else:
                    raise Exception("analysis archive failed to store")

                logger.info("analysis complete: " + str(userId) + " : " + str(imageDigest) + " : " + str(fulltag))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            try:
                run_time = float(time.time() - timer)
                current_avg_count = current_avg_count + 1.0
                new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                current_avg = new_avg
            except:
                pass

        except Exception as err:
            logger.exception("problem analyzing image - exception: " + str(err))
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return True
def test_ingress_request():
    """
    Simple serialization test
    :return:
    """
    r = ImageIngressRequest()
    r.user_id = 'user'
    r.image_id = 'image1'
    r.fetch_url = 'catalog://something.com/user/image_analysis/image1'
    assert r.to_json() == {
        'user_id': 'user',
        'image_id': 'image1',
        'fetch_url': 'catalog://something.com/user/image_analysis/image1'
    }

    r = ImageIngressRequest()
    r.user_id = 'user'
    r.image_id = 'image1'
    r.fetch_url = 'https://someserver.com/file'
    assert r.to_json() == {
        'user_id': 'user',
        'image_id': 'image1',
        'fetch_url': 'https://someserver.com/file'
    }

    r = ImageIngressRequest()
    r.user_id = 'user'
    r.image_id = 'image1'
    r.fetch_url = 'file:///path/to/file'
    assert r.to_json() == {
        'user_id': 'user',
        'image_id': 'image1',
        'fetch_url': 'file:///path/to/file'
    }
def test_ingress_request():
    """
    Simple serialization test
    :return:
    """
    r = ImageIngressRequest()
    r.user_id = "user"
    r.image_id = "image1"
    r.fetch_url = "catalog://something.com/user/image_analysis/image1"
    assert r.to_json() == {
        "user_id": "user",
        "image_id": "image1",
        "fetch_url": "catalog://something.com/user/image_analysis/image1",
    }

    r = ImageIngressRequest()
    r.user_id = "user"
    r.image_id = "image1"
    r.fetch_url = "https://someserver.com/file"
    assert r.to_json() == {
        "user_id": "user",
        "image_id": "image1",
        "fetch_url": "https://someserver.com/file",
    }

    r = ImageIngressRequest()
    r.user_id = "user"
    r.image_id = "image1"
    r.fetch_url = "file:///path/to/file"
    assert r.to_json() == {
        "user_id": "user",
        "image_id": "image1",
        "fetch_url": "file:///path/to/file",
    }
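A natural companion check is a round trip through from_json, which the updated handler above already calls on the incoming dict; a hypothetical sketch (assuming from_json accepts the dict produced by to_json):

def test_ingress_request_roundtrip():
    # Hypothetical round-trip: serialize, re-parse, and compare.
    r = ImageIngressRequest()
    r.user_id = "user"
    r.image_id = "image1"
    r.fetch_url = "file:///path/to/file"
    assert ImageIngressRequest.from_json(r.to_json()).to_json() == r.to_json()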