def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    """Dequeue one analysis job and run it end-to-end.

    Pulls the image record referenced by the queue object ``qobj``, runs the
    analyzer (``perform_analyze``), archives the results to the catalog,
    loads the analysis into the policy-engine service, updates the image's
    ``analysis_status``, and emits an ``analysis_update`` notification per
    tag. On analysis failure the image is moved to the fault state.

    Args:
        system_user_auth: (user, password) tuple for the system user; used to
            look up the owning user's credentials and to talk to the
            policy engine.
        qobj: dequeued queue object; ``qobj['data']`` must contain
            ``userId``, ``image_record``, and ``manifest``.
        layer_cache_enable: passed through to ``perform_analyze``.

    Returns:
        True in all handled cases (including a skip when the image is no
        longer present in the catalog).

    Raises:
        Exception: re-raised if the outer job-processing setup itself fails
            (e.g. the queue payload is malformed).
    """
    global current_avg, current_avg_count

    timer = int(time.time())
    try:
        record = qobj['data']
        userId = record['userId']
        image_record = record['image_record']
        manifest = record['manifest']
        imageDigest = image_record['imageDigest']
        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB; a deleted image makes the
        # whole job a no-op rather than an error
        try:
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " +
                    str(imageDigest))

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            # mark the image as 'analyzing' in the catalog before starting
            last_analysis_status = image_record['analysis_status']
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.working_state(
                    'analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # NOTE: the webhook notification for the state transition to
            # 'analyzing' is intentionally disabled; only the completion
            # notification further below is sent.

            # actually do analysis
            registry_creds = catalog.get_registry(user_auth)
            image_data = perform_analyze(userId, manifest, image_record,
                                         registry_creds,
                                         layer_cache_enable=layer_cache_enable)

            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn(
                    "could not get imageId after analysis or from image record - exception: "
                    + str(err))

            logger.debug("archiving analysis data")
            rc = catalog.put_document(user_auth, 'analysis_data', imageDigest,
                                      image_data)

            if rc:
                # extraction/archival of per-content-type data is best-effort:
                # failures are logged but do not fail the analysis
                try:
                    logger.debug("extracting image content data")
                    image_content_data = {}
                    for content_type in anchore_engine.services.common.image_content_types:
                        try:
                            image_content_data[
                                content_type] = anchore_engine.services.common.extract_analyzer_content(
                                    image_data, content_type)
                        except Exception:
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog.put_document(user_auth,
                                                  'image_content_data',
                                                  imageDigest,
                                                  image_content_data)

                    try:
                        logger.debug(
                            "adding image analysis data to image_record")
                        anchore_engine.services.common.update_image_record_with_analysis_data(
                            image_record, image_data)
                    except Exception as err:
                        raise err

                except Exception as err:
                    logger.warn(
                        "could not store image content metadata to archive - exception: "
                        + str(err))

                # loading into the policy engine IS fatal for the job: any
                # failure here propagates and drives the fault-state path
                logger.debug("adding image record to policy-engine service (" +
                             str(userId) + " : " + str(imageId) + ")")
                try:
                    if not imageId:
                        raise Exception(
                            "cannot add image to policy engine without an imageId"
                        )

                    localconfig = anchore_engine.configuration.localconfig.get_config(
                    )
                    verify = localconfig['internal_ssl_verify']

                    client = anchore_engine.clients.policy_engine.get_client(
                        user=system_user_auth[0],
                        password=system_user_auth[1],
                        verify_ssl=verify)

                    # best-effort pre-delete so a re-analysis replaces any
                    # stale policy-engine record
                    try:
                        logger.debug(
                            "clearing any existing record in policy engine for image: "
                            + str(imageId))
                        rc = client.delete_image(user_id=userId,
                                                 image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " +
                                    str(err))

                    request = ImageIngressRequest()
                    request.user_id = userId
                    request.image_id = imageId
                    request.fetch_url = 'catalog://' + str(
                        userId) + '/analysis_data/' + str(imageDigest)
                    logger.debug("policy engine request: " + str(request))
                    resp = client.ingress_image(request)
                    logger.debug("policy engine image add response: " +
                                 str(resp))

                    try:
                        # force a fresh CVE scan
                        resp = client.get_image_vulnerabilities(
                            user_id=userId,
                            image_id=imageId,
                            force_refresh=True)
                    except Exception as err:
                        logger.warn("post analysis CVE scan failed for image: "
                                    + str(imageId))

                except Exception as err:
                    raise Exception(
                        "adding image to policy-engine failed - exception: " +
                        str(err))

                logger.debug("updating image catalog record analysis_status")
                last_analysis_status = image_record['analysis_status']
                image_record[
                    'analysis_status'] = anchore_engine.subsys.taskstate.complete_state(
                        'analyze')
                rc = catalog.update_image(user_auth, imageDigest, image_record)

                # notify subscribers about the status transition, once per tag
                try:
                    annotations = {}
                    try:
                        # BUG FIX: 'annotations' is stored as a JSON *string*;
                        # the old dict default ({}) made json.loads raise
                        # TypeError whenever the key was absent. Use a '{}'
                        # string default and skip parsing when empty, matching
                        # the newer variant of this function.
                        if image_record.get('annotations', '{}'):
                            annotations = json.loads(
                                image_record.get('annotations', '{}'))
                    except Exception as err:
                        logger.warn(
                            "could not marshal annotations from json - exception: "
                            + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail[
                            'repo'] + ":" + image_detail['tag']

                        last_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': last_analysis_status,
                            'annotations': annotations
                        }
                        curr_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': image_record['analysis_status'],
                            'annotations': annotations
                        }
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations
                        rc = anchore_engine.subsys.notifications.queue_notification(
                            userId, fulltag, 'analysis_update', npayload)
                except Exception as err:
                    logger.warn(
                        "failed to enqueue notification on image analysis state update - exception: "
                        + str(err))

            else:
                raise Exception("analysis archive failed to store")

            logger.info("analysis complete: " + str(userId) + " : " +
                        str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            # maintain a running average of analysis time in module globals;
            # purely informational, so failures here are ignored
            try:
                run_time = float(time.time() - timer)
                current_avg_count = current_avg_count + 1.0
                new_avg = current_avg + (
                    (run_time - current_avg) / current_avg_count)
                current_avg = new_avg
            except Exception:
                pass

        except Exception as err:
            # any failure in the analysis pipeline moves the image to fault
            logger.exception("problem analyzing image - exception: " +
                             str(err))
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.fault_state(
                    'analyze')
            image_record[
                'image_status'] = anchore_engine.subsys.taskstate.fault_state(
                    'image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
def process_analyzer_job(system_user_auth, qobj):
    """Dequeue one analysis job and run it per image tag (legacy variant).

    Unlike the manifest-based variants of this function elsewhere in the
    file, this version iterates over ``image_record['image_detail']`` and
    analyzes each registry/repo digest+tag entry separately, archiving
    query data, analysis data, content data, and summary data per pass.

    Args:
        system_user_auth: (user, password) tuple for the system user.
        qobj: dequeued queue object; ``qobj['data']`` must contain
            ``userId`` and ``image_record``.

    Returns:
        True in all handled cases (including a skip when the image is no
        longer present in the catalog).

    Raises:
        Exception: re-raised if outer job-processing setup fails.
    """
    global current_avg, current_avg_count

    timer = int(time.time())
    try:
        record = qobj['data']
        userId = record['userId']
        image_record = record['image_record']
        imageDigest = image_record['imageDigest']
        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn("dequeued image cannot be fetched from catalog - skipping analysis (" + str(
                imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " + str(imageDigest))

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            # mark the image 'analyzing' in the catalog before starting work
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.working_state('analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # actually do analysis

            # for pullstring in pullstrings.keys():
            for image_detail in image_record['image_detail']:
                # build the digest pullstring and the human-readable tag for
                # this image_detail entry
                pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['digest']
                fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']

                imageId = None
                if 'imageId' in image_detail and image_detail['imageId']:
                    imageId = image_detail['imageId']

                logger.info("analysis starting: " + str(userId) + " : " + str(imageDigest) + " : " + str(fulltag) + " : " + str(imageId))

                logger.spew("TIMING MARKX: " + str(int(time.time()) - timer))

                registry_creds = catalog.get_registry(user_auth)

                # this legacy analyzer returns both analysis and query data
                image_data, query_data = perform_analyze(userId, pullstring, fulltag, image_detail, registry_creds)

                logger.spew("TIMING MARKY: " + str(int(time.time()) - timer))

                # query data archival is mandatory: a store failure aborts
                logger.debug("archiving query data")
                rc = catalog.put_document(user_auth, 'query_data', imageDigest, query_data)
                if rc:
                    logger.debug("storing image query data to catalog")
                else:
                    raise Exception("query archive failed to store")

                # fall back to the imageId discovered by the analyzer when the
                # image_detail record did not carry one
                if not imageId:
                    try:
                        imageId = image_data[0]['image']['imageId']
                    except Exception as err:
                        logger.warn("could not get imageId after analysis or from image record - exception: " + str(err))

                logger.debug("archiving analysis data")
                rc = catalog.put_document(user_auth, 'analysis_data', imageDigest, image_data)
                if rc:
                    # content/summary extraction is best-effort; failures are
                    # logged but do not fail the analysis
                    try:
                        logger.debug("extracting image content data")
                        image_content_data = {}
                        for content_type in anchore_engine.services.common.image_content_types:
                            try:
                                image_content_data[content_type] = anchore_engine.services.common.extract_analyzer_content(image_data, content_type)
                            except:
                                image_content_data[content_type] = {}

                        if image_content_data:
                            logger.debug("adding image content data to archive")
                            rc = catalog.put_document(user_auth, 'image_content_data', imageDigest, image_content_data)

                        image_summary_data = {}
                        try:
                            image_summary_data = anchore_engine.services.common.extract_analyzer_content(image_data, 'metadata')
                        except:
                            image_summary_data = {}
                        if image_summary_data:
                            logger.debug("adding image summary data to archive")
                            rc = catalog.put_document(user_auth, 'image_summary_data', imageDigest, image_summary_data)

                    except Exception as err:
                        logger.warn("could not store image content metadata to archive - exception: " + str(err))

                    # loading into the policy engine IS fatal: failures here
                    # propagate and drive the fault-state path below
                    logger.debug("adding image record to policy-engine service (" + str(userId) + " : " + str(imageId) + ")")
                    try:
                        if not imageId:
                            raise Exception("cannot add image to policy engine without an imageId")

                        localconfig = anchore_engine.configuration.localconfig.get_config()
                        verify = localconfig['internal_ssl_verify']

                        client = anchore_engine.clients.policy_engine.get_client(user=system_user_auth[0], password=system_user_auth[1], verify_ssl=verify)
                        # best-effort pre-delete so re-analysis replaces any
                        # stale policy-engine record
                        try:
                            logger.debug("clearing any existing record in policy engine for image: " + str(imageId))
                            rc = client.delete_image(user_id=userId, image_id=imageId)
                        except Exception as err:
                            logger.warn("exception on pre-delete - exception: " + str(err))

                        request = ImageIngressRequest()
                        request.user_id = userId
                        request.image_id = imageId
                        request.fetch_url='catalog://'+str(userId)+'/analysis_data/'+str(imageDigest)
                        logger.debug("policy engine request: " + str(request))
                        resp = client.ingress_image(request)
                        logger.debug("policy engine image add response: " + str(resp))
                        try:
                            # force a fresh CVE scan
                            resp = client.get_image_vulnerabilities(user_id=userId, image_id=imageId, force_refresh=True)
                        except Exception as err:
                            logger.warn("post analysis CVE scan failed for image: " + str(imageId))

                    except Exception as err:
                        raise Exception("adding image to policy-engine failed - exception: " + str(err))

                    logger.debug("updating image catalog record analysis_status")
                    image_record['analysis_status'] = anchore_engine.subsys.taskstate.complete_state('analyze')
                    rc = catalog.update_image(user_auth, imageDigest, image_record)
                else:
                    raise Exception("analysis archive failed to store")

                logger.info("analysis complete: " + str(userId) + " : " + str(imageDigest) + " : " + str(fulltag))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            # maintain a running average of analysis time in module globals;
            # purely informational, so failures here are ignored
            try:
                run_time = float(time.time() - timer)
                current_avg_count = current_avg_count + 1.0
                new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                current_avg = new_avg
            except:
                pass

        except Exception as err:
            # any failure in the analysis pipeline moves the image to fault
            logger.exception("problem analyzing image - exception: " + str(err))
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    """Dequeue one analysis job and run it, emitting failure events.

    Newest variant of this function: takes the digest and manifest directly
    from the queue payload, skips images not in the base 'analyze' state,
    records prometheus-style timing histograms on both success and failure,
    and (in ``finally``) files a catalog event describing any failure stage
    (analyze, archive, or policy-engine load).

    Args:
        system_user_auth: (user, password) tuple for the system user.
        qobj: dequeued queue object; ``qobj['data']`` must contain
            ``userId``, ``imageDigest``, and ``manifest``.
        layer_cache_enable: passed through to ``perform_analyze``.

    Returns:
        True in all handled cases (including skips).

    Raises:
        Exception: re-raised if outer job-processing setup fails.
    """
    global servicename  #current_avg, current_avg_count

    timer = int(time.time())
    # 'event' holds a failure event to be filed with the catalog in the
    # finally block; it stays None on success
    event = None
    try:
        logger.debug('dequeued object: {}'.format(qobj))

        record = qobj['data']
        userId = record['userId']
        imageDigest = record['imageDigest']
        manifest = record['manifest']

        user_record = catalog.get_user(system_user_auth, userId)
        user_auth = (user_record['userId'], user_record['password'])

        # check to make sure image is still in DB
        try:
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                image_record = image_records[0]
            else:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " +
                    str(imageDigest))
        # only analyze images still in the base state; anything else has
        # already been picked up (or completed) elsewhere
        if image_record[
                'analysis_status'] != anchore_engine.subsys.taskstate.base_state(
                    'analyze'):
            logger.debug(
                "dequeued image is not in base state - skipping analysis")
            return (True)

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            # mark the image as 'analyzing' in the catalog before starting
            last_analysis_status = image_record['analysis_status']
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.working_state(
                    'analyze')
            rc = catalog.update_image(user_auth, imageDigest, image_record)

            # disable the webhook call for image state transistion to 'analyzing'
            #try:
            #    for image_detail in image_record['image_detail']:
            #        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
            #        npayload = {
            #            'last_eval': {'imageDigest': imageDigest, 'analysis_status': last_analysis_status},
            #            'curr_eval': {'imageDigest': imageDigest, 'analysis_status': image_record['analysis_status']},
            #        }
            #        rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)
            #except Exception as err:
            #    logger.warn("failed to enqueue notification on image analysis state update - exception: " + str(err))

            # actually do analysis
            registry_creds = catalog.get_registry(user_auth)
            try:
                image_data = perform_analyze(
                    userId,
                    manifest,
                    image_record,
                    registry_creds,
                    layer_cache_enable=layer_cache_enable)
            except AnchoreException as e:
                # record the analyze-stage failure; filed in 'finally'
                event = events.AnalyzeImageFail(user_id=userId,
                                                image_digest=imageDigest,
                                                error=e.to_dict())
                raise

            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn(
                    "could not get imageId after analysis or from image record - exception: "
                    + str(err))

            try:
                logger.debug("archiving analysis data")
                rc = catalog.put_document(user_auth, 'analysis_data',
                                          imageDigest, image_data)
            except Exception as e:
                # record the archive-stage failure; filed in 'finally'
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog', cause=e)
                event = events.ArchiveAnalysisFail(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=err.to_dict())
                raise err

            if rc:
                # content extraction is best-effort: failures are logged but
                # do not fail the analysis; note the metadata types are
                # included here (with the manifest) unlike older variants
                try:
                    logger.debug("extracting image content data")
                    image_content_data = {}
                    for content_type in anchore_engine.services.common.image_content_types + anchore_engine.services.common.image_metadata_types:
                        try:
                            image_content_data[
                                content_type] = anchore_engine.services.common.extract_analyzer_content(
                                    image_data, content_type, manifest=manifest)
                        except:
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog.put_document(user_auth,
                                                  'image_content_data',
                                                  imageDigest,
                                                  image_content_data)

                    try:
                        logger.debug(
                            "adding image analysis data to image_record")
                        anchore_engine.services.common.update_image_record_with_analysis_data(
                            image_record, image_data)
                    except Exception as err:
                        raise err

                except Exception as err:
                    logger.warn(
                        "could not store image content metadata to archive - exception: "
                        + str(err))

                # loading into the policy engine IS fatal for the job: any
                # failure raises PolicyEngineClientError and files an event
                logger.debug("adding image record to policy-engine service (" +
                             str(userId) + " : " + str(imageId) + ")")
                try:
                    if not imageId:
                        raise Exception(
                            "cannot add image to policy engine without an imageId"
                        )

                    localconfig = anchore_engine.configuration.localconfig.get_config(
                    )
                    verify = localconfig['internal_ssl_verify']

                    client = anchore_engine.clients.policy_engine.get_client(
                        user=system_user_auth[0],
                        password=system_user_auth[1],
                        verify_ssl=verify)
                    # best-effort pre-delete so re-analysis replaces any stale
                    # policy-engine record
                    try:
                        logger.debug(
                            "clearing any existing record in policy engine for image: "
                            + str(imageId))
                        rc = client.delete_image(user_id=userId,
                                                 image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " +
                                    str(err))

                    logger.info('Loading image: {} {}'.format(userId, imageId))
                    request = ImageIngressRequest(
                        user_id=userId,
                        image_id=imageId,
                        fetch_url='catalog://' + str(userId) +
                        '/analysis_data/' + str(imageDigest))
                    logger.debug("policy engine request: " + str(request))
                    resp = client.ingress_image(request)
                    logger.debug("policy engine image add response: " +
                                 str(resp))

                except Exception as err:
                    import traceback
                    traceback.print_exc()
                    newerr = PolicyEngineClientError(
                        msg='Adding image to policy-engine failed',
                        cause=str(err))
                    # record the load-stage failure; filed in 'finally'
                    event = events.LoadAnalysisFail(user_id=userId,
                                                    image_digest=imageDigest,
                                                    error=newerr.to_dict())
                    raise newerr

                logger.debug("updating image catalog record analysis_status")
                last_analysis_status = image_record['analysis_status']
                image_record[
                    'analysis_status'] = anchore_engine.subsys.taskstate.complete_state(
                        'analyze')
                image_record['analyzed_at'] = int(time.time())
                rc = catalog.update_image(user_auth, imageDigest, image_record)

                # notify subscribers about the status transition, once per tag
                try:
                    annotations = {}
                    try:
                        # 'annotations' is stored as a JSON string; guard for
                        # empty/absent before parsing
                        if image_record.get('annotations', '{}'):
                            annotations = json.loads(
                                image_record.get('annotations', '{}'))
                    except Exception as err:
                        logger.warn(
                            "could not marshal annotations from json - exception: "
                            + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail[
                            'repo'] + ":" + image_detail['tag']

                        last_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': last_analysis_status,
                            'annotations': annotations
                        }
                        curr_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': image_record['analysis_status'],
                            'annotations': annotations
                        }
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations
                        rc = anchore_engine.subsys.notifications.queue_notification(
                            userId, fulltag, 'analysis_update', npayload)
                except Exception as err:
                    logger.warn(
                        "failed to enqueue notification on image analysis state update - exception: "
                        + str(err))

            else:
                # put_document returned a falsy rc: treat as an archive failure
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog',
                    cause='Invalid response from catalog API - {}'.format(
                        str(rc)))
                event = events.ArchiveAnalysisFail(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=err.to_dict())
                raise err

            logger.info("analysis complete: " + str(userId) + " : " +
                        str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            # record the success-path timing histogram; informational only,
            # so failures here are logged and ignored
            try:
                run_time = float(time.time() - timer)
                #current_avg_count = current_avg_count + 1.0
                #new_avg = current_avg + ((run_time - current_avg) / current_avg_count)
                #current_avg = new_avg

                anchore_engine.subsys.metrics.histogram_observe(
                    'anchore_analysis_time_seconds',
                    run_time,
                    buckets=[
                        1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0,
                        1800.0, 3600.0
                    ],
                    status="success")

                #anchore_engine.subsys.metrics.counter_inc('anchore_images_analyzed_total')

                #localconfig = anchore_engine.configuration.localconfig.get_config()
                #service_record = {'hostid': localconfig['host_id'], 'servicename': servicename}
                #anchore_engine.subsys.servicestatus.set_status(service_record, up=True, available=True, detail={'avg_analysis_time_sec': current_avg, 'total_analysis_count': current_avg_count}, update_db=True)

            except Exception as err:
                logger.warn(str(err))
                pass

        except Exception as err:
            # failure path: record the fail-status histogram and move the
            # image to the fault state
            run_time = float(time.time() - timer)
            logger.exception("problem analyzing image - exception: " +
                             str(err))
            anchore_engine.subsys.metrics.histogram_observe(
                'anchore_analysis_time_seconds',
                run_time,
                buckets=[
                    1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0,
                    3600.0
                ],
                status="fail")
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.fault_state(
                    'analyze')
            image_record[
                'image_status'] = anchore_engine.subsys.taskstate.fault_state(
                    'image_status')
            rc = catalog.update_image(user_auth, imageDigest, image_record)
        finally:
            # file any stage-failure event with the catalog, best-effort
            if event:
                try:
                    catalog.add_event(user_auth, event)
                except:
                    logger.error(
                        'Ignoring error creating analysis failure event')

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
def images(request_inputs):
    """Handle GET/POST on the images resource (legacy variant).

    GET lists image records matching the digest/tag/imageId/imageDigest
    selectors from the request body. POST adds a tag-addressed image to the
    catalog, auto-subscribes its tags, and (re)sets its analysis state.

    Args:
        request_inputs: dict with keys 'auth' ((user, pw) tuple), 'method'
            ('GET' or 'POST'), 'bodycontent' (JSON string or falsy), and
            'params' (query-param dict; 'history' and 'force' are read).

    Returns:
        (return_object, httpcode) tuple: a list of response-image dicts on
        success, or an error-response dict with the derived HTTP code.
    """
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = {}
    # default to 500; handlers below overwrite on success or set a more
    # specific code before raising
    httpcode = 500

    userId, pw = user_auth

    digest = tag = imageId = imageDigest = dockerfile = None

    history = False
    if params and 'history' in params:
        history = params['history']

    force = False
    if params and 'force' in params:
        force = params['force']

    if bodycontent:
        jsondata = json.loads(bodycontent)

        # note: digest/tag/imageDigest/imageId are mutually exclusive here
        # (first match wins)
        if 'digest' in jsondata:
            digest = jsondata['digest']
        elif 'tag' in jsondata:
            tag = jsondata['tag']
        elif 'imageDigest' in jsondata:
            imageDigest = jsondata['imageDigest']
        elif 'imageId' in jsondata:
            imageId = jsondata['imageId']

        if 'dockerfile' in jsondata:
            dockerfile = jsondata['dockerfile']

    try:
        if method == 'GET':
            logger.debug("handling GET: ")
            try:
                return_object = []
                image_records = catalog.get_image(user_auth, digest=digest, tag=tag, imageId=imageId, imageDigest=imageDigest, history=history)
                for image_record in image_records:
                    return_object.append(make_response_image(user_auth, image_record, params))
                httpcode = 200
            except Exception as err:
                raise err

        elif method == 'POST':
            logger.debug("handling POST: ")

            # if not, add it and set it up to be analyzed
            if not tag:
                # dont support digest add, yet
                httpcode = 500
                raise Exception("digest add unsupported")
            # add the image to the catalog
            image_record = catalog.add_image(user_auth, tag=tag, dockerfile=dockerfile)
            imageDigest = image_record['imageDigest']

            # finally, do any state updates and return
            if image_record:
                #logger.debug("fetched image_info: " + json.dumps(image_record, indent=4))
                logger.debug("added image: " + str(imageDigest))

                # auto-subscribe for NOW
                for image_detail in image_record['image_detail']:
                    fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']

                    # collect the subscription types already present for this tag
                    foundtypes = []
                    try:
                        subscription_records = catalog.get_subscription(user_auth, subscription_key=fulltag)
                        for subscription_record in subscription_records:
                            if subscription_record['subscription_key'] == fulltag:
                                foundtypes.append(subscription_record['subscription_type'])
                    except Exception as err:
                        logger.warn("cannot load subscription records - exception: " + str(err))

                    sub_types = anchore_engine.services.common.subscription_types
                    for sub_type in sub_types:
                        if sub_type in ['repo_update']:
                            continue
                        if sub_type not in foundtypes:
                            # add missing subscriptions; only 'tag_update'
                            # starts active. On add failure, fall back to an
                            # update, and swallow that failure too.
                            try:
                                default_active = False
                                if sub_type in ['tag_update']:
                                    default_active = True
                                catalog.add_subscription(user_auth, {'active': default_active, 'subscription_type': sub_type, 'subscription_key': fulltag})
                            except:
                                try:
                                    catalog.update_subscription(user_auth, {'subscription_type': sub_type, 'subscription_key': fulltag})
                                except:
                                    pass

                # set the state of the image appropriately
                currstate = image_record['analysis_status']
                if not currstate:
                    newstate = taskstate.init_state('analyze', None)
                elif force or currstate == taskstate.fault_state('analyze'):
                    newstate = taskstate.reset_state('analyze')
                elif image_record['image_status'] == 'deleted':
                    newstate = taskstate.reset_state('analyze')
                else:
                    newstate = currstate

                if (currstate != newstate) or (force):
                    logger.debug("state change detected: " + str(currstate) + " : " + str(newstate))
                    image_record.update({'image_status': 'active', 'analysis_status': newstate})
                    updated_image_record = catalog.update_image(user_auth, imageDigest, image_record)
                    if updated_image_record:
                        image_record = updated_image_record[0]
                else:
                    logger.debug("no state change detected: " + str(currstate) + " : " + str(newstate))

                httpcode = 200
                return_object = [make_response_image(user_auth, image_record, params)]
            else:
                httpcode = 500
                raise Exception("failed to add image")

    except Exception as err:
        # convert any handler exception into an error response object; the
        # final code comes from the response, seeded by httpcode above
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.services.common.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
def images(request_inputs):
    """Handle GET/POST on the images resource (newer variant).

    GET lists image records; a 'fulltag' query param overrides any body
    selectors. POST adds a tag-addressed image (optionally pinned to a
    digest with force), auto-subscribes its tags according to the
    'autosubscribe' param, and (re)sets its analysis state.

    Args:
        request_inputs: dict with keys 'auth' ((user, pw) tuple), 'method'
            ('GET' or 'POST'), 'bodycontent' (JSON string or falsy), and
            'params' (query-param dict; 'history', 'force', 'autosubscribe',
            and 'fulltag' are read).

    Returns:
        (return_object, httpcode) tuple: a list of response-image dicts on
        success, or an error-response dict with the derived HTTP code.
    """
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = {}
    # default to 500; handlers below overwrite on success or set a more
    # specific code before raising
    httpcode = 500

    userId, pw = user_auth

    fulltag = digest = tag = imageId = imageDigest = dockerfile = annotations = None
    history = False
    force = False
    autosubscribe = True
    query_fulltag = None
    if params:
        if 'history' in params:
            history = params['history']
        if 'force' in params:
            force = params['force']
        if 'autosubscribe' in params:
            autosubscribe = params['autosubscribe']
        if 'fulltag' in params:
            query_fulltag = params['fulltag']

    if bodycontent:
        jsondata = json.loads(bodycontent)

        # unlike the legacy handler, digest and tag may both be supplied
        if 'digest' in jsondata:
            digest = jsondata['digest']

        if 'tag' in jsondata:
            tag = jsondata['tag']
        #elif 'imageDigest' in jsondata:
        #    imageDigest = jsondata['imageDigest']
        #elif 'imageId' in jsondata:
        #    imageId = jsondata['imageId']

        if 'dockerfile' in jsondata:
            dockerfile = jsondata['dockerfile']

        if 'annotations' in jsondata:
            annotations = jsondata['annotations']

    # subscription types to activate on add; 'tag_update' only when the
    # autosubscribe param is truthy
    autosubscribes = ['analysis_update']
    if autosubscribe:
        autosubscribes.append("tag_update")

    try:
        if method == 'GET':
            logger.debug("handling GET: ")
            try:
                return_object = []

                # Query param fulltag has precedence for search
                if query_fulltag:
                    tag = query_fulltag
                    imageId = imageDigest = digest = None

                image_records = catalog.get_image(user_auth, digest=digest, tag=tag, imageId=imageId, imageDigest=imageDigest, history=history)
                for image_record in image_records:
                    return_object.append(
                        make_response_image(user_auth, image_record, params))
                httpcode = 200
            except Exception as err:
                raise err

        elif method == 'POST':
            logger.debug(
                "handling POST: input_tag={} input_digest={} input_force={}".
                format(tag, digest, force))

            # if not, add it and set it up to be analyzed
            if not tag:
                # dont support digest add, yet
                httpcode = 400
                raise Exception("tag is required for image add")

            # tag+digest is only allowed as a forced re-analysis of an
            # already-known digest
            if digest and tag:
                if not force:
                    httpcode = 400
                    raise Exception("force is required to add digest+tag")
                else:
                    try:
                        image_check = catalog.get_image(user_auth, digest=digest, tag=tag, imageId=None, imageDigest=digest, history=False)
                    except Exception as err:
                        httpcode = 400
                        raise Exception(
                            "image digest must already exist to force re-analyze using tag+digest"
                        )

            # add the image to the catalog
            image_record = catalog.add_image(user_auth, tag=tag, digest=digest, dockerfile=dockerfile, annotations=annotations)
            imageDigest = image_record['imageDigest']

            # finally, do any state updates and return
            if image_record:
                logger.debug("added image: " + str(imageDigest))

                # auto-subscribe for NOW
                for image_detail in image_record['image_detail']:
                    fulltag = image_detail['registry'] + "/" + image_detail[
                        'repo'] + ":" + image_detail['tag']

                    # collect the subscription types already present for this tag
                    foundtypes = []
                    try:
                        subscription_records = catalog.get_subscription(
                            user_auth, subscription_key=fulltag)
                    except Exception as err:
                        subscription_records = []

                    for subscription_record in subscription_records:
                        if subscription_record['subscription_key'] == fulltag:
                            foundtypes.append(
                                subscription_record['subscription_type'])

                    sub_types = anchore_engine.services.common.subscription_types
                    for sub_type in sub_types:
                        if sub_type in ['repo_update']:
                            continue
                        if sub_type not in foundtypes:
                            # add missing subscriptions; only the types listed
                            # in autosubscribes start active. On add failure,
                            # fall back to an update, and swallow that too.
                            try:
                                default_active = False
                                if sub_type in autosubscribes:
                                    logger.debug("auto-subscribing image: " +
                                                 str(sub_type))
                                    default_active = True
                                catalog.add_subscription(
                                    user_auth, {
                                        'active': default_active,
                                        'subscription_type': sub_type,
                                        'subscription_key': fulltag
                                    })
                            except:
                                try:
                                    catalog.update_subscription(
                                        user_auth, {
                                            'subscription_type': sub_type,
                                            'subscription_key': fulltag
                                        })
                                except:
                                    pass

                # set the state of the image appropriately
                currstate = image_record['analysis_status']
                if not currstate:
                    newstate = taskstate.init_state('analyze', None)
                elif force or currstate == taskstate.fault_state('analyze'):
                    newstate = taskstate.reset_state('analyze')
                elif image_record['image_status'] == 'deleted':
                    newstate = taskstate.reset_state('analyze')
                else:
                    newstate = currstate

                if (currstate != newstate) or (force):
                    logger.debug("state change detected: " + str(currstate) +
                                 " : " + str(newstate))
                    image_record.update({
                        'image_status': 'active',
                        'analysis_status': newstate
                    })
                    updated_image_record = catalog.update_image(
                        user_auth, imageDigest, image_record)
                    if updated_image_record:
                        image_record = updated_image_record[0]
                else:
                    logger.debug("no state change detected: " + str(currstate) +
                                 " : " + str(newstate))

                httpcode = 200
                return_object = [
                    make_response_image(user_auth, image_record, params)
                ]
            else:
                httpcode = 500
                raise Exception("failed to add image")

    except Exception as err:
        # convert any handler exception into an error response object; the
        # final code comes from the response, seeded by httpcode above
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.services.common.make_response_error(
            err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)