def delete_image(user_id, image_id):
    """
    DELETE the image and all resources for it. Returns 204 - No Content on success

    :param user_id: account id that owns the image
    :param image_id: id of the image to delete
    :return: (body, http status code) tuple; body is None on success
    """
    db = get_session()
    try:
        log.info(
            "Deleting image {}/{} and all associated resources".format(
                user_id, image_id
            )
        )

        img = db.query(Image).get((image_id, user_id))
        if img:
            get_vulnerabilities_provider().delete_image_vulnerabilities(
                image=img, db_session=db
            )
            _flush_evaluation_cache(img, user_id, image_id)
            db.delete(img)
            db.commit()
        else:
            db.rollback()

        # Idempotently return 204. This isn't properly RESTY, but idempotency on delete makes clients much cleaner.
        return None, 204
    except HTTPException:
        # Framework-level errors carry their own status codes; let them propagate
        raise
    except Exception as e:
        log.exception(
            "Error processing DELETE request for image {}/{}".format(user_id, image_id)
        )
        db.rollback()
        return (
            make_response_error(
                "Error deleting image {}/{}: {}".format(user_id, image_id, e),
                in_httpcode=500,
            ),
            500,
        )


def _flush_evaluation_cache(img, user_id, image_id):
    """Best-effort flush of cached evaluations for the image; logs and continues on failure."""
    try:
        conn_timeout = ApiRequestContextProxy.get_service().configuration.get(
            "catalog_client_conn_timeout", DEFAULT_CACHE_CONN_TIMEOUT
        )
        read_timeout = ApiRequestContextProxy.get_service().configuration.get(
            "catalog_client_read_timeout", DEFAULT_CACHE_READ_TIMEOUT
        )
        mgr = EvaluationCacheManager(img, None, None, conn_timeout, read_timeout)
        mgr.flush()
    except Exception:
        log.exception(
            "Could not delete evaluations for image {}/{} in the cache. May be orphaned".format(
                user_id, image_id
            )
        )
def _rescan_cve(img_id):
    """
    Re-run vulnerability matching for the system image (account "0") with the given id.

    Commits on success, rolls back and re-raises on failure.

    :param img_id: id of the image to rescan
    :return: None
    """
    db = get_thread_scoped_session()
    try:
        img = db.query(Image).filter_by(user_id="0", id=img_id).one_or_none()
        if img is None:
            # Image no longer exists; nothing to rescan. Avoid handing None to the provider.
            db.rollback()
            return
        get_vulnerabilities_provider().load_image(db, img)
        db.commit()
        return
    except Exception:
        db.rollback()
        raise
def query_vulnerabilities_get(
    id=None, affected_package=None, affected_package_version=None, namespace=None
):
    """
    Query vulnerability records by id, affected package, and/or namespace.

    :param id: single vulnerability id or list of ids
    :param affected_package: package name filter
    :param affected_package_version: package version filter
    :param namespace: single namespace or list of namespaces
    :return: (result json, http status code) tuple
    """
    log.info("Querying vulnerabilities")

    session = get_session()
    try:
        # Normalize single values to lists for the provider query
        if isinstance(namespace, str):
            namespace = [namespace]

        ids = [id] if isinstance(id, str) else id

        return_object = get_vulnerabilities_provider().get_vulnerabilities(
            ids, affected_package, affected_package_version, namespace, session
        )
        httpcode = 200
    except Exception as err:
        log.exception("Error querying vulnerabilities")
        httpcode = 500
        return_object = make_response_error(err, in_httpcode=httpcode)
    finally:
        session.close()

    return return_object, httpcode
def toggle_feed_enabled(feed, enabled):
    """
    Enable or disable syncing for the named feed.

    :param feed: name of the feed to toggle
    :param enabled: boolean new enabled state
    :return: (feed json, 200) on success, or (error json, 500) on unexpected failure
    :raises BadRequest: if enabled is not a boolean or the feed is unsupported by the provider
    :raises ResourceNotFound: if the feed does not exist
    """
    if not isinstance(enabled, bool):
        raise BadRequest(message="state must be a boolean", detail={"value": enabled})

    try:
        provider = get_vulnerabilities_provider()
        # Keep the requested feed name distinct from the update result so error
        # responses still reference the name when nothing is returned
        updated_feed = provider.update_feed_enabled_status(feed, enabled)

        if not updated_feed:
            raise ResourceNotFound(feed, detail={})

        return updated_feed.to_json(), 200
    except ResourceNotFound:
        # Must re-raise explicitly: otherwise the generic handler below would
        # convert the intended 404 into a 500
        raise
    except InvalidFeed:
        raise BadRequest(
            message="Feed not supported on configured vulnerability provider",
            detail={
                "feed": feed,
                "configured_provider": provider.get_config_name(),
            },
        )
    except Exception as e:
        log.error("Could not update feed enabled status")
        return jsonify(make_response_error(e, in_httpcode=500)), 500
def query_images_by_vulnerability_get(
    user_id,
    vulnerability_id=None,
    severity=None,
    namespace=None,
    affected_package=None,
    vendor_only=True,
):
    """
    List images affected by a vulnerability, applying the given filters.

    :param user_id: account id to scope the query to
    :param vulnerability_id: vulnerability id filter
    :param severity: severity filter
    :param namespace: namespace filter
    :param affected_package: package name filter
    :param vendor_only: if true, exclude vulnerabilities vendors will not address
    :return: (result json, http status code) tuple
    """
    log.info("Querying images by vulnerability {}".format(vulnerability_id))

    session = get_session()
    try:
        # request prep is unnecessary but keeping it around for now to avoid weird corner cases
        request_inputs = apis.do_request_prep(
            connexion.request,
            default_params={
                "vulnerability_id": vulnerability_id,
                "severity": severity,
                "namespace": namespace,
                "affected_package": affected_package,
                "vendor_only": vendor_only,
            },
        )

        account_id = request_inputs["userId"]
        params = request_inputs.get("params", {})

        return_object = get_vulnerabilities_provider().get_images_by_vulnerability(
            account_id,
            params.get("vulnerability_id", None),
            params.get("severity", None),
            params.get("namespace", None),
            params.get("affected_package", None),
            params.get("vendor_only", True),
            session,
        )
        httpcode = 200
    except Exception as err:
        log.exception("Error querying images by vulnerability")
        httpcode = 500
        return_object = make_response_error(err, in_httpcode=httpcode)
    finally:
        session.close()

    return return_object, httpcode
def _inputs_changed(self, cache_timestamp):
    # A feed sync has occurred since the eval was done, or the image itself was
    # updated/reloaded, so the evaluation inputs may differ. Must be stale.
    db = get_session()

    if self.image.last_modified > cache_timestamp:
        return True

    return get_vulnerabilities_provider().is_image_vulnerabilities_updated(
        image=self.image, db_session=db, since=cache_timestamp
    )
def prepare_context(self, image_obj, context):
    """
    Fetch the image's vulnerability report and stash its results on the
    execution context under "loaded_vulnerabilities".

    :rtype:
    """
    session = get_thread_scoped_session()
    report = get_vulnerabilities_provider().get_image_vulnerabilities(
        image_obj, session
    )
    context.data["loaded_vulnerabilities"] = report.results
    return context
def list_feeds(refresh_counts=False):
    """
    GET /feeds

    :param refresh_counts: forcibly update the group counts (not normally necessary)
    :return: list of feed json objects from the configured provider
    """
    provider = get_vulnerabilities_provider()

    if refresh_counts:
        provider.update_feed_group_counts()

    all_feeds = provider.get_feeds()
    return [f.to_json() for f in all_feeds]
def execute(self):
    """Recompute and store the vulnerabilities report for the image referenced by this message."""
    with timer(
        "image vulnerabilities refresh for %s/%s"
        % (self.message.account_id, self.message.image_id),
        log_level="info",
    ):
        logger.debug(
            "Refreshing image vulnerabilities report for account_id=%s, image_id=%s, image_digest=%s",
            self.message.account_id,
            self.message.image_id,
            self.message.image_digest,
        )

        with session_scope() as session:
            # lookup image first
            record = (
                session.query(Image)
                .filter(
                    Image.user_id == self.message.account_id,
                    Image.id == self.message.image_id,
                )
                .one_or_none()
            )

            if record is None:
                logger.debug(
                    "No record found for image account=%s, image_id=%s, skipping refresh",
                    self.message.account_id,
                    self.message.image_id,
                )
                return

            # call the provider with vendor_only and force disabled
            get_vulnerabilities_provider().get_image_vulnerabilities_json(
                image=record,
                vendor_only=False,
                db_session=session,
                force_refresh=False,
                use_store=True,
            )
def get_image_vulnerabilities(user_id, image_id, force_refresh=False, vendor_only=True):
    """
    Return the vulnerability listing for the specified image and load from catalog if not found and specifically asked
    to do so.

    :param user_id: user id of image to evaluate
    :param image_id: image id to evaluate
    :param force_refresh: if true, flush and recompute vulnerabilities rather than returning current values
    :param vendor_only: if true, filter out the vulnerabilities that vendors will explicitly not address
    :return: (report json, 200) on success; (error json, error code) otherwise
    """
    # Has image?
    db = get_session()
    try:
        img = db.query(Image).get((image_id, user_id))
        if not img:
            return make_response_error("Image not found", in_httpcode=404), 404

        provider = get_vulnerabilities_provider()
        report = provider.get_image_vulnerabilities_json(
            image=img,
            vendor_only=vendor_only,
            db_session=db,
            force_refresh=force_refresh,
            use_store=True,
        )
        db.commit()
        return report, 200
    except HTTPException:
        # API-level errors carry their own status; propagate unchanged
        db.rollback()
        raise
    except Exception as e:
        # Typo fixed in log message ("vulnerabiltiies" -> "vulnerabilities")
        log.exception(
            "Error checking image {}, {} for vulnerabilities. Rolling back".format(
                user_id, image_id
            )
        )
        db.rollback()
        return make_response_error(e, in_httpcode=500), 500
    finally:
        db.close()
def run_feeds_update(cls, json_obj=None, force_flush=False) -> Optional[List[FeedSyncResult]]:
    """
    Creates a task and runs it, optionally with a thread if locking is enabled.

    :return:
    """
    try:
        vulnerabilities_provider = get_vulnerabilities_provider()
        sync_configs = compute_selected_configs_to_sync(
            vulnerabilities_provider.get_config_name(),
            get_section_for_vulnerabilities(),
            vulnerabilities_provider.get_default_sync_config(),
        )

        if not json_obj:
            task = FeedsUpdateTask(sync_configs=sync_configs, flush=force_flush)
        else:
            task = cls.from_json(json_obj)
            if not task:
                return None
            task.sync_configs = sync_configs

        if not cls.locking_enabled:
            return task.execute()

        # A bit of work-around for the lambda def to get result from thread execution
        results = []
        run_target_with_lease(
            account=None,
            lease_id="feed_sync",
            ttl=90,
            target=lambda: results.append(task.execute()),
        )
        return results[0] if results else results
    except Exception:
        logger.exception("Error executing feeds update")
        raise
def get_image_vulnerabilities(user_id, image_id, force_refresh=False, vendor_only=True):
    """
    Return the vulnerability listing for the specified image and load from catalog if not found and specifically asked
    to do so.

    Example json output:
    {
       "multi" : {
          "url_column_index" : 7,
          "result" : {
             "rows" : [],
             "rowcount" : 0,
             "colcount" : 8,
             "header" : [
                "CVE_ID",
                "Severity",
                "*Total_Affected",
                "Vulnerable_Package",
                "Fix_Available",
                "Fix_Images",
                "Rebuild_Images",
                "URL"
             ]
          },
          "querycommand" : "/usr/lib/python2.7/site-packages/anchore/anchore-modules/multi-queries/cve-scan.py /ebs_data/anchore/querytmp/queryimages.7026386 /ebs_data/anchore/data /ebs_data/anchore/querytmp/query.59057288 all",
          "queryparams" : "all",
          "warns" : [
             "0005b136f0fb (prom/prometheus:master) cannot perform CVE scan: no CVE data is currently available for the detected base distro type (busybox:unknown_version,busybox:v1.26.2)"
          ]
       }
    }

    :param user_id: user id of image to evaluate
    :param image_id: image id to evaluate
    :param force_refresh: if true, flush and recompute vulnerabilities rather than returning current values
    :param vendor_only: if true, filter out the vulnerabilities that vendors will explicitly not address
    :return: (report json, 200) on success; (error json, error code) otherwise
    """
    # Has image?
    db = get_session()
    try:
        img = db.query(Image).get((image_id, user_id))
        if not img:
            return make_response_error("Image not found", in_httpcode=404), 404

        provider = get_vulnerabilities_provider()
        report = provider.get_image_vulnerabilities(
            image=img,
            vendor_only=vendor_only,
            db_session=db,
            force_refresh=force_refresh,
            cache=True,
        )
        db.commit()
        return report.to_json(), 200
    except HTTPException:
        # API-level errors carry their own status; propagate unchanged
        db.rollback()
        raise
    except Exception as e:
        # Typo fixed in log message ("vulnerabiltiies" -> "vulnerabilities")
        log.exception(
            "Error checking image {}, {} for vulnerabilities. Rolling back".format(
                user_id, image_id
            )
        )
        db.rollback()
        return make_response_error(e, in_httpcode=500), 500
    finally:
        db.close()
def execute(self):
    """
    Execute a load. Fetch from the catalog and send to loader.

    :return: the ImageLoadResult object including the image object and its vulnerabilities, or None if image already found
    """
    self.start_time = datetime.datetime.utcnow()
    try:
        db = get_session()
        img = db.query(Image).get((self.image_id, self.user_id))
        if img is not None:
            if not self.force_reload:
                logger.info(
                    "Image {}/{} already found in the system. Will not re-load.".format(
                        self.user_id, self.image_id
                    )
                )
                db.close()
                return None
            else:
                logger.info(
                    "Deleting image {}/{} and all associated resources for reload".format(
                        self.user_id, self.image_id
                    )
                )
                db.delete(img)

        # NOTE: the session stays open across the analysis fetch below; a close
        # here was previously disabled.
        image_obj = self._load_image_analysis()
        if not image_obj:
            logger.error("Could not load image analysis")
            raise ImageLoadError(
                "Failed to load image: user_id = {}, image_id = {}, fetch_url = {}".format(
                    self.user_id, self.image_id, self.fetch_url
                )
            )

        db = get_session()
        try:
            logger.info("Adding image to db")
            db.add(image_obj)

            with timer("Generating vulnerability matches", log_level="info"):
                get_vulnerabilities_provider().load_image(
                    image=image_obj,
                    db_session=db,
                    use_store=True,  # save results to cache
                )

            db.commit()
        except Exception:
            logger.exception("Error adding image to db")
            db.rollback()
            raise

        return ImageLoadResult(image_obj)
    except Exception:
        logger.exception(
            "Error loading and scanning image: {}".format(self.image_id)
        )
        raise
    finally:
        self.stop_time = datetime.datetime.utcnow()
def execute(self) -> List[FeedSyncResult]:
    """
    Run the feed sync across the configured providers and return the list of sync results.

    Sends task start/completed/failed notifications to the catalog (best-effort) and,
    after the sync window closes, triggers a rescan of images loaded during the sync.

    :return: list of FeedSyncResult objects from the synced providers
    :raises Exception: re-raises any sync failure after emitting the failure event
    """
    logger.info("Starting feed sync. (operation_id={})".format(self.uuid))

    # Feed syncs will update the images with any new cves that are pulled in for a the sync. As such, any images that are loaded while the sync itself is in progress need to be
    # re-scanned for cves since the transaction ordering can result in the images being loaded with data prior to sync but not included in the sync process itself.

    # Create feed task begin event
    error = None
    with session_scope() as session:
        # NOTE(review): mgr appears unused in this method — confirm for_session()
        # has no side effects relied upon here before removing it
        mgr = identities.manager_factory.for_session(session)
        catalog_client = internal_client_for(CatalogClient, userId=None)

    try:
        notify_event(
            FeedSyncTaskStarted(
                groups=list(self.sync_configs.keys()) if self.sync_configs else "all"
            ),
            catalog_client,
            self.uuid,
        )
    except Exception:
        # Event emission is best-effort; never block the sync on it
        logger.exception(
            "Ignoring event generation error before feed sync. (operation_id={})".format(
                self.uuid
            )
        )

    # Single assignment (was duplicated inside the try block)
    start_time = datetime.datetime.utcnow()
    try:
        updated_data_feeds = list()
        updated_data_feeds.extend(
            DataFeeds.sync(
                sync_util_provider=GrypeProvider().get_sync_utils(self.sync_configs),
                full_flush=self.full_flush,
                catalog_client=catalog_client,
                operation_id=self.uuid,
            )
        )
        updated_data_feeds.extend(
            DataFeeds.sync(
                sync_util_provider=LegacyProvider().get_sync_utils(self.sync_configs),
                full_flush=self.full_flush,
                catalog_client=catalog_client,
                operation_id=self.uuid,
            )
        )

        logger.info("Feed sync complete (operation_id={})".format(self.uuid))
        return updated_data_feeds
    except Exception as e:
        error = e
        logger.exception(
            "Failure refreshing and syncing feeds. (operation_id={})".format(self.uuid)
        )
        raise
    finally:
        end_time = datetime.datetime.utcnow()
        # log feed sync event
        try:
            if error:
                notify_event(
                    FeedSyncTaskFailed(
                        groups=list(self.sync_configs.keys())
                        if self.sync_configs
                        else "all",
                        error=error,
                    ),
                    catalog_client,
                    self.uuid,
                )
            else:
                notify_event(
                    FeedSyncTaskCompleted(
                        groups=list(self.sync_configs.keys())
                        if self.sync_configs
                        else "all"
                    ),
                    catalog_client,
                    self.uuid,
                )
        except Exception:
            logger.exception(
                "Ignoring event generation error after feed sync (operation_id={})".format(
                    self.uuid
                )
            )

        try:
            get_vulnerabilities_provider().rescan_images_loaded_during_feed_sync(
                self.uuid, from_time=start_time, to_time=end_time
            )
        except Exception:
            logger.exception(
                "Unexpected exception rescanning vulns for images added during the feed sync. (operation_id={})".format(
                    self.uuid
                )
            )
            raise
        finally:
            end_session()
def rescan_images_created_between(self, from_time, to_time):
    """
    If this was a vulnerability update (e.g. timestamps vuln feeds lies in that interval), then look for any images that were loaded in that interval and re-scan the cves for those to ensure that no ordering of transactions caused cves to be missed for an image.

    This is an alternative to a blocking approach by which image loading is blocked during feed syncs.

    :param from_time: inclusive start of the creation-time window
    :param to_time: inclusive end of the creation-time window
    :return: count of updated images
    :raises ValueError: if either timestamp is None
    """
    if from_time is None or to_time is None:
        raise ValueError("Cannot process None timestamp")

    logger.info(
        "Rescanning images loaded between {} and {} (operation_id={})".format(
            from_time.isoformat(), to_time.isoformat(), self.uuid
        )
    )
    count = 0

    db = get_session()
    try:
        # it is critical that these tuples are in proper index order for the primary key of the Images object so that subsequent get() operation works
        imgs = [
            (x.id, x.user_id)
            for x in db.query(Image).filter(
                Image.created_at >= from_time, Image.created_at <= to_time
            )
        ]
        logger.info(
            "Detected images: {} for rescan (operation_id={})".format(
                " ,".join([str(x) for x in imgs]) if imgs else "[]", self.uuid
            )
        )
    finally:
        db.rollback()

    retry_max = 3
    for img in imgs:
        # If every retry fails, the image is skipped; count reflects successes only
        for attempt in range(retry_max):
            try:
                # New transaction for each image to get incremental progress
                db = get_session()
                try:
                    # If the type or ordering of 'img' tuple changes, this needs to be updated as it relies on symmetry of that tuple and the identity key of the Image entity
                    image_obj = db.query(Image).get(img)
                    if image_obj:
                        logger.info(
                            "Rescanning image {} post-vuln sync. (operation_id={})".format(
                                img, self.uuid
                            )
                        )
                        get_vulnerabilities_provider().load_image(
                            image_obj, db_session=db
                        )
                        count += 1
                    else:
                        # logger.warn is a deprecated alias; use warning
                        logger.warning(
                            "Failed to lookup image with tuple: {} (operation_id={})".format(
                                str(img), self.uuid
                            )
                        )

                    db.commit()
                finally:
                    db.rollback()

                break
            except Exception:
                logger.exception(
                    "Caught exception updating vulnerability scan results for image {}. Waiting and retrying (operation_id={})".format(
                        img, self.uuid
                    )
                )
                time.sleep(5)

    return count