def handle_tar_error(tarcmd, rc, sout, serr, unpackdir, rootfsdir, layer, layertar):
    handled = False
    try:
        slinkre = "tar: (.*): Cannot open: File exists"
        for errline in serr.splitlines():
            patt = re.match(slinkre, errline)
            if patt:
                matchfile = patt.group(1)
                logger.debug("found 'file exists' error on name: " + str(matchfile))
                if matchfile:
                    badfile = os.path.join(rootfsdir, patt.group(1))
                    if os.path.exists(badfile):
                        logger.debug("removing hierarchy: " + str(badfile))
                        shutil.rmtree(badfile)
                        handled = True
    except Exception as err:
        raise err

    return handled
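
# Illustration (not part of the original module): a self-contained sketch of the
# stderr pattern match used by handle_tar_error above. The sample tar output
# lines below are invented for demonstration.
def _example_match_tar_file_exists(errline):
    patt = re.match("tar: (.*): Cannot open: File exists", errline)
    return patt.group(1) if patt else None

assert _example_match_tar_file_exists("tar: usr/bin/foo: Cannot open: File exists") == "usr/bin/foo"
assert _example_match_tar_file_exists("tar: unrelated output") is None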
def query_client(client_id):
    logger.debug('Looking up client: {}'.format(client_id))
    # if client_id == 'anonymous':
    #     db = get_session()
    #     c = db.query(OAuth2Client).filter_by(client_id=client_id).first()
    #     logger.debug('Found client: {}'.format(c))
    # else:
    #     logger.debug('Using default user client')

    # Return a default client to support implicit flows
    c = OAuth2Client()
    c.id = 0
    c.client_id = client_id
    c.user_id = client_id
    c.client_secret = ''
    c.issued_at = time.time() - 100
    c.expires_at = time.time() + 1000
    c.grant_type = 'password'
    c.token_endpoint_auth_method = 'none'
    c.client_name = 'default'
    return c
def ping_docker_registry(registry_record):
    ret = False
    user = ''
    url = ''
    try:
        registry = registry_record['registry']
        verify = registry_record['registry_verify']
        if registry in ['docker.io']:
            url = "https://index.docker.io"
        else:
            url = "https://" + registry

        user, pw = anchore_engine.auth.common.get_docker_registry_userpw(registry_record)
        drc = docker_registry_client.DockerRegistryClient(url, username=user, password=pw, verify_ssl=verify)
        logger.debug("registry access check success (" + str(url) + "," + str(user) + ")")
        ret = True
    except Exception as err:
        logger.warn("failed check to access registry (" + str(url) + "," + str(user) + ") - exception: " + str(err))
        raise Exception("failed check to access registry (" + str(url) + "," + str(user) + ") - exception: " + str(err))
        #ret = False

    return ret
def delete_staging_dirs(staging_dirs):
    for k in list(staging_dirs.keys()):
        if k == 'cachedir':
            continue

        localconfig = anchore_engine.configuration.localconfig.get_config()
        myconfig = localconfig.get('services', {}).get('analyzer', {})
        if not myconfig.get('keep_image_analysis_tmpfiles', False):
            try:
                if os.path.exists(staging_dirs[k]):
                    logger.debug("removing dir: " + k + " : " + str(staging_dirs[k]))
                    shutil.rmtree(staging_dirs[k])
            except Exception as err:
                raise Exception("unable to delete staging directory - exception: " + str(err))
        else:
            logger.debug("keep_image_analysis_tmpfiles is enabled - leaving analysis tmpdir in place {}".format(staging_dirs))

    return True
def __new__(cls):
    # If the singleton has not been initialized yet, do so with the instance variables below
    if cls._grype_wrapper_instance is None:
        logger.debug("Initializing Grype wrapper instance.")

        # The singleton instance, only instantiated once outside of testing
        cls._grype_wrapper_instance = super(GrypeWrapperSingleton, cls).__new__(cls)

        # These variables are mutable, their state can be changed when grype_db is updated
        cls._grype_db_dir_internal = None
        cls._grype_db_version_internal = None
        cls._grype_db_session_maker_internal = None

        # These variables are also mutable. They are for staging updated grype_dbs.
        cls._staging_grype_db_dir_internal = None
        cls._staging_grype_db_version_internal = None
        cls._staging_grype_db_session_maker_internal = None

        # The reader-writer lock for this class
        cls._grype_db_lock = rwlock.RWLockWrite()

    # Return the singleton instance
    return cls._grype_wrapper_instance
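
# Illustration (not part of the original module): the minimal shape of the
# __new__-based singleton pattern used above, with generic names so it stands
# alone.
class _ExampleSingleton:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(_ExampleSingleton, cls).__new__(cls)
            cls._shared_state = None  # class-level state initialized exactly once
        return cls._instance

# Every construction returns the same object:
assert _ExampleSingleton() is _ExampleSingleton()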
def lookup_imageDigest_from_imageId(request_inputs, imageId):
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    userId, pw = user_auth
    ret = None

    try:
        image_records = catalog.get_image(user_auth, imageId=imageId)
        if image_records:
            image_record = image_records[0]
            imageDigest = image_record['imageDigest']
            ret = imageDigest
    except Exception as err:
        logger.debug("operation exception: " + str(err))
        raise err

    return ret
def queue_import_task(account: str, operation_id: str, manifest: InternalImportManifest) -> bool:
    """
    Queue the import task for analysis

    :param account:
    :param operation_id:
    :param manifest:
    :return:
    """
    # Replace this with a state watcher, similar to the image state handlers
    logger.debug("Queueing import task for account %s operation id %s", account, operation_id)

    task = ImportQueueMessage()
    task.account = account
    task.manifest = manifest
    task.operation_uuid = operation_id
    task.image_digest = manifest.digest

    q_client = internal_client_for(SimpleQueueClient, userId=account)
    resp = q_client.enqueue(name=IMPORT_QUEUE, inobj=task.to_json())
    logger.debug("Queue task response: %s", str(resp))
    return True
def get_catalog_endpoint():
    global localconfig, headers
    if localconfig is None:
        logger.debug('initializing catalog endpoint')
        localconfig = anchore_engine.configuration.localconfig.get_config()
        logger.debug('loaded config: {}'.format(localconfig))

    servicename = "catalog"
    base_url = ""
    try:
        service = None

        if 'catalog_endpoint' in localconfig:
            base_url = re.sub("/+$", "", localconfig['catalog_endpoint'])
        else:
            with db.session_scope() as dbsession:
                service_reports = db.db_services.get_byname(servicename, session=dbsession)
                if service_reports:
                    service = service_reports[0]

            if not service:
                raise Exception("cannot locate registered service in DB: " + servicename)

            endpoint = service['base_url']
            if endpoint:
                apiversion = service['version']
                base_url = '/'.join([endpoint, apiversion])
            else:
                raise Exception("cannot load valid endpoint from DB")
    except Exception as err:
        raise Exception("could not find valid endpoint - exception: " + str(err))

    return base_url
def put(self, userId, bucket, archiveid, data):
    """
    Expects a json parsed payload to write

    :param userId:
    :param bucket:
    :param archiveid:
    :param data: string data to write
    :return: digest of the stored content
    """
    if not self.primary_client:
        raise Exception("archive not initialized")

    try:
        final_payload, is_compressed = self._do_compress(data)
        size = len(final_payload)
        digest = hashlib.md5(final_payload).hexdigest()

        url = self.primary_client.put(userId, bucket, archiveid, final_payload)
        with session_scope() as dbsession:
            db_archivemetadata.add(
                userId,
                bucket,
                archiveid,
                archiveid + ".json",
                url,
                is_compressed=is_compressed,
                content_digest=digest,
                size=size,
                session=dbsession,
            )
    except Exception as err:
        logger.debug("cannot put data: exception - " + str(err))
        raise err

    return digest
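
# Illustration (not part of the original module): how the content digest above
# is derived. md5 is computed over the final (possibly compressed) payload
# bytes, so the digest recorded in the metadata row matches the bytes that were
# actually stored, not the pre-compression input.
def _example_content_digest(payload: bytes) -> str:
    return hashlib.md5(payload).hexdigest()

assert _example_content_digest(b"{}") == "99914b932bd37a50b983c5e7c90ae93b"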
def images_imageDigest(request_inputs, imageDigest):
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = {}
    httpcode = 500
    userId, pw = user_auth

    try:
        if method == 'GET':
            logger.debug("handling GET on imageDigest: " + str(imageDigest))
            image_records = catalog.get_image(user_auth, imageDigest=imageDigest)
            if image_records:
                return_object = []
                for image_record in image_records:
                    #try:
                    #    query_data = catalog.get_document(user_auth, 'query_data', imageDigest)
                    #    if 'anchore_image_summary' in query_data and query_data['anchore_image_summary']:
                    #        logger.debug("getting image summary data")
                    #except Exception as err:
                    #    logger.warn("cannot get image summary data for image: " + str(imageDigest))

                    #try:
                    #    image_content_metadata = get_image_summary(user_auth, image_record)
                    #except:
                    #    image_content_metadata = {}

                    return_object.append(make_response_image(user_auth, image_record, params))
                httpcode = 200
            else:
                httpcode = 404
                raise Exception("cannot locate specified image")

        elif method == 'DELETE':
            logger.debug("handling DELETE on imageDigest: " + str(imageDigest))
            rc = False
            try:
                rc = catalog.delete_image(user_auth, imageDigest)
            except Exception as err:
                raise err

            if rc:
                return_object = rc
                httpcode = 200
            else:
                httpcode = 500
                raise Exception("failed to delete")
    except Exception as err:
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.services.common.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
def get_feed_group_data(self, feed, group, since=None, next_token=None):
    if since and not isinstance(since, datetime.datetime):
        raise TypeError('since should be a datetime object')

    baseurl = self.group_data_url.format(feed=feed, group=group)
    if since:
        baseurl += "?since={}".format(since.isoformat())
        if next_token:
            url = baseurl + '&next_token={}'.format(next_token)
        else:
            url = baseurl
    elif next_token:
        url = baseurl + '?next_token={}'.format(next_token)
    else:
        url = baseurl

    group_data = None

    logger.debug("data group url: " + str(url))
    try:
        record = self.http_client.execute_request(requests.get, url)
        if record['success']:
            data = json.loads(record['text'])
            if 'data' in data:
                group_data = data['data']
            if 'next_token' in data and data['next_token']:
                next_token = data['next_token']
            else:
                next_token = None
            return GroupData(data=group_data, next_token=next_token, since=since)
        else:
            raise Exception('Feed list operation failed. Msg: {}. Response: {}'.format(record.get('err_msg'), record.get('text')))
    except Exception as e:
        logger.debug('Error executing feed listing: {}'.format(e))
        raise e
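
# Illustration (not part of the original module): the token-driven paging loop a
# caller of get_feed_group_data would run - keep fetching until no next_token
# comes back. fetch_page below is a stand-in stub; the real call would be
# client.get_feed_group_data(feed, group, next_token=...).
def _example_page_through(fetch_page):
    all_records = []
    next_token = None
    while True:
        data, next_token = fetch_page(next_token)
        all_records.extend(data)
        if not next_token:
            break
    return all_records

# Two-page stub: the first call returns a continuation token, the second does not.
_pages = {None: ([1, 2], "tok"), "tok": ([3], None)}
assert _example_page_through(lambda tok: _pages[tok]) == [1, 2, 3]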
def get_policy(policyId, detail=None):
    request_inputs = anchore_engine.services.common.do_request_prep(request, default_params={'detail': True})
    user_auth = request_inputs['auth']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = {}
    httpcode = 500
    userId, pw = user_auth

    try:
        logger.debug('Get policy by bundle Id')

        try:
            policy_records = catalog.get_policy(user_auth, policyId=policyId)
        except Exception as err:
            logger.warn("unable to get policy_records for user (" + str(userId) + ") - exception: " + str(err))
            policy_records = []

        if policy_records:
            ret = []
            for policy_record in policy_records:
                ret.append(make_response_policy(user_auth, policy_record, params))
            return_object = ret
            httpcode = 200
        else:
            httpcode = 404
            raise Exception("cannot locate specified policyId")
    except Exception as err:
        return_object = anchore_engine.services.common.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
def perform_analyze_nodocker(userId, manifest, image_record, registry_creds, layer_cache_enable=False):
    ret_analyze = {}
    ret_query = {}

    localconfig = anchore_engine.configuration.localconfig.get_config()
    try:
        tmpdir = localconfig['tmp_dir']
    except Exception as err:
        logger.warn("could not get tmp_dir from localconfig - exception: " + str(err))
        tmpdir = "/tmp"

    use_cache_dir = None
    if layer_cache_enable:
        use_cache_dir = os.path.join(tmpdir, "anchore_layercache")

    # choose the first image detail (TODO: possibly more complex selection here)
    try:
        image_detail = image_record['image_detail'][0]
        registry_manifest = manifest
        pullstring = image_detail['registry'] + "/" + image_detail['repo'] + "@" + image_detail['imageDigest']
        fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
        logger.debug("using pullstring (" + str(pullstring) + ") and fulltag (" + str(fulltag) + ") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception("failed to extract requisite information from image_record - exception: " + str(err))

    timer = int(time.time())
    logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

    logger.info("performing analysis on image: " + str([userId, pullstring, fulltag]))

    logger.debug("obtaining anchorelock..." + str(pullstring))
    with localanchore.get_anchorelock(lockId=pullstring, driver='nodocker'):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))
        analyzed_image_report = localanchore_standalone.analyze_image(userId,
                                                                      registry_manifest,
                                                                      image_record,
                                                                      tmpdir,
                                                                      localconfig,
                                                                      registry_creds=registry_creds,
                                                                      use_cache_dir=use_cache_dir)
        ret_analyze = analyzed_image_report

    logger.info("performing analysis on image complete: " + str(pullstring))

    return ret_analyze
def run_query(pullstring, query):
    ret = {}

    query_name = query['name']
    params = []
    for p in query['params']:
        param = p['key']
        if 'val' in p and p['val']:
            param = param + "=" + p['val']
        params.append(param)

    cmd = ['anchore', '--json', 'query', '--image', pullstring, query_name] + params
    try:
        logger.debug("running query: " + str(' '.join(cmd)))
        try:
            sout = subprocess.check_output(cmd)
        except subprocess.CalledProcessError as err:
            if (err.returncode == 1 or err.returncode == 2) and err.output:
                sout = err.output.decode('utf8')
            else:
                sout = "invalid query"
        except Exception as err:
            raise err

        try:
            query_result = json.loads(sout)
        except Exception as err:
            query_result = {'error': str(sout)}

        ret = query_result
    except Exception as err:
        raise err

    return ret
def get_tar_filenames(layertar):
    ret = []
    layertarfile = None
    try:
        logger.debug("using tarfile library to get file names from tarfile={}".format(layertar))
        layertarfile = tarfile.open(layertar, mode='r', format=tarfile.PAX_FORMAT)
        ret = layertarfile.getnames()
    except:
        # python tarfile fails to unpack some docker image layers due to a PAX header issue, so try another method
        logger.debug("using tar command to get file names from tarfile={}".format(layertar))
        tarcmd = "tar tf {}".format(layertar)
        try:
            ret = []
            rc, sout, serr = utils.run_command(tarcmd)
            sout = utils.ensure_str(sout)
            serr = utils.ensure_str(serr)
            if rc == 0 and sout:
                for line in sout.splitlines():
                    line = re.sub("/+$", "", line)
                    ret.append(line)
            else:
                raise Exception("rc={} sout={} serr={}".format(rc, sout, serr))
        except Exception as err:
            logger.error("command failed with exception - " + str(err))
            raise err
    finally:
        if layertarfile:
            layertarfile.close()

    return ret
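
# Illustration (not part of the original module): the happy path above in
# isolation - listing member names from a tar archive with the stdlib, using a
# throwaway in-memory archive built on the fly.
import io
import tarfile

def _example_tar_names() -> list:
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode='w', format=tarfile.PAX_FORMAT) as tf:
        tf.addfile(tarfile.TarInfo(name="etc/example.conf"), io.BytesIO(b""))
    buf.seek(0)
    with tarfile.open(fileobj=buf, mode='r', format=tarfile.PAX_FORMAT) as tf:
        return tf.getnames()

assert _example_tar_names() == ["etc/example.conf"]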
def list_policies(detail=None):
    request_inputs = anchore_engine.apis.do_request_prep(request, default_params={'detail': False})
    user_auth = request_inputs['auth']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = []
    httpcode = 500
    userId = request_inputs['userId']

    try:
        logger.debug('Listing policies')
        client = internal_client_for(CatalogClient, request_inputs['userId'])

        try:
            policy_records = client.list_policies()
        except Exception as err:
            logger.warn("unable to get policy_records for user (" + str(userId) + ") - exception: " + str(err))
            raise err

        if policy_records:
            httpcode = 200
            ret = []
            for policy_record in policy_records:
                ret.append(make_response_policy(policy_record, params))
            return_object = ret
        # else:
        #     httpcode = 404
        #     raise Exception('no policies found for user')
    except Exception as err:
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.common.helpers.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
def do_import_image(request_inputs, importRequest):
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = {}
    httpcode = 500

    userId, pw = user_auth

    try:
        return_object = []
        image_records = catalog.import_image(user_auth, json.loads(bodycontent))
        for image_record in image_records:
            return_object.append(make_response_image(user_auth, image_record, params))
        httpcode = 200
    except Exception as err:
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.services.common.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
def send_test_notification(webhooks, webhook, request_inputs, webhook_type, notification_type):
    """
    Gathers all the parameters needed to build the test notification, then sends the webhook

    :param webhooks: webhooks loaded from localconfig
    :param webhook: the webhook object for webhook_type
    :param request_inputs: the request inputs (used to resolve userId)
    :param webhook_type: webhook type to send (used to build payload)
    :param notification_type: notification type used to build the test notification
    :return: result of webhook and http code (200 if successful, 500 if we fail to build the test notification or payload)
    """
    httpcode = 500
    rootuser = webhooks.pop('webhook_user', None)
    rootpw = webhooks.pop('webhook_pass', None)
    rootverify = webhooks.pop('ssl_verify', None)

    subvars = [('<userId>', request_inputs['userId']), ('<notification_type>', 'test')]

    try:
        notification = get_test_notification(notification_type, request_inputs)
    except Exception as err:
        return_object = anchore_engine.common.helpers.make_response_error(err, in_httpcode=httpcode)
        return return_object, httpcode

    logger.debug('build payload: {}'.format(notification.to_json()))
    try:
        payload = notification.to_json()
    except Exception as err:
        return_object = anchore_engine.common.helpers.make_response_error(err, in_httpcode=httpcode)
        return return_object, httpcode

    return notifications.do_notify_webhook_type(webhook=webhook,
                                                user=webhook.pop('webhook_user', rootuser),
                                                pw=webhook.pop('webhook_pass', rootpw),
                                                verify=webhook.pop('ssl_verify', rootverify),
                                                subvars=subvars,
                                                payload=payload), 200
def _are_match_equivalent(vulnerability_a, vulnerability_b):
    """
    Returns true if the two records (including child fixedin and/or vulnerablein records) are equivalent in terms of package matching.

    TODO: move this logic to a vuln-scan abstraction, but that abstraction needs more work before it's ready. Would like to keep
    the definition of what impacts matches centralized so as not to get out-of-sync.

    :param vulnerability_a:
    :param vulnerability_b:
    :return:
    """

    if (not (vulnerability_a and vulnerability_b)
            or vulnerability_a.id != vulnerability_b.id
            or vulnerability_a.namespace_name != vulnerability_b.namespace_name):
        # They aren't the same item reference
        logger.debug(
            "Vuln id or namespaces are different: {} {} {} {}".format(
                vulnerability_a.id,
                vulnerability_b.id,
                vulnerability_a.namespace_name,
                vulnerability_b.namespace_name,
            ))
        return False

    normalized_fixes_a = {(fix.name, fix.epochless_version, fix.version) for fix in vulnerability_a.fixed_in}
    normalized_fixes_b = {(fix.name, fix.epochless_version, fix.version) for fix in vulnerability_b.fixed_in}

    fix_diff = normalized_fixes_a.symmetric_difference(normalized_fixes_b)
    if fix_diff:
        logger.debug("Fixed In records diff: {}".format(fix_diff))
        return False

    return True
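
# Illustration (not part of the original module): why symmetric_difference is the
# right comparison above - it surfaces fixes present in exactly one of the two
# records, in either direction. The tuples are (name, epochless_version, version);
# the package names and versions below are invented.
_fixes_a = {("openssl", "1.1.1k", "1.1.1k"), ("zlib", "1.2.11", "1.2.11")}
_fixes_b = {("openssl", "1.1.1k", "1.1.1k")}
assert _fixes_a.symmetric_difference(_fixes_b) == {("zlib", "1.2.11", "1.2.11")}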
def get(self, userId, bucket, key):
    if not self.initialized:
        raise Exception("archive not initialized")

    try:
        with db.session_scope() as dbsession:
            result = db_objectstorage.get(userId, bucket, key, session=dbsession)
            if result and 'content' in result:
                data = result.get('content')
                if data is not None:
                    return data.decode('utf8')
                else:
                    return None
            else:
                raise ObjectKeyNotFoundError(userId, bucket, key, caused_by=None)
    except Exception as err:
        logger.debug("cannot get data: exception - " + str(err))
        raise err
def do_connect(db_params):
    global engine, Session, SerializableSession

    db_connect = db_params.get('db_connect', None)
    db_connect_args = db_params.get('db_connect_args', None)
    db_pool_size = db_params.get('db_pool_size', None)
    db_pool_max_overflow = db_params.get('db_pool_max_overflow', None)
    db_echo = db_params.get('db_echo', False)

    if db_connect:
        try:
            if db_connect.startswith('sqlite://'):
                # Special case for testing with sqlite. Not for production use, unit tests only
                engine = sqlalchemy.create_engine(db_connect, echo=db_echo)
            else:
                logger.debug("database connection args {} db_echo={}".format(db_connect_args, db_echo))
                engine = sqlalchemy.create_engine(db_connect,
                                                  connect_args=db_connect_args,
                                                  echo=db_echo,
                                                  pool_size=db_pool_size,
                                                  max_overflow=db_pool_max_overflow)
        except Exception as err:
            raise Exception("could not connect to DB - exception: " + str(err))
    else:
        raise Exception("could not locate db_connect string from configuration: add db_connect parameter to configuration file")

    # set up the global session
    try:
        # SerializableSession = sessionmaker(bind=engine.execution_options(isolation_level='SERIALIZABLE'))
        Session = sessionmaker(bind=engine)
    except Exception as err:
        raise Exception("could not create DB session - exception: " + str(err))

    # set up thread-local session factory
    init_thread_session()

    return True
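
# Illustration (not part of the original module): the minimal engine/sessionmaker
# wiring used above, exercised against the same in-memory sqlite special case the
# function reserves for tests.
import sqlalchemy
from sqlalchemy.orm import sessionmaker

def _example_sqlite_session():
    engine = sqlalchemy.create_engine('sqlite://', echo=False)  # in-memory, test-only
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        return session.execute(sqlalchemy.text('SELECT 1')).scalar()
    finally:
        session.close()

# _example_sqlite_session() returns 1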
def update_image_status(
    session: Session,
    account: str,
    image_digest: str,
    old_statuses: list,
    new_status: str,
) -> str:
    current_record = (
        session.query(ArchivedImage)
        .filter_by(account=account, imageDigest=image_digest)
        .options(lazyload(ArchivedImage._tags))
        .one_or_none()
    )

    logger.debug(
        "Updating archive image status from one of: {} to {} for {}/{} w/record: {}".format(
            old_statuses, new_status, account, image_digest, current_record
        )
    )

    if current_record:
        if current_record.status not in old_statuses:
            raise Exception("Status mismatch")
        else:
            current_record.status = new_status
    else:
        return None

    return new_status
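
# Illustration (not part of the original module): the transition guard above as a
# standalone check - a move to a new status is only legal when the current status
# is one of the expected old statuses. The status names below are invented.
def _example_valid_transition(current: str, old_statuses: list) -> bool:
    return current in old_statuses

assert _example_valid_transition("archiving", ["archiving", "archived"])
assert not _example_valid_transition("deleted", ["archiving", "archived"])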
def execute(self):
    with timer(
        "image vulnerabilities refresh for %s/%s"
        % (self.message.account_id, self.message.image_id),
        log_level="info",
    ):
        logger.debug(
            "Refreshing image vulnerabilities report for account_id=%s, image_id=%s, image_digest=%s",
            self.message.account_id,
            self.message.image_id,
            self.message.image_digest,
        )

        with session_scope() as session:
            # look up the image first
            img = (
                session.query(Image)
                .filter(
                    Image.user_id == self.message.account_id,
                    Image.id == self.message.image_id,
                )
                .one_or_none()
            )

            if not img:
                logger.debug(
                    "No record found for image account=%s, image_id=%s, skipping refresh",
                    self.message.account_id,
                    self.message.image_id,
                )
                return

            # call the provider with vendor_only and force disabled
            get_vulnerabilities_provider().get_image_vulnerabilities_json(
                image=img,
                vendor_only=False,
                db_session=session,
                force_refresh=False,
                use_store=True,
            )
def list_feed_groups(self, feed: str) -> FeedGroupList:
    group_list = FeedGroupList(groups=[])
    more_data = True
    next_token = None

    while more_data:
        url = self.group_url.format(feed=feed) + (("?next_token=" + next_token) if next_token else "")
        try:
            record = self.http_client.execute_request(requests.get, url, retries=self.retry_count)
            if record["success"]:
                data = json.loads(ensure_str(record["content"]))
                if "groups" in data:
                    group_list.groups.extend(
                        [
                            FeedAPIGroupRecord(
                                name=x.get("name"),
                                description=x.get("description"),
                                access_tier=x.get("access_tier"),
                            )
                            for x in data["groups"]
                        ]
                    )
                if "next_token" in data and data["next_token"]:
                    next_token = data["next_token"]
                    more_data = True
                else:
                    more_data = False
            else:
                raise Exception(
                    "Feed list operation failed. Msg: {}. Response: {}".format(
                        record.get("err_msg"), record.get("text")
                    )
                )
        except Exception as e:
            logger.debug("Error executing feed listing: {}".format(e))
            raise e

    return group_list
def policy_engine_image_load(client, imageUserId, imageId, imageDigest):
    resp = None

    try:
        request = ImageIngressRequest(user_id=imageUserId,
                                      image_id=imageId,
                                      fetch_url='catalog://' + str(imageUserId) + '/analysis_data/' + str(imageDigest))
        logger.debug("policy engine request (image add): " + str(request))
        resp = client.ingress_image(request)
        logger.spew("policy engine response (image add): " + str(resp))
    except Exception as err:
        logger.error("failed to add/check image: " + str(err))
        raise err

    return resp
def authorize(self, identity: IdentityContext, permission_list):
    subject = Yosai.get_current_subject()
    if subject.primary_identifier != identity:
        logger.debug('Mismatch between subject and provided identity for the authorization. Failing authz')
        raise UnauthorizedError(permission_list)

    # Do the account state check here for authz rather than in the authc path, since it's a property of an authenticated user
    self._check_calling_user_account_state(identity)

    self._exec_permission_check(subject, permission_list)

    # Check only after the perms check. Match any allowed permissions that use the namespace as the domain for the authz request
    non_enabled_domains = self._disabled_domains(permission_list)
    logger.debug('Found disabled domains: {}'.format(non_enabled_domains))

    # If disabled domains were found and the caller is not a system service or system admin, disallow
    if non_enabled_domains and identity.user_account_type not in [AccountTypes.admin, AccountTypes.service]:
        raise AccountStateError(non_enabled_domains[0])
def do_cached_pagination(input_items, page=None, limit=None, dosort=True, sortfunc=lambda x: x, query_digest="", ttl=0.0):
    current_time = time.time()

    if ttl <= 0.0:
        logger.debug("skipping cache as ttl is <= 0.0 ({})".format(ttl))
    elif query_digest not in pagination_cache:
        logger.debug("caching query content")
        pagination_cache[query_digest] = {
            'ttl': current_time + float(ttl),
            'content': list(input_items),
        }

    return do_simple_pagination(input_items, page=page, limit=limit, dosort=dosort, sortfunc=sortfunc, query_digest=query_digest, ttl=ttl)
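
# Illustration (not part of the original module): the expiry side of the TTL
# cache pattern above. Each entry stores an absolute deadline under 'ttl', so a
# reader only needs a current-time comparison to decide whether the cached
# content is still fresh.
def _example_cache_fresh(entry: dict) -> bool:
    return time.time() <= entry['ttl']

assert _example_cache_fresh({'ttl': time.time() + 60.0, 'content': []})
assert not _example_cache_fresh({'ttl': time.time() - 1.0, 'content': []})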
def _try_parse_cvss(cvss_list: List[Dict]) -> List[CVSS]:
    """
    Best effort attempt at parsing CVSS from the response. Ignores any errors raised and chugs along
    """
    cvss_objects = []
    if isinstance(cvss_list, list) and cvss_list:
        for cvss_dict in cvss_list:
            try:
                cvss_objects.append(
                    CVSS(
                        version=cvss_dict.get("version"),
                        vector=cvss_dict.get("vector"),
                        base_score=cvss_dict.get("metrics", {}).get("baseScore", -1.0),
                        exploitability_score=cvss_dict.get("metrics", {}).get("exploitabilityScore", -1.0),
                        impact_score=cvss_dict.get("metrics", {}).get("impactScore", -1.0),
                    )
                )
            except (AttributeError, ValueError):
                log.debug("Ignoring error parsing CVSS dict %s", cvss_dict)

    return cvss_objects
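
# Illustration (not part of the original module): the best-effort pattern above
# in miniature - a parse failure on one element is skipped without losing the
# rest of the list.
def _example_best_effort_floats(values: list) -> list:
    out = []
    for v in values:
        try:
            out.append(float(v))
        except (TypeError, ValueError):
            continue  # skip unparseable entries, keep going
    return out

assert _example_best_effort_floats(["3.1", None, "oops", "7"]) == [3.1, 7.0]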
def handle_feed_sync(*args, **kwargs):
    """
    Initiates a feed sync in the system in response to a message from the queue

    :param args:
    :param kwargs:
    :return:
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:
        config = localconfig.get_config()
        feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)
        if feed_sync_enabled:
            try:
                all_ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
                if not all_ready:
                    logger.info("simplequeue service not yet ready, will retry")
                else:
                    try:
                        simplequeue.run_target_with_queue_ttl(system_user,
                                                              queue=feed_sync_queuename,
                                                              target=do_feed_sync,
                                                              max_wait_seconds=30,
                                                              visibility_timeout=180)
                    except Exception as err:
                        logger.warn("failed to process task this cycle: " + str(err))
            except Exception as e:
                logger.error('Caught escaped error in feed sync handler: {}'.format(e))
        else:
            logger.debug("sync_enabled is set to false in config - skipping feed sync")

        time.sleep(cycle_time)

    return True
def ping_docker_registry(registry_record):
    ret = False
    user = ''
    url = ''
    try:
        registry = registry_record['registry']
        registry = registry.split('/', 1)[0]
        verify = registry_record['registry_verify']
        if registry in ['docker.io']:
            url = "https://index.docker.io"
        else:
            url = "https://" + registry

        user, pw = anchore_engine.auth.common.get_docker_registry_userpw(registry_record)

        httpcode, message = ping_docker_registry_v2(url, user, pw, verify=verify)
        if httpcode != 200:
            raise Exception("{}".format(message))

        logger.debug("registry check successful: registry={} user={} code={} message={}".format(registry, user, httpcode, message))
        ret = True
    except Exception as err:
        logger.warn("failed check to access registry (" + str(url) + "," + str(user) + ") - exception: " + str(err))
        raise Exception("failed check to access registry (" + str(url) + "," + str(user) + ") - exception: " + str(err))

    return ret