Example #1
0
def retrying_pull_image(staging_dirs,
                        pullstring,
                        registry_creds=None,
                        manifest=None,
                        parent_manifest=None):
    """
    Retry-wrapper on pull image

    :param staging_dirs:
    :param registry_creds:
    :param manifest:
    :param parent_manifest:
    :param dest_type:
    :return:
    """

    try:
        result = pull_image(staging_dirs, pullstring, registry_creds, manifest,
                            parent_manifest)
        if not result:
            # Unexpected case: pull_image() should return True or raise an exception, but handle it anyway so the retry wrapper still works
            raise Exception(
                "Could not pull image for unknown reason. This is an unexpected error path"
            )
    except Exception as err:
        # Intentionally broad, just for logging since retry will swallow individual errors
        logger.debug_exception(
            "Could not pull image due to error: {}. Will retry".format(
                str(err)))
        raise

    return result
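# --- Hedged usage sketch (not part of the original source) ---
# retrying_pull_image() re-raises every failure so an outer retry driver can decide
# whether to try again. The driver below is illustrative only: the attempt count,
# sleep interval, and the helper name pull_image_with_retries are assumptions, not
# names or values from the codebase.
import time

def pull_image_with_retries(staging_dirs, pullstring, registry_creds=None,
                            manifest=None, parent_manifest=None,
                            max_attempts=3, delay_seconds=2):
    last_err = None
    for attempt in range(1, max_attempts + 1):
        try:
            return retrying_pull_image(staging_dirs, pullstring, registry_creds,
                                       manifest, parent_manifest)
        except Exception as err:
            # Each failed attempt is already logged inside retrying_pull_image()
            last_err = err
            if attempt < max_attempts:
                time.sleep(delay_seconds)
    raise last_err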
Example #2
0
def validate_pullstring_is_digest(pullstring: str) -> bool:
    try:
        parsed = parse_dockerimage_string(pullstring)
        return parsed.get('digest') is not None
    except Exception as e:
        logger.debug_exception('Error parsing pullstring {}. Err = {}'.format(pullstring, e))
        raise ValueError('Error parsing pullstring {}'.format(pullstring))
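# --- Hedged usage sketch (not part of the original source) ---
# Assuming parse_dockerimage_string() returns a dict whose 'digest' key is populated
# only for digest-pinned references, the expected behavior would be roughly:
#
#   validate_pullstring_is_digest("docker.io/library/nginx@sha256:<64-hex-digest>")  # -> True
#   validate_pullstring_is_digest("docker.io/library/nginx:latest")                  # -> False
#   validate_pullstring_is_digest("not a valid pullstring")                          # -> raises ValueError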
Example #3
0
def get_oauth_token():
    """
    POST /oauth/token

    Requires the resource-owners credentials in the Authorization Header.

    This is a bit of a mix of the ResourceOwnerPasswordGrant flow and the ImplicitGrant flow since
    this function will populate the necessary fields to perform a password grant if the Authorization
    header is set and no content body is provided

    :return:
    """

    authz = ApiRequestContextProxy.get_service()._oauth_app

    if authz is None:
        # oauth is not enabled and not supported based on configuration
        return make_response_error(errmsg='Oauth not enabled in configuration', in_httpcode=500), 500

    # Add some default properties if not set in the request
    try:
        if request.content_length == 0 or not request.form:
            logger.debug('Handling converting empty body into form-based grant request')

            if not request.data and not request.form:
                setattr(request, 'form', ImmutableMultiDict([('username', request.authorization.username), ('password', request.authorization.password), ('grant_type', 'password'), ('client_id', 'anonymous')]))

        resp = authz.create_token_response()
        logger.debug('Token resp: {}'.format(resp))
        return resp
    except:
        logger.debug_exception('Error authenticating')
        raise
Example #4
0
def list_images(
    tag=None,
    digest=None,
    imageId=None,
    registry_lookup=False,
    history=False,
    image_status="active",
    analysis_status=None,
):
    try:
        request_inputs = anchore_engine.apis.do_request_prep(
            connexion.request,
            default_params={
                "tag": tag,
                "digest": digest,
                "imageId": imageId,
                "registry_lookup": registry_lookup,
                "history": history,
                "image_status": image_status,
                "analysis_status": analysis_status,
            },
        )
        with db.session_scope() as session:
            (
                return_object,
                httpcode,
            ) = anchore_engine.services.catalog.catalog_impl.image(
                session, request_inputs)

    except Exception as err:
        logger.debug_exception("Error listing images")
        httpcode = 500
        return_object = str(err)

    return return_object, httpcode
Example #5
0
def list_images(tag=None,
                digest=None,
                imageId=None,
                registry_lookup=False,
                history=False,
                image_status='active',
                analysis_status=None):
    try:
        request_inputs = anchore_engine.apis.do_request_prep(
            connexion.request,
            default_params={
                'tag': tag,
                'digest': digest,
                'imageId': imageId,
                'registry_lookup': registry_lookup,
                'history': history,
                'image_status': image_status,
                'analysis_status': analysis_status
            })
        with db.session_scope() as session:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image(
                session, request_inputs)

    except Exception as err:
        logger.debug_exception('Error listing images')
        httpcode = 500
        return_object = str(err)

    return return_object, httpcode
Example #6
0
    def authenticate_token(self, authc_token=None):
        if authc_token:
            subject = Yosai.get_current_subject()
            try:
                subject.login(authc_token)
            except:
                logger.debug_exception('Login failed')
                raise

            user = subject.primary_identifier
            logger.debug('Login complete for user: {}'.format(user))
            if isinstance(user, IdentityContext):
                return user
            else:
                # Simple account lookup to ensure the context identity is complete
                try:
                    logger.debug('Loading identity context from username: {}'.format(user))
                    with session_scope() as db_session:
                        idp = self._idp_factory.for_session(db_session)
                        identity, _ = idp.lookup_user(user)

                        logger.debug('Authc complete for user: {}'.format(user))
                        return identity
                except:
                    logger.debug_exception('Error looking up account for authenticated user')
                    return None
        else:
            logger.debug('Anon auth complete')
            return IdentityContext(username=None, user_account=None, user_account_type=None, user_account_state=None)
Example #7
0
def finalize_import_operation(
    db_session,
    account: str,
    operation_id: str,
    import_manifest: ImportManifest,
    final_state: ImportState = ImportState.processing,
) -> InternalImportManifest:
    """
    Finalize the import operation itself

    :param db_session:
    :param account:
    :param operation_id:
    :param import_manifest:
    :param final_state:
    :return:
    """
    record = (db_session.query(ImageImportOperation).filter_by(
        account=account, uuid=operation_id).one_or_none())
    if not record:
        raise api_exceptions.ResourceNotFound(resource=operation_id, detail={})

    if record.status != ImportState.pending:
        raise api_exceptions.ConflictingRequest(
            message=
            "Invalid operation status. Must be in pending state to finalize",
            detail={"status": record.status.value},
        )

    check_required_content(import_manifest)

    try:
        content_records = verify_import_manifest_content(
            db_session, operation_id, import_manifest)
    except ValueError as ex:
        raise api_exceptions.BadRequest(
            message=
            "One or more referenced content digests not found for the operation id",
            detail={"digest": ex.args[0]},
        )

    try:
        internal_manifest = internal_manifest_from_external(
            import_manifest, content_records)

        # Update the status
        record.status = final_state
        # Queue presence should be gated by the image record, not here
        # queue_import_task(account, operation_id, internal_manifest)
    except:
        logger.debug_exception(
            "Failed to queue task message. Setting failed status")
        record.status = ImportState.failed
        raise

    db_session.flush()

    return internal_manifest
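# --- Hedged illustration (not part of the original source) ---
# State transitions enforced by finalize_import_operation():
#   pending -> final_state (ImportState.processing by default)  on success
#   pending -> ImportState.failed                                if building the internal manifest fails
#   any status other than pending                                raises ConflictingRequest
#   unknown account/operation_id                                 raises ResourceNotFound
#   referenced content digests not found for the operation       raises BadRequest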
Example #8
0
def get_oauth_token(grant_type="password",
                    username=None,
                    password=None,
                    client_id="anonymous"):
    """
    POST /oauth/token

    Requires the resource-owners credentials in the Authorization Header.

    This is a bit of a mix of the ResourceOwnerPasswordGrant flow and the ImplicitGrant flow since
    this function will populate the necessary fields to perform a password grant if the Authorization
    header is set and no content body is provided

    Note: the parameters above are embedded within the connexion request object, but must be specified in the
    method signature in order for connexion to route the request to this method. So while they may appear unused
    in the function body, connexion does consume them.

    :return:
    """

    # Short-circuit if no oauth/token configured
    try:
        tok_mgr = token_manager()
        authz = ApiRequestContextProxy.get_service()._oauth_app
    except Exception as e:
        raise AccessDeniedError("Oauth not enabled in configuration",
                                detail={})

    # Add some default properties if not set in the request
    try:
        if request.content_length == 0 or not request.form:
            logger.debug(
                "Handling converting empty body into form-based grant request")

            if not request.data and not request.form:
                setattr(
                    request,
                    "form",
                    ImmutableMultiDict([
                        ("username", request.authorization.username),
                        ("password", request.authorization.password),
                        ("grant_type", "password"),
                        ("client_id", "anonymous"),
                    ]),
                )

        resp = authz.create_token_response()
        logger.debug("Token resp: {}".format(resp))
        return resp
    except:
        logger.debug_exception("Error authenticating")
        raise
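# --- Hedged usage sketch (not part of the original source) ---
# Because the handler converts an empty body plus HTTP Basic credentials into a
# password-grant form, a client only needs to POST with an Authorization header.
# The base_url value and the assumption that the token response body is JSON are
# illustrative, not taken from the codebase.
import requests

def fetch_token(base_url, username, password):
    # Empty body + Basic auth triggers the form-population branch in get_oauth_token()
    resp = requests.post(base_url + "/oauth/token", auth=(username, password))
    resp.raise_for_status()
    return resp.json()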
Example #9
0
    def __init__(self, token):
        self._token = token
        self.token_info = None
        self._verified = False
        self._identifier = None
        self._claims = None

        try:
            self._parse()
        except:
            logger.debug_exception('Error parsing/verifying token')
            self._identifier = None
            self._verified = False
            self._claims = None
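# --- Hedged usage sketch (not part of the original source) ---
# Parse/verification failures are swallowed in __init__, so callers are expected to
# inspect the verified state rather than catch exceptions. The class name TokenWrapper
# below is a placeholder for whatever class defines this __init__.
#
#   tok = TokenWrapper(raw_jwt_string)
#   if tok._verified:
#       subject = tok._identifier
#   else:
#       ...  # treat as unauthenticated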
Example #10
0
    def authenticate_user(self, username, password):
        try:
            authc_token = UsernamePasswordToken(username=username,
                                                password=password,
                                                remember_me=False)

            authorizer = get_authorizer()
            identity = authorizer.inline_authz([], authc_token=authc_token)
            # Use the user's uuid as the username/subject for the token to avoid name conflicts over time
            if identity is None:
                raise Exception('Unknown user')
            else:
                return User(identity.user_uuid)
        except:
            logger.debug_exception('Error authenticating')
            raise Exception('User authentication failed')
Example #11
0
    def authenticate_account(self, authc_token: JwtToken):
        try:
            assert authc_token.identifier is not None

            # Lookup the account info to verify the user identified by the token is still valid
            authc_info = self.get_authentication_info(authc_token.identifier)

            # Overwrite any creds found in db. Cleanup of token vs password is outside the scope of this handler.
            if not authc_info['authc_info']:
                # No user exists for the identifier
                raise IncorrectCredentialsException

            return authc_info
        except:
            logger.debug_exception('Could not authenticate token')
            raise IncorrectCredentialsException()
Example #12
0
def list_event_types():
    request_inputs = anchore_engine.apis.do_request_prep(request, default_params={})
    user_auth = request_inputs['auth']
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = {}
    httpcode = 500
    try:
        resp = {} # use dict first, then will convert to list
        for evnt in EventBase.registered_events():
            if evnt.__category__.name not in resp:
                resp[evnt.__category__.name] = {
                    'name': evnt.__category__.name,
                    'description': evnt.__category__.description,
                    'subcategories': {}
                }

            subcats = resp[evnt.__category__.name]['subcategories']

            if evnt.__subcategory__.name not in subcats:
                subcats[evnt.__subcategory__.name] = {
                    'name': evnt.__subcategory__.name,
                    'description': evnt.__subcategory__.description,
                    'events': [_event_to_msg(evnt)]
                }
            else:
                subcats[evnt.__subcategory__.name]['events'].append(_event_to_msg(evnt))

        # Flatten back into lists
        return_object = sorted(resp.values(), key=lambda x: x['name'])
        for cat in return_object:
            cat['subcategories'] = sorted(cat['subcategories'].values(), key=lambda x: x['name'])

        httpcode = 200
    except Exception as err:
        logger.debug_exception('Error listing types')
        return_object = anchore_engine.common.helpers.make_response_error(err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return return_object, httpcode
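# --- Hedged illustration of the response shape (not part of the original source) ---
# Categories and their subcategories are returned as name-sorted lists; the event
# entries depend on _event_to_msg(), so they are shown as placeholders here.
#
# [
#   {
#     "name": "<category name>",
#     "description": "<category description>",
#     "subcategories": [
#       {
#         "name": "<subcategory name>",
#         "description": "<subcategory description>",
#         "events": [ "<output of _event_to_msg()>", ... ]
#       },
#       ...
#     ]
#   },
#   ...
# ]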
Example #13
0
    def __init__(
        self,
        credential_fn,
        as_account=None,
        url_provider=default_provider,
        config_provider_fn=localconfig.get_config,
    ):
        """
        Initializes a client for a specific account using the specified credentials (typically the system user credentials)

        :param as_account: The account for which to execute the call as
        """

        # Assert the base type
        self.request_namespace = as_account
        self.auth = None

        self.credential_provider_fn = credential_fn

        self.verify_ssl = True
        self._read_timeout = 0.0
        self._connect_timeout = 0.0

        if config_provider_fn:
            try:
                cfg = config_provider_fn()
            except:
                logger.debug_exception(
                    "Unexpected exception loading configuration from the config provider function"
                )
                raise

            if cfg:
                try:
                    self.verify_ssl = cfg.get("internal_ssl_verify", True)

                    try:
                        self._read_timeout = float(
                            cfg.get("global_client_read_timeout", 0.0)
                        )
                    except ValueError:
                        logger.error(
                            'Invalid value type found in config for "global_client_read_timeout", expected int or float, found {}'.format(
                                type(cfg.get("global_client_read_timeout"))
                            )
                        )
                        pass

                    try:
                        self._connect_timeout = float(
                            cfg.get("global_client_connect_timeout", 0.0)
                        )
                    except ValueError:
                        logger.error(
                            'Invalid value type found in config for "global_client_connect_timeout", expected int or float, found {}'.format(
                                type(cfg.get("global_client_connect_timeout"))
                            )
                        )
                        pass

                    if self._read_timeout < 0:
                        self._read_timeout = 0.0

                    if self._connect_timeout < 0:
                        self._connect_timeout = 0.0
                except:
                    # Default to verify ssl if not set
                    logger.debug_exception(
                        "Could not initialize ssl verification and client timeouts from config due to error"
                    )
                    raise

        self.service_url_provider = url_provider
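# --- Hedged usage sketch (not part of the original source) ---
# The constructor only reads three keys from the provided configuration, so a minimal
# config provider could look like this (values and the InternalServiceClient name are
# illustrative assumptions, not taken from the codebase):
#
#   def example_config_provider():
#       return {
#           "internal_ssl_verify": False,          # disable TLS verification for internal calls
#           "global_client_read_timeout": 30.0,    # seconds; negative values are clamped to 0.0
#           "global_client_connect_timeout": 5.0,  # seconds; negative values are clamped to 0.0
#       }
#
#   client = InternalServiceClient(credential_fn, as_account="admin",
#                                  config_provider_fn=example_config_provider)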
Example #14
0
    def execute(self, feed_name=None, group_name=None) -> LocalFeedDataRepo:
        """
        Uses the parent method to get the full set of data and spool it to disk, then feeds it to the caller one page at a time.

        :param feed_name:
        :param group_name:
        :return:
        """

        try:
            self.local_repo.initialize()
            self.local_repo.metadata.download_result = _download_start_metadata()
            self.local_repo.flush_metadata()
        except:
            logger.debug_exception(
                "Could not initialize the feed data download location: {}. Failing fetch attempt"
                .format(self.local_repo.root_dir))
            raise

        groups_failed = 0
        try:
            for group in self.config.groups:
                if (feed_name and group.feed != feed_name
                        or (group_name and group_name != group.group)):
                    # Skip groups that don't match if a specific group was requested
                    logger.debug(
                        "Download configuration has record for group {}/{} but only {}/{} requested, so skipping"
                        .format(group.feed, group.group, feed_name,
                                group_name))
                    continue

                meta = _group_download_start_metadata(group)
                record_count = 0
                try:
                    self.local_repo.metadata.download_result.results.append(
                        meta)
                    self.local_repo.flush_metadata()

                    logger.info("Downloading data for group {}/{}".format(
                        group.feed, group.group))
                    with timer(
                            "data download for group {}/{}".format(
                                group.feed, group.group),
                            log_level="info",
                    ):
                        for count, group_metadata in self._fetch_group_data(
                                group):
                            record_count += count
                            meta.total_records = record_count
                            meta.group_metadata.update(group_metadata)
                            self.local_repo.flush_metadata()

                    _update_download_complete(meta, record_count)
                except:
                    logger.exception(
                        "Error downloading data for group {}/{}".format(
                            group.feed, group.group))
                    # Ensure consistent state for next phase, so cleanup anything failed
                    _update_download_failed(meta, record_count)

                    groups_failed += 1
                finally:
                    self.local_repo.flush_metadata()

            if groups_failed > 0:
                self.local_repo.metadata.download_result.status = (
                    FeedDownloader.State.failed.value)
            else:
                self.local_repo.metadata.download_result.status = (
                    FeedDownloader.State.complete.value)
        except:
            logger.debug_exception(
                "Error fetching feed data, setting status to failed for operation {}"
                .format(self.config.uuid))
            self.local_repo.metadata.download_result.status = (
                FeedDownloader.State.failed.value)
            raise
        finally:
            logger.info("Feed data download process ending")
            self.local_repo.metadata.download_result.ended = datetime.datetime.utcnow()
            self.local_repo.flush_metadata()

        return self.local_repo
Example #15
0
def add_image(
    image_metadata=None,
    tag=None,
    digest=None,
    created_at=None,
    from_archive=False,
    allow_dockerfile_update=False,
):
    try:
        if image_metadata is None:
            image_metadata = {}

        request_inputs = anchore_engine.apis.do_request_prep(
            connexion.request,
            default_params={
                "tag": tag,
                "digest": digest,
                "created_at": created_at,
                "allow_dockerfile_update": allow_dockerfile_update,
            },
        )
        if image_metadata.get("import_operation_id") and from_archive:
            raise BadRequest(
                'Cannot specify both "from_archive=True" query parameter and include an import manifest in the payload',
                detail={},
            )

        if from_archive:
            # Restore an image from the analysis archive into the working set
            task = archiver.RestoreArchivedImageTask(
                account=ApiRequestContextProxy.namespace(),
                image_digest=digest)
            task.start()

            request_inputs["params"] = {}
            request_inputs["method"] = "GET"

            with db.session_scope() as session:
                (
                    return_object,
                    httpcode,
                ) = anchore_engine.services.catalog.catalog_impl.image_imageDigest(
                    session, request_inputs, digest)

        elif image_metadata.get("import_manifest"):
            # Import an image from the upload API
            try:
                import_manifest = ImportManifest.from_json(
                    image_metadata["import_manifest"])
            except Exception as err:
                logger.debug_exception("Error unmarshalling manifest")
                # If we hit this, it means the swagger spec doesn't match the marshmallow schema
                raise BadRequest(message="invalid import manifest",
                                 detail={"error": str(err)})

            annotations = image_metadata.get("annotations", {})

            # Don't accept an in-line dockerfile
            if image_metadata.get("dockerfile"):
                raise BadRequest(
                    "Cannot provide dockerfile content directly in import payload. Use the import operation APIs to load the dockerfile before calling this endpoint",
                    detail={},
                )

            with db.session_scope() as session:
                # allow_dockerfile_update is a poor proxy for the 'force' option
                return_object = anchore_engine.services.catalog.importer.import_image(
                    session,
                    account=ApiRequestContextProxy.namespace(),
                    operation_id=import_manifest.operation_uuid,
                    import_manifest=import_manifest,
                    force=allow_dockerfile_update,
                    annotations=annotations,
                )
                httpcode = 200
        else:
            # Regular image-add case: analyze from a registry
            with db.session_scope() as session:
                (
                    return_object,
                    httpcode,
                ) = anchore_engine.services.catalog.catalog_impl.image(
                    session, request_inputs, bodycontent=image_metadata)

    except AnchoreApiError:
        raise
    except ImageConflict as img_err:
        httpcode = 409
        return_object = str(img_err)
    except Exception as err:
        logger.exception("Error processing image add")
        httpcode = 500
        return_object = str(err)

    return return_object, httpcode
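# --- Hedged illustration (not part of the original source) ---
# Three request paths handled by add_image():
#   from_archive=True                          -> restore the digest from the analysis archive
#   image_metadata contains an import manifest -> finalize an image import operation
#   otherwise                                  -> regular add: analyze the tag/digest from a registry
# Combining from_archive=True with an import operation in the payload is rejected with BadRequest,
# as is providing inline dockerfile content alongside an import manifest.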
Example #16
0
def store_analysis_results(
    account: str,
    image_digest: str,
    image_record: dict,
    analysis_result: list,
    image_manifest: dict,
    analysis_events: list,
    image_content_types: list,
):
    """

    :param account:
    :param image_digest:
    :param image_record:
    :param analysis_result:
    :param image_manifest:
    :param analysis_events: list of events that any new events may be added to
    :param image_content_types:
    :return:
    """

    try:
        catalog_client = internal_client_for(CatalogClient, account)
    except:
        logger.debug_exception(
            "Cannot instantiate a catalog client to upload results")
        raise

    imageId = None
    try:
        imageId = analysis_result[0]["image"]["imageId"]
    except Exception as err:
        logger.warn(
            "could not get imageId after analysis or from image record - exception: "
            + str(err))

    logger.info(
        "adding image analysis data to catalog: account={} imageId={} imageDigest={}"
        .format(account, imageId, image_digest))
    try:
        logger.info("Saving raw analysis data to catalog object store")
        rc = catalog_client.put_document("analysis_data", image_digest,
                                         analysis_result)
        if not rc:
            # Ugh, this is a bit ugly, but we need to be sure. Should review CatalogClient and ensure this cannot happen, but for now just handle it.
            raise CatalogClientError(
                msg="Catalog client returned failure",
                cause="Invalid response from catalog API - {}".format(str(rc)),
            )
    except Exception as e:
        err = CatalogClientError(
            msg="Failed to upload analysis data to catalog", cause=e)
        event = events.SaveAnalysisFailed(user_id=account,
                                          image_digest=image_digest,
                                          error=err.to_dict())
        analysis_events.append(event)
        raise err

    try:
        logger.info("Extracting and normalizing image content data locally")
        image_content_data = {}

        # TODO: paginate the content data here keep payloads smaller for clients
        for content_type in image_content_types:
            try:
                image_content_data[
                    content_type] = helpers.extract_analyzer_content(
                        analysis_result, content_type, manifest=image_manifest)
            except Exception as err:
                logger.warn("ERR: {}".format(err))
                image_content_data[content_type] = {}

        if image_content_data:
            logger.info("Adding image content data to archive")
            rc = catalog_client.put_document("image_content_data",
                                             image_digest, image_content_data)

            logger.debug("adding image analysis data to image_record")
            helpers.update_image_record_with_analysis_data(
                image_record, analysis_result)

    except Exception as err:
        import traceback

        traceback.print_exc()
        logger.warn(
            "could not store image content metadata to archive - exception: " +
            str(err))

    # Load the result into the policy engine
    logger.info(
        "adding image to policy engine: account={} imageId={} imageDigest={}".
        format(account, imageId, image_digest))
    try:
        import_to_policy_engine(account, imageId, image_digest)
    except Exception as err:
        newerr = PolicyEngineClientError(
            msg="Adding image to policy-engine failed", cause=str(err))
        event = events.PolicyEngineLoadAnalysisFailed(
            user_id=account, image_digest=image_digest, error=newerr.to_dict())
        analysis_events.append(event)
        raise newerr
Example #17
0
    def dispatch(
        self,
        base_url: str,
        method: callable,
        path: str,
        path_params=None,
        query_params=None,
        extra_headers=None,
        body=None,
        connect_timeout=None,
        read_timeout=None,
        files=None,
    ):
        """
        Execute the request and return the response

        :param base_url:
        :param method:
        :param path:
        :param body:
        :param path_params:
        :param query_params:
        :param extra_headers:
        :param connect_timeout:
        :param read_timeout:
        :param files:
        :return:
        """

        if path_params:
            path_params = {
                name: urllib.parse.quote(value) for name, value in path_params.items()
            }
            final_url = "/".join([base_url, path.format(**path_params)])
        else:
            final_url = "/".join([base_url, path])

        # Default is the application/json content type, but if 'files' is specified, let requests set the headers for multipart/form-data
        request_headers = copy.copy(self.__headers__)
        if files:
            request_headers = {}

        if self.request_namespace:
            request_headers["x-anchore-account"] = self.request_namespace

        if extra_headers:
            request_headers.update(extra_headers)

        # Remove any None valued query params
        if query_params:
            filtered_qry_params = {
                k: v
                for k, v in filter(lambda x: x[1] is not None, query_params.items())
            }
        else:
            filtered_qry_params = None

        log_body = ensure_str(body[:512]) + "..." if body and len(body) > 512 else body
        logger.debug(
            "Dispatching: url={url}, headers={headers}, body={body}, params={params}, timeout=({conn_timeout}, {read_timeout}), files={files}".format(
                url=final_url,
                headers=request_headers,
                body=log_body,
                params=filtered_qry_params,
                conn_timeout=connect_timeout,
                read_timeout=read_timeout,
                files=files.keys() if files else files,
            )
        )

        auth = self._build_creds()
        try:
            if connect_timeout and connect_timeout <= 0:
                connect_timeout = None
            if read_timeout and read_timeout <= 0:
                read_timeout = None

            if connect_timeout or read_timeout:
                return method(
                    url=final_url,
                    headers=request_headers,
                    data=body,
                    auth=auth,
                    params=filtered_qry_params,
                    verify=self.verify_ssl,
                    timeout=(connect_timeout, read_timeout),
                    files=files,
                )
            else:
                return method(
                    url=final_url,
                    headers=request_headers,
                    data=body,
                    auth=auth,
                    params=filtered_qry_params,
                    verify=self.verify_ssl,
                    files=files,
                )
        except Exception as e:
            logger.debug_exception(
                "Failed client call to service {} for url: {}. Response: {}".format(
                    self.__service__, final_url, e.__dict__
                )
            )
            raise e
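# --- Hedged usage sketch (not part of the original source) ---
# dispatch() takes the HTTP verb as a callable (e.g. requests.get / requests.post),
# substitutes URL-quoted path parameters into the path template, and drops None-valued
# query parameters before issuing the call. The client instance, URL, and path below
# are illustrative assumptions.
#
#   import requests
#
#   resp = client.dispatch(
#       base_url="http://catalog:8228/v1",
#       method=requests.get,
#       path="images/{digest}",
#       path_params={"digest": image_digest},
#       query_params={"history": None},   # filtered out before the request is made
#       read_timeout=30,
#   )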
Example #18
0
def import_image(operation_id, account,
                 import_manifest: InternalImportManifest):
    """
    The main thread of exec for importing an image

    :param operation_id:
    :param account:
    :param import_manifest:
    :return:
    """
    timer = int(time.time())
    analysis_events = []

    config = localconfig.get_config()
    all_content_types = config.get("image_content_types", []) + config.get(
        "image_metadata_types", [])
    image_digest = import_manifest.digest

    try:
        catalog_client = internal_client_for(CatalogClient, account)

        # check to make sure image is still in DB
        try:
            image_record = catalog_client.get_image(image_digest)
            if not image_record:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.debug_exception("Could not get image record")
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(image_digest) + ") - exception: " + str(err))
            return True

        if image_record["analysis_status"] != taskstate.base_state("analyze"):
            logger.info(
                "dequeued image to import is not in base 'not_analyzed' state - skipping import"
            )
            return True

        try:
            last_analysis_status = image_record["analysis_status"]
            image_record = update_analysis_started(catalog_client,
                                                   image_digest, image_record)

            logger.info("Loading content from import")
            sbom_map = get_content(import_manifest, catalog_client)

            manifest = sbom_map.get("manifest")

            try:
                logger.info("processing image import data")
                image_data, analysis_manifest = process_import(
                    image_record, sbom_map, import_manifest)
            except AnchoreException as e:
                event = events.ImageAnalysisFailed(user_id=account,
                                                   image_digest=image_digest,
                                                   error=e.to_dict())
                analysis_events.append(event)
                raise

            # Store the manifest in the object store
            logger.info("storing image manifest")
            catalog_client.put_document(bucket="manifest_data",
                                        name=image_digest,
                                        inobj=json.dumps(manifest))

            # Save the results to the upstream components and data stores
            logger.info("storing import result")
            store_analysis_results(
                account,
                image_digest,
                image_record,
                image_data,
                manifest,
                analysis_events,
                all_content_types,
            )

            logger.info("updating image catalog record analysis_status")
            last_analysis_status = image_record["analysis_status"]
            image_record = update_analysis_complete(catalog_client,
                                                    image_digest, image_record)
            try:
                analysis_events.extend(
                    notify_analysis_complete(image_record,
                                             last_analysis_status))
            except Exception as err:
                logger.warn(
                    "failed to enqueue notification on image analysis state update - exception: "
                    + str(err))

            logger.info("analysis complete: " + str(account) + " : " +
                        str(image_digest))

            try:
                catalog_client.update_image_import_status(operation_id,
                                                          status="complete")
            except Exception as err:
                logger.debug_exception(
                    "failed updating import status success, will continue and rely on expiration for GC later"
                )

            try:
                metrics.counter_inc(name="anchore_import_success")
                run_time = float(time.time() - timer)

                metrics.histogram_observe(
                    "anchore_import_time_seconds",
                    run_time,
                    buckets=IMPORT_TIME_SECONDS_BUCKETS,
                    status="success",
                )

            except Exception as err:
                logger.warn(str(err))

        except Exception as err:
            run_time = float(time.time() - timer)
            logger.exception("problem importing image - exception: " +
                             str(err))
            analysis_failed_metrics(run_time)

            # Transition the image record to failure status
            image_record = update_analysis_failed(catalog_client, image_digest,
                                                  image_record)

            try:
                catalog_client.update_image_import_status(operation_id,
                                                          status="failed")
            except Exception as err:
                logger.debug_exception(
                    "failed updating import status failure, will continue and rely on expiration for GC later"
                )

            if account and image_digest:
                for image_detail in image_record["image_detail"]:
                    fulltag = (image_detail["registry"] + "/" +
                               image_detail["repo"] + ":" +
                               image_detail["tag"])
                    event = events.UserAnalyzeImageFailed(user_id=account,
                                                          full_tag=fulltag,
                                                          error=str(err))
                    analysis_events.append(event)
        finally:
            if analysis_events:
                emit_events(catalog_client, analysis_events)

    except Exception as err:
        logger.debug_exception("Could not import image")
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return True
Example #19
0
    def __init__(self,
                 credential,
                 as_account=None,
                 url_provider=default_provider,
                 config_provider_fn=localconfig.get_config):
        """
        Initializes a client for a specific account using the specified credentials (typically the system user credentials)

        :param as_account: The account for which to execute the call as
        """

        # Assert the base type
        self.request_namespace = as_account
        self.auth = None

        if isinstance(credential, HttpBearerCredential):
            token = credential.get_creds()
            self.auth = BearerTokenAuth(token)

        elif isinstance(credential, HttpBasicCredential):
            self.auth = HTTPBasicAuth(credential.get_creds()[0],
                                      credential.get_creds()[1])
        else:
            raise TypeError('credential not of expected type')

        self.verify_ssl = True
        self._read_timeout = 0.0
        self._connect_timeout = 0.0

        if config_provider_fn:
            try:
                cfg = config_provider_fn()
            except:
                logger.debug_exception(
                    'Unexpected exception loading configuration from the config provider function'
                )
                raise

            if cfg:
                try:
                    self.verify_ssl = cfg.get('internal_ssl_verify', True)

                    try:
                        self._read_timeout = float(
                            cfg.get('global_client_read_timeout', 0.0))
                    except ValueError:
                        logger.error(
                            'Invalid value type found in config for "global_client_read_timeout", expected int or float, found {}'
                            .format(type(
                                cfg.get('global_client_read_timeout'))))
                        pass

                    try:
                        self._connect_timeout = float(
                            cfg.get('global_client_connect_timeout', 0.0))
                    except ValueError:
                        logger.error(
                            'Invalid value type found in config for "global_client_connect_timeout", expected int or float, found {}'
                            .format(
                                type(
                                    cfg.get('global_client_connect_timeout'))))
                        pass

                    if self._read_timeout < 0:
                        self._read_timeout = 0.0

                    if self._connect_timeout < 0:
                        self._connect_timeout = 0.0
                except:
                    # Default to verify ssl if not set
                    logger.debug_exception(
                        'Could not initialize ssl verification and client timeouts from config due to error'
                    )
                    raise

        self.service_url_provider = url_provider
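# --- Hedged usage sketch (not part of the original source) ---
# This variant accepts a concrete credential object rather than a provider function.
# The constructor signatures of HttpBasicCredential/HttpBearerCredential and the class
# name InternalServiceClient are assumptions for illustration only.
#
#   creds = HttpBasicCredential("admin", "foobar")     # assumed (username, password) constructor
#   client = InternalServiceClient(creds, as_account="admin")
#   # Requests issued by this client use HTTPBasicAuth and, per dispatch() in Example #17,
#   # carry an 'x-anchore-account: admin' header because request_namespace is set.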