Example #1
def delete_policy(policyId, cleanup_evals=False):
    """
    DELETE /policies/{policyId}?cleanup_evals=true|false

    :param policyId:
    :param cleanup_evals:
    :return:
    """

    httpcode = 200
    return_object = True

    try:
        with db.session_scope() as dbsession:
            request_inputs = anchore_engine.apis.do_request_prep(
                connexion.request, default_params={})
            user_id = request_inputs['userId']

            policy_record = db_policybundle.get(user_id,
                                                policyId,
                                                session=dbsession)

            if policy_record:
                rc, httpcode = do_policy_delete(user_id,
                                                policy_record,
                                                dbsession,
                                                force=True,
                                                cleanup_evals=cleanup_evals)
                if not 200 <= httpcode < 300:
                    raise Exception(str(rc))

            return return_object, httpcode
    except Exception as ex:
        logger.exception('Uncaught exception')
        raise
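
A small self-contained illustration of the 2xx success check used above. A membership test against list(range(200, 299)) stops at 298 and would misclassify 299, which is why the inclusive comparison is preferred:

def is_success(httpcode):
    """True for any 2xx status code, inclusive of 299."""
    return 200 <= httpcode < 300

assert is_success(200) and is_success(299)
assert not is_success(404)
assert 299 not in range(200, 299)  # the off-by-one a range-based check has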
Example #2
def image_get(tag=None,
              digest=None,
              imageId=None,
              registry_lookup=False,
              history=False):
    try:
        request_inputs = anchore_engine.services.common.do_request_prep(
            connexion.request,
            default_params={
                'tag': tag,
                'digest': digest,
                'imageId': imageId,
                'registry_lookup': registry_lookup,
                'history': history
            })
        with db.session_scope() as session:
            return_object, httpcode = anchore_engine.services.catalog.catalog_impl.image(
                session, request_inputs)

    except Exception as err:
        httpcode = 500
        return_object = str(err)

    return (return_object, httpcode)
Example #3
def list_archives():
    """
    GET /archives

    :return:
    """
    try:
        with session_scope() as session:
            imgs = db_archived_images.summarize(session) or []
            rules = (
                session.query(ArchiveTransitionRule)
                .filter_by(account=ApiRequestContextProxy.namespace())
                .all()
                or []
            )
            rule_count = len(rules)
            newest = None
            if rule_count > 0:
                newest = epoch_to_rfc3339(max(map(lambda x: x.last_updated, rules)))

        return {"images": imgs, "rules": {"count": rule_count, "last_updated": newest}}
    except Exception as ex:
        logger.exception("Failed to list archives")
        return make_response_error(ex, in_httpcode=500), 500
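
For reference, a minimal sketch of the epoch_to_rfc3339 helper used above, on the assumption that it renders a unix epoch timestamp as an RFC 3339 UTC string (illustrative only; the real helper lives in anchore_engine's utility code):

import datetime

def epoch_to_rfc3339(epoch_ts):
    """Render a unix epoch timestamp as an RFC 3339 UTC string (sketch)."""
    return datetime.datetime.utcfromtimestamp(epoch_ts).isoformat() + "Z"

print(epoch_to_rfc3339(0))  # 1970-01-01T00:00:00Z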
Example #4
    def put(self, userId, bucket, archiveid, data):
        """
        Expects a json parsed payload to write

        :param userId:
        :param bucket:
        :param archiveid:
        :param data: string data to write
        :return:
        """
        if not self.primary_client:
            raise Exception("archive not initialized")

        try:
            final_payload, is_compressed = self._do_compress(data)

            size = len(final_payload)
            digest = hashlib.md5(final_payload).hexdigest()

            url = self.primary_client.put(userId, bucket, archiveid,
                                          final_payload)
            with session_scope() as dbsession:
                db_archivemetadata.add(userId,
                                       bucket,
                                       archiveid,
                                       archiveid + ".json",
                                       url,
                                       is_compressed=is_compressed,
                                       content_digest=digest,
                                       size=size,
                                       session=dbsession)
        except Exception as err:
            logger.debug("cannot put data: exception - " + str(err))
            raise

        return True
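
put() relies on a _do_compress() helper returning the final payload bytes plus a flag recording whether compression was applied. A hedged sketch of that contract, with an illustrative size threshold (the real implementation and threshold live elsewhere in the archive subsystem):

import gzip

COMPRESS_MIN_SIZE = 1024  # illustrative threshold, not the real default

def _do_compress(data):
    """Return (payload_bytes, is_compressed) for a payload (sketch)."""
    payload = data.encode("utf-8") if isinstance(data, str) else data
    if len(payload) >= COMPRESS_MIN_SIZE:
        return gzip.compress(payload), True
    return payload, False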
Example #5
def list_migrations():

    with session_scope() as db:
        tasks = db_tasks.get_all(task_type=ArchiveMigrationTask,
                                 session=db,
                                 json_safe=True)

    fields = [
        'id', 'state', 'started_at', 'ended_at', 'migrate_from_driver',
        'migrate_to_driver', 'archive_documents_migrated',
        'archive_documents_to_migrate', 'last_updated'
    ]

    headers = [
        'id', 'state', 'start time', 'end time', 'from', 'to',
        'migrated count', 'total to migrate', 'last updated'
    ]

    tbl = PrettyTable(field_names=headers)
    tbl.set_style(PLAIN_COLUMNS)
    for t in tasks:
        tbl.add_row([t[x] for x in fields])

    print(tbl.get_string(sortby='id'))
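
list_migrations() renders its output with the third-party prettytable package; a self-contained example of the same PLAIN_COLUMNS style (pip install prettytable):

from prettytable import PLAIN_COLUMNS, PrettyTable

tbl = PrettyTable(field_names=["id", "state"])
tbl.set_style(PLAIN_COLUMNS)
tbl.add_row(["2", "complete"])
tbl.add_row(["1", "running"])
print(tbl.get_string(sortby="id"))  # rows come back sorted by the 'id' column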
Example #6
def policy_engine_packages_upgrade_007_008():
    from anchore_engine.db import session_scope, ImagePackage, ImageNpm, ImageGem, Image
    if True:
        engine = anchore_engine.db.entities.common.get_engine()

        file_path_length = 512
        hash_length = 80

        new_columns = [{
            'table_name':
            'image_packages',
            'columns': [
                Column('pkg_path', String(file_path_length), primary_key=True),
                Column('pkg_path_hash', String(hash_length)),
                Column('metadata_json', StringJSON),
            ]
        }, {
            'table_name':
            'image_package_vulnerabilities',
            'columns': [
                Column('pkg_path', String(file_path_length), primary_key=True),
            ]
        }, {
            'table_name':
            'image_package_db_entries',
            'columns': [
                Column('pkg_path', String(file_path_length), primary_key=True),
            ]
        }]

        log.err("creating new table columns")
        for table in new_columns:
            for column in table['columns']:
                log.err("creating new column ({}) in table ({})".format(
                    column.name, table.get('table_name', "")))
                try:
                    cn = column.compile(dialect=engine.dialect)
                    ct = column.type.compile(engine.dialect)
                    engine.execute(
                        'ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s' %
                        (table['table_name'], cn, ct))
                except Exception as e:
                    log.err(
                        'failed to perform DB upgrade on {} adding column - exception: {}'
                        .format(table, str(e)))
                    raise Exception(
                        'failed to perform DB upgrade on {} adding column - exception: {}'
                        .format(table, str(e)))

        # populate the new columns
        log.err("updating new column (pkg_path) - this may take a while")
        for table in ['image_packages', 'image_package_vulnerabilities']:
            log.err("updating table ({}) column (pkg_path)".format(table))
            done = False
            while not done:
                startts = time.time()
                rc = engine.execute(
                    "UPDATE {} set pkg_path='pkgdb' where pkg_path is null".
                    format(table))
                log.err(
                    "updated {} records in {} (time={}), performing next range"
                    .format(rc.rowcount, table,
                            time.time() - startts))
                done = True

        with session_scope() as dbsession:
            db_image_ids = dbsession.query(Image.id).distinct().all()

        total_records = len(db_image_ids)
        record_count = 0
        for record in db_image_ids:
            db_image_id = record[0]
            startts = time.time()
            rc = engine.execute(
                "UPDATE image_package_db_entries set pkg_path='pkgdb' where image_id='{}' and pkg_path is null"
                .format(db_image_id))
            record_count = record_count + 1
            log.err(
                "updated {} image ({} / {}) in {} (time={}), performing next image update"
                .format(db_image_id, record_count, total_records,
                        'image_package_db_entries',
                        time.time() - startts))

        exec_commands = [
            'ALTER TABLE image_package_vulnerabilities DROP CONSTRAINT IF EXISTS image_package_vulnerabilities_pkg_image_id_fkey',
            'ALTER TABLE image_package_db_entries DROP CONSTRAINT IF EXISTS image_package_db_entries_image_id_fkey',
            'ALTER TABLE image_packages DROP CONSTRAINT IF EXISTS image_packages_pkey',
            'ALTER TABLE image_package_db_entries DROP CONSTRAINT IF EXISTS image_package_db_entries_pkey',
            'ALTER TABLE image_package_vulnerabilities DROP CONSTRAINT IF EXISTS image_package_vulnerabilities_pkey',
        ]

        log.err(
            "dropping primary key / foreign key relationships for new column")
        cmdcount = 1
        for command in exec_commands:
            log.err("running update operation {} of {}: {}".format(
                cmdcount, len(exec_commands), command))
            engine.execute(command)
            cmdcount = cmdcount + 1

        exec_commands = [
            'ALTER TABLE image_packages ADD PRIMARY KEY (image_id,image_user_id,name,version,pkg_type,arch,pkg_path)',
            'ALTER TABLE image_package_vulnerabilities ADD PRIMARY KEY (pkg_user_id,pkg_image_id,pkg_name,pkg_version,pkg_type,pkg_arch,vulnerability_id,pkg_path)',
            'ALTER TABLE image_package_db_entries ADD PRIMARY KEY (image_id, image_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path,file_path)',
            'ALTER TABLE image_package_vulnerabilities ADD CONSTRAINT image_package_vulnerabilities_pkg_image_id_fkey FOREIGN KEY (pkg_image_id, pkg_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path) REFERENCES image_packages (image_id, image_user_id, name, version, pkg_type, arch, pkg_path) MATCH SIMPLE',
            'ALTER TABLE image_package_db_entries ADD CONSTRAINT image_package_db_entries_image_id_fkey FOREIGN KEY (image_id, image_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path) REFERENCES image_packages (image_id, image_user_id, name, version, pkg_type, arch, pkg_path) MATCH SIMPLE',

            # These are helpers for the upgrade itself, not needed by the functioning system. Needed for large npm/gem tables and pagination support
            "CREATE SEQUENCE IF NOT EXISTS image_npms_seq_id_seq",
            "ALTER TABLE image_npms add column IF NOT EXISTS seq_id int DEFAULT nextval('image_npms_seq_id_seq')",
            "CREATE INDEX IF NOT EXISTS idx_npm_seq ON image_npms using btree (seq_id)",
            "CREATE SEQUENCE IF NOT EXISTS image_gems_seq_id_seq",
            "ALTER TABLE image_gems add column IF NOT EXISTS seq_id int DEFAULT nextval('image_gems_seq_id_seq')",
            "CREATE INDEX IF NOT EXISTS idx_gem_seq ON image_gems using btree (seq_id)",
            "ALTER TABLE image_packages ALTER COLUMN origin TYPE varchar"
        ]

        log.err(
            "updating primary key / foreign key relationships for new column - this may take a while"
        )
        cmdcount = 1
        for command in exec_commands:
            log.err("running update operation {} of {}: {}".format(
                cmdcount, len(exec_commands), command))
            engine.execute(command)
            cmdcount = cmdcount + 1

        log.err(
            "converting ImageNpm and ImageGem records into ImagePackage records - this may take a while"
        )
        # migrate ImageNpm and ImageGem records into ImagePackage records
        with session_scope() as dbsession:
            total_npms = dbsession.query(ImageNpm).count()
            total_gems = dbsession.query(ImageGem).count()

        log.err("will migrate {} image npm records".format(total_npms))

        npms = []
        chunk_size = 8192
        record_count = 0
        skipped_count = 0

        with session_scope() as dbsession:
            try:
                last_seq = -1
                while record_count < total_npms:
                    chunk_time = time.time()
                    log.err('Processing next chunk of records')
                    for n in dbsession.query(ImageNpm).filter(
                            ImageNpm.seq_id > last_seq).order_by(
                                ImageNpm.seq_id).limit(chunk_size):
                        np = ImagePackage()

                        # primary keys
                        np.name = n.name
                        if n.versions_json:
                            version = n.versions_json[0]
                        else:
                            version = "N/A"
                        np.version = version
                        np.pkg_type = 'npm'
                        np.arch = 'N/A'
                        np.image_user_id = n.image_user_id
                        np.image_id = n.image_id
                        np.pkg_path = n.path

                        # other
                        np.pkg_path_hash = n.path_hash
                        np.distro_name = 'npm'
                        np.distro_version = 'N/A'
                        np.like_distro = 'npm'
                        np.fullversion = np.version
                        np.license = ' '.join(n.licenses_json)
                        np.origin = ' '.join(n.origins_json)
                        fullname = np.name
                        np.normalized_src_pkg = fullname
                        np.src_pkg = fullname

                        npms.append(np)
                        last_seq = n.seq_id

                    if len(npms):
                        log.err('Inserting {} new records'.format(len(npms)))

                        startts = time.time()
                        try:
                            with session_scope() as dbsession2:
                                dbsession2.bulk_save_objects(npms)
                        except Exception as err:
                            log.err("skipping duplicates: {}".format(err))
                            skipped_count += 1

                        record_count = record_count + len(npms)
                        log.err("merged {} / {} npm records (time={})".format(
                            record_count, total_npms,
                            time.time() - startts))

                    log.err(
                        'Chunk took: {} seconds to process {} records'.format(
                            time.time() - chunk_time, len(npms)))
                    npms = []

            except Exception as err:
                log.err('Error during npm migration: {}'.format(err))
                raise

        log.err("will migrate {} image gem records".format(total_gems))
        gems = []
        record_count = 0
        skipped_count = 0
        with session_scope() as dbsession:
            try:
                last_seq = -1
                while record_count < total_gems:
                    chunk_time = time.time()
                    log.err('Processing next chunk of records')
                    for n in dbsession.query(ImageGem).filter(
                            ImageGem.seq_id > last_seq).order_by(
                                ImageGem.seq_id).limit(chunk_size):

                        np = ImagePackage()

                        # primary keys
                        np.name = n.name
                        if n.versions_json:
                            version = n.versions_json[0]
                        else:
                            version = "N/A"
                        np.version = version
                        np.pkg_type = 'gem'
                        np.arch = 'N/A'
                        np.image_user_id = n.image_user_id
                        np.image_id = n.image_id
                        np.pkg_path = n.path

                        # other
                        np.pkg_path_hash = n.path_hash
                        np.distro_name = 'gem'
                        np.distro_version = 'N/A'
                        np.like_distro = 'gem'
                        np.fullversion = np.version
                        np.license = ' '.join(n.licenses_json)
                        np.origin = ' '.join(n.origins_json)
                        fullname = np.name
                        np.normalized_src_pkg = fullname
                        np.src_pkg = fullname
                        gems.append(np)
                        last_seq = n.seq_id

                    if len(gems):
                        log.err('Inserting {} new records'.format(len(gems)))

                        startts = time.time()
                        try:
                            with session_scope() as dbsession2:
                                dbsession2.bulk_save_objects(gems)
                        except Exception as err:
                            log.err("skipping duplicates: {}".format(err))
                            skipped_count += 1

                        record_count = record_count + len(gems)
                        log.err("merged {} / {} gem records (time={})".format(
                            record_count, total_gems,
                            time.time() - startts))

                    log.err(
                        'Chunk took: {} seconds to process {} records'.format(
                            time.time() - chunk_time, len(gems)))
                    gems = []

            except Exception as err:
                log.err('Error during gem migration: {}'.format(err))
                raise
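
The chunked loops above implement keyset pagination: remember the last seq_id seen and fetch the next chunk with seq_id > last_seq, ordered by the key column so the resume point is stable. A self-contained sketch of the pattern against an illustrative SQLAlchemy model (not the anchore_engine schema):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Record(Base):
    __tablename__ = "records"
    seq_id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Record(name="r{}".format(i)) for i in range(10)])
    session.commit()

    chunk_size, last_seq = 4, -1
    while True:
        chunk = (session.query(Record)
                 .filter(Record.seq_id > last_seq)
                 .order_by(Record.seq_id)  # ordering makes the resume safe
                 .limit(chunk_size)
                 .all())
        if not chunk:
            break
        last_seq = chunk[-1].seq_id
        print([r.seq_id for r in chunk])  # [1..4], then [5..8], then [9, 10]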
Example #7
def initiate_migration(from_config, to_config, remove_on_source=False, do_lock=True):
    """
    Start a migration operation from one config to another, with optionally removing the data on the source and optionally using a global lock.

    Expects the input configs to be already validated and normalized.

    :param from_config:
    :param to_config:
    :param remove_on_source:
    :param do_lock:
    :return:
    """

    logger.info('Initializing migration from {} to {}'.format(from_config, to_config))


    with migration_context(from_config, to_config, do_lock=do_lock) as context:
        with session_scope() as db:
            # Load all metadata
            to_migrate = [(record.userId, record.bucket, record.archiveId, record.content_url) for record in db.query(ArchiveMetadata).filter(ArchiveMetadata.content_url.like(context.from_archive.primary_client.__uri_scheme__ + '://%'))]

            task_record = ArchiveMigrationTask()
            task_record.archive_documents_to_migrate = len(to_migrate)
            task_record.archive_documents_migrated = 0
            task_record.migrate_from_driver = context.from_archive.primary_client.__config_name__
            task_record.migrate_to_driver = context.to_archive.primary_client.__config_name__
            task_record.state = 'running'
            task_record.started_at = datetime.datetime.utcnow()

            task_record.executor_id = get_threadbased_id()

            db.add(task_record)
            db.flush()
            task_id = task_record.id
            logger.info('Migration Task Id: {}'.format(task_id))

        logger.info('Entering main migration loop')
        logger.info('Migrating {} documents'.format(len(to_migrate)))
        counter = 0
        result_state = 'failed'

        try:
            for (userId, bucket, archiveId, content_url) in to_migrate:

                try:
                    # Use high-level archive operations to ensure compression etc are updated appropriately
                    data = context.from_archive.get(userId, bucket, archiveId)
                    context.to_archive.put(userId, bucket, archiveId, data)

                    if remove_on_source:
                        if context.from_archive.primary_client.__config_name__ != context.to_archive.primary_client.__config_name__:
                            logger.info('Deleting document on source after successful migration to destination. Src = {}'.format(content_url))
                            # Only delete after commit is complete
                            try:
                                context.from_archive.primary_client.delete_by_uri(content_url)
                            except Exception as e:
                                logger.exception('Error cleaning up old record with uri: {}. Aborting migration'.format(content_url))
                                raise
                        else:
                            logger.info('Skipping removal of documents on source because source and dest drivers are the same')
                    else:
                        logger.info('Skipping removal of document on source driver because configured to leave source data.')
                    counter = counter + 1
                except Exception as e:
                    logger.exception('Error migrating content url: {} from {} to {}'.format(content_url, context.from_archive.primary_client.__config_name__, context.to_archive.primary_client.__config_name__))
            else:
                result_state = 'complete'

        finally:
            with session_scope() as db:
                db.add(task_record)
                db.refresh(task_record)
                task_record.last_state = task_record.state
                task_record.state = result_state
                task_record.ended_at = datetime.datetime.utcnow()
                task_record.archive_documents_migrated = counter
                logger.info('Migration result summary: {}'.format(json.dumps(task_record.to_json())))
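
initiate_migration() sets result_state through a for/else: the else branch runs only when the loop completes without break, and since this loop never breaks, per-document failures are logged while the task still finishes 'complete'. A tiny demonstration of that control flow:

result_state = "failed"
for doc in ["a", "b", "c"]:
    if doc is None:
        break  # a break would skip the else branch below
else:
    result_state = "complete"
assert result_state == "complete"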
Example #8
def create_user_credential(accountname, username, credential):
    """
    POST /accounts/{accountname}/users/{username}/credentials
    :param accountname: str account id for account account record
    :param username: str username
    :param credential: json object of the credential type
    :return: credential json object
    """

    try:
        with session_scope() as session:
            mgr = manager_factory.for_session(session)
            user = verify_user(username, accountname, mgr)

            if user["type"] != UserTypes.native:
                return (
                    make_response_error(
                        "Users with type other than 'native' cannot have password credentials",
                        in_httpcode=400,
                    ),
                    400,
                )

            # For now, only support passwords via the api
            if credential["type"] != "password":
                return (
                    make_response_error("Invalid credential type",
                                        in_httpcode=400),
                    400,

            if not credential.get("value"):
                return (
                    make_response_error(
                        "Invalid credential value, must be non-null and non-empty",
                        in_httpcode=400,
                    ),
                    400,
                )

            try:
                cred_type = UserAccessCredentialTypes(credential["type"])
            except ValueError:
                return (
                    make_response_error(errmsg="Invalid credential type",
                                        in_httpcode=400),
                    400,
                )

            cred = mgr.add_user_credential(username=username,
                                           credential_type=cred_type,
                                           value=credential["value"])

            return credential_db_to_msg(cred), 200
    except UserNotFoundError as ex:
        return make_response_error("User not found", in_httpcode=404), 404
    except AccountNotFoundError as ex:
        return make_response_error("Account not found", in_httpcode=404), 404
    except Exception as e:
        logger.exception("API Error")
        return (
            make_response_error(
                "Internal error creating credential {}".format(accountname)),
            500,
        )
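
The cred_type conversion above uses a Python enum for input validation: constructing an Enum from an unknown value raises ValueError, which is what the narrowed except clause catches. An illustrative stand-in for UserAccessCredentialTypes:

from enum import Enum

class CredentialType(Enum):
    password = "password"

assert CredentialType("password") is CredentialType.password
try:
    CredentialType("token")
except ValueError as err:
    print("Invalid credential type:", err)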
Example #9
def imagepolicywebhook(bodycontent):

    # TODO - while the image policy webhook feature is in k8s beta, we've decided to make any errors that occur during check still respond with 'allowed: True'.  This should be reverted to default to 'False' on any error, once the k8s feature is further along

    return_object = {
        "apiVersion": "imagepolicy.k8s.io/v1alpha1",
        "kind": "ImageReview",
        "status": {
            "allowed": True,
            "reason": "all images passed anchore policy evaluation"
        }
    }
    httpcode = 200

    try:
        request_inputs = anchore_engine.apis.do_request_prep(connexion.request,
                                                             default_params={})

        user_auth = request_inputs['auth']
        method = request_inputs['method']
        params = request_inputs['params']
        userId = request_inputs['userId']

        try:

            final_allowed = True
            reason = "unset"

            try:
                try:
                    incoming = bodycontent
                    logger.debug("incoming post data: " +
                                 json.dumps(incoming, indent=4))
                except Exception as err:
                    raise Exception("could not load post data as json: " +
                                    str(err))

                try:
                    requestUserId = None
                    requestPolicyId = None
                    # see if the request from k8s contains an anchore policy and/or whitelist name
                    if 'annotations' in incoming['spec']:
                        logger.debug(
                            "incoming request contains annotations: " +
                            json.dumps(incoming['spec']['annotations'],
                                       indent=4))
                        requestUserId = incoming['spec']['annotations'].pop(
                            "anchore.image-policy.k8s.io/userId", None)
                        requestPolicyId = incoming['spec']['annotations'].pop(
                            "anchore.image-policy.k8s.io/policyBundleId", None)
                except Exception as err:
                    raise Exception("could not parse out annotations: " +
                                    str(err))

                if not requestUserId:
                    raise Exception(
                        "need to specify an anchore.image-policy.k8s.io/userId annotation with a valid anchore service username as a value"
                    )

                # TODO - get anchore system uber cred to access
                # this data on behalf of user?  tough...maybe see
                # if kuber can make request as anchore-system so
                # that we can switch roles?
                localconfig = anchore_engine.configuration.localconfig.get_config()
                system_user_auth = localconfig['system_user_auth']

                # TODO: zhill This is bad, this is identity switching, need to resolve (this is not a change to previous behavior, but that behavior was bad)
                with session_scope() as dbsession:
                    mgr = manager_factory.for_session(dbsession)
                    request_user_auth = mgr.get_credentials_for_userid(
                        requestUserId)

                catalog = CatalogClient(
                    user=request_user_auth[0],
                    password=request_user_auth[1],
                    as_account=requestUserId if requestUserId else
                    ApiRequestContextProxy.effective_account())

                reason = "all images passed anchore policy checks"
                final_action = False
                for el in incoming['spec']['containers']:
                    image = el['image']
                    logger.debug("found image in request: " + str(image))
                    image_records = catalog.get_image(tag=image)
                    if not image_records:
                        raise Exception("could not find requested image (" +
                                        str(image) + ") in anchore service DB")

                    for image_record in image_records:
                        imageDigest = image_record['imageDigest']

                        for image_detail in image_record['image_detail']:
                            fulltag = image_detail[
                                'registry'] + "/" + image_detail[
                                    'repo'] + ':' + image_detail['tag']
                            result = catalog.get_eval_latest(
                                tag=fulltag,
                                imageDigest=imageDigest,
                                policyId=requestPolicyId)
                            if result:
                                httpcode = 200
                                if result['final_action'].upper() not in [
                                        'GO', 'WARN'
                                ]:
                                    final_action = False
                                    raise Exception(
                                        "image failed anchore policy check: " +
                                        json.dumps(result, indent=4))
                                else:
                                    final_action = True

                            else:
                                httpcode = 404
                                final_action = False
                                raise Exception(
                                    "no anchore evaluation available for image: "
                                    + str(image))

                final_allowed = final_action

            except Exception as err:
                reason = str(err)
                final_allowed = False
                httpcode = 200

            return_object['status']['allowed'] = final_allowed
            return_object['status']['reason'] = reason

            anchore_engine.subsys.metrics.counter_inc(
                "anchore_image_policy_webhooks_evaluation_total",
                allowed=final_allowed)

            httpcode = 200
        except Exception as err:
            return_object['reason'] = str(err)
            httpcode = 500

    except Exception as err:
        return_object['reason'] = str(err)
        httpcode = 500

    return (return_object, httpcode)
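
For context, an illustrative ImageReview request body of the shape imagepolicywebhook() parses: container images under spec, plus the two anchore annotations it pops out (all values here are made up):

sample_body = {
    "apiVersion": "imagepolicy.k8s.io/v1alpha1",
    "kind": "ImageReview",
    "spec": {
        "containers": [
            {"image": "docker.io/library/nginx:latest"},
        ],
        "annotations": {
            "anchore.image-policy.k8s.io/userId": "admin",
            "anchore.image-policy.k8s.io/policyBundleId": "default",
        },
    },
}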
Example #10
def registerService(sname, config, enforce_unique=True):
    ret = False
    myconfig = config['services'][sname]

    # TODO add version support/detection here

    service_template = {'type': 'anchore', 'base_url': 'N/A', 'version': 'v1'}

    if 'ssl_enable' in myconfig and myconfig['ssl_enable']:
        hstring = "https"
    else:
        hstring = "http"

    endpoint_hostname = endpoint_port = endpoint_hostport = None
    if 'endpoint_hostname' in myconfig:
        endpoint_hostname = myconfig['endpoint_hostname']
        service_template[
            'base_url'] = hstring + "://" + myconfig['endpoint_hostname']
    if 'port' in myconfig:
        endpoint_port = int(myconfig['port'])
        service_template['base_url'] += ":" + str(endpoint_port)

    if endpoint_hostname:
        endpoint_hostport = endpoint_hostname
        if endpoint_port:
            endpoint_hostport = endpoint_hostport + ":" + str(endpoint_port)

    try:
        service_template['status'] = True
        service_template['status_message'] = "registered"

        with session_scope() as dbsession:
            service_records = db_services.get_byname(sname, session=dbsession)

            # fail if trying to add a service that must be unique in the system, but one already is registered in DB
            if enforce_unique:
                if len(service_records) > 1:
                    raise Exception(
                        "more than one entry for service type (" + str(sname) +
                        ") exists in DB, but service must be unique - manual DB intervention required"
                    )

                for service_record in service_records:
                    if service_record and (service_record['hostid'] !=
                                           config['host_id']):
                        raise Exception(
                            "service type (" + str(sname) +
                            ") already exists in system with different host_id - detail: my_host_id="
                            + str(config['host_id']) + " db_host_id=" +
                            str(service_record['hostid']))

            # in any case, check if another host is registered that has the same endpoint
            for service_record in service_records:
                if service_record[
                        'base_url'] and service_record['base_url'] != 'N/A':
                    service_hostport = re.sub(r"^https?://", "",
                                              service_record['base_url'])
                    # if a different host_id has the same endpoint, fail
                    if (service_hostport == endpoint_hostport) and (
                            config['host_id'] != service_record['hostid']):
                        raise Exception(
                            "trying to add new host but found conflicting endpoint from another host in DB - detail: my_host_id="
                            + str(config['host_id']) + " db_host_id=" +
                            str(service_record['hostid']) +
                            " my_host_endpoint=" + str(endpoint_hostport) +
                            " db_host_endpoint=" + str(service_hostport))
            # if all checks out, then add/update the registration
            ret = db_services.add(config['host_id'],
                                  sname,
                                  service_template,
                                  session=dbsession)

    except Exception:
        raise

    return ret
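
The endpoint-conflict check above compares host:port strings after stripping the URL scheme; a minimal illustration of that normalization:

import re

def hostport(base_url):
    """Strip a leading http:// or https:// scheme (sketch)."""
    return re.sub(r"^https?://", "", base_url)

assert hostport("https://svc.example.com:8228") == "svc.example.com:8228"
assert hostport("http://svc.example.com:8228") == "svc.example.com:8228"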
Example #11
    def get_document_meta(self, userId, bucket, key):
        with db.session_scope() as dbsession:
            return db_archivedocument.get_onlymeta(userId,
                                                   bucket,
                                                   key,
                                                   session=dbsession)
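
Nearly every example here enters the database through session_scope(). A hedged sketch of what such a contextmanager conventionally looks like: commit on success, roll back on error, always close (illustrative; the real implementation lives in anchore_engine.db):

from contextlib import contextmanager
from sqlalchemy.orm import sessionmaker

SessionFactory = sessionmaker()  # assumes the engine/bind is configured at startup

@contextmanager
def session_scope():
    """Provide a transactional scope around a series of operations (sketch)."""
    session = SessionFactory()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()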
Example #12
def init_oauth(app, grant_types, expiration_config):
    """
    Configure the oauth routes and handlers via authlib

    :param app:
    :param grant_types:
    :param expiration_config:
    :return:
    """
    logger.debug("Initializing oauth routes")
    try:
        tok_mgr = token_manager()
        logger.info("Initialized the token manager")
    except OauthNotConfiguredError:
        logger.info("OAuth support not configured, cannot initialize it")
        return None
    except InvalidOauthConfigurationError:
        logger.error("OAuth has invalid configuration, cannot initialize it")
        raise

    def query_client(client_id):
        db = get_session()
        c = db.query(OAuth2Client).filter_by(client_id=client_id).first()
        return c

    def do_not_save_token(token, request):
        return None

    # Don't use this (yet), due to token signing that allows system to verify without persistence
    def save_token(token, request):
        try:
            if request.user:
                user_id = request.user.username
            else:
                user_id = None

            client = request.client
            tok = OAuth2Token(client_id=client.client_id,
                              user_id=user_id,
                              **token)

            db = get_session()
            db.add(tok)
            db.commit()
        except Exception:
            logger.exception("Exception saving token")
            raise

    try:

        # Initialize an anonymous client record
        with session_scope() as db:
            found = (db.query(OAuth2Client).filter_by(
                client_id=ANONYMOUS_CLIENT_ID).one_or_none())

            logger.info("Creating new oauth client record for %s",
                        ANONYMOUS_CLIENT_ID)
            to_merge = OAuth2Client()
            to_merge.client_id = ANONYMOUS_CLIENT_ID
            to_merge.user_id = None
            to_merge.client_secret = None
            # These are no-ops effectively since the client isn't authenticated itself
            to_merge.client_id_issued_at = time.time() - 100
            to_merge.client_secret_expires_at = time.time() + 1000
            to_merge.set_client_metadata({
                "token_endpoint_auth_method":
                "none",  # This should be a function of the grant type input but all of our types are this currently
                "client_name":
                ANONYMOUS_CLIENT_ID,
                "grant_types": [grant.GRANT_TYPE for grant in grant_types],
            })

            merged = setup_oauth_client(found, to_merge)
            merged = db.merge(merged)
            logger.info(
                "Initializing db record for oauth client %s with grants %s",
                merged.client_id,
                merged.client_metadata.get("grant_types"),
            )
    except Exception as e:
        logger.debug("Default client record init failed: {}".format(e))

    app.config["OAUTH2_JWT_ENABLED"] = True
    app.config["OAUTH2_ACCESS_TOKEN_GENERATOR"] = generate_token
    app.config["OAUTH2_REFRESH_TOKEN_GENERATOR"] = False

    # Only the password grant type is used, others can stay defaults
    app.config["OAUTH2_TOKEN_EXPIRES_IN"] = expiration_config

    app.config["OAUTH2_JWT_KEY"] = tok_mgr.default_issuer().signing_key
    app.config["OAUTH2_JWT_ISS"] = tok_mgr.default_issuer().issuer
    app.config["OAUTH2_JWT_ALG"] = tok_mgr.default_issuer().signing_alg

    authz = AuthorizationServer(app,
                                query_client=query_client,
                                save_token=do_not_save_token)
    # Support only the password grant for now
    for grant in grant_types:
        logger.debug("Registering oauth grant handler: {}".format(
            getattr(grant, "GRANT_TYPE", "unknown")))
        authz.register_grant(grant)

    logger.debug("Oauth init complete")
    return authz
Example #13
def _system_creds_provider():
    with session_scope() as session:
        mgr = manager_factory.for_session(session=session)
        return mgr.get_system_credentials()
Example #14
def initialize():
    global archive_initialized, data_volume, use_db, archive_driver

    localconfig = anchore_engine.configuration.localconfig.get_config()
    myconfig = localconfig['services']['catalog']

    try:
        data_volume = None
        if 'archive_data_dir' in myconfig:
            data_volume = myconfig['archive_data_dir']

        archive_driver = 'db'
        if 'archive_driver' in myconfig:
            archive_driver = myconfig['archive_driver']

        if 'use_db' in myconfig and myconfig['use_db']:
            archive_driver = 'db'

        # driver specific initializations here
        if archive_driver == 'db':
            use_db = True
        else:
            use_db = False
            initialize_archive_file(myconfig)

    except Exception:
        raise

    logger.debug("archive initialization config: " +
                 str([archive_driver, use_db, data_volume]))

    # this section is for conversion on initialization between db driver and other driver
    with db.session_scope() as dbsession:
        logger.debug("running archive driver converter")

        if use_db:
            # need to check if any archive records do not have the document field populated, and if so try to import from localfs
            dbfilter = {'jsondata': '{}'}
            archive_matches = db_archivedocument.list_all(session=dbsession,
                                                          **dbfilter)
            for archive_match in archive_matches:
                userId = archive_match['userId']
                bucket = archive_match['bucket']
                archiveid = archive_match['archiveId']
                try:
                    fs_data = read_archive_file(userId,
                                                bucket,
                                                archiveid,
                                                driver_override='localfs')
                except Exception as err:
                    logger.debug("no data: " + str(err))
                    fs_data = None

                if fs_data:
                    logger.debug("document data - converting driver->DB: " +
                                 str([userId, bucket, archiveid]))
                    with db.session_scope() as subdbsession:
                        db_archivedocument.add(
                            userId,
                            bucket,
                            archiveid,
                            archiveid + ".json",
                            {'jsondata': json.dumps(fs_data)},
                            session=subdbsession)
                    delete_archive_file(userId,
                                        bucket,
                                        archiveid,
                                        driver_override='localfs')

        else:
            # need to check if any archive records DO have the document field populated, and if so try to export to localfs
            archive_matches = db_archivedocument.list_all_notempty(
                session=dbsession)
            for archive_match in archive_matches:
                userId = archive_match['userId']
                bucket = archive_match['bucket']
                archiveid = archive_match['archiveId']
                archive_record = db_archivedocument.get(userId,
                                                        bucket,
                                                        archiveid,
                                                        session=dbsession)
                db_data = json.loads(archive_record['jsondata'])

                logger.debug("document data - converting DB->driver: " +
                             str([userId, bucket, archiveid]))
                dataref = write_archive_file(userId,
                                             bucket,
                                             archiveid,
                                             db_data,
                                             driver_override='localfs')
                with db.session_scope() as subdbsession:
                    db_archivedocument.add(userId,
                                           bucket,
                                           archiveid,
                                           archiveid + ".json",
                                           {'jsondata': "{}"},
                                           session=subdbsession)

        logger.debug("archive driver converter complete")
    archive_initialized = True
    return True
Example #15
    def run_feeds_update(cls, json_obj=None, force_flush=False):
        """
        Creates a task and runs it, optionally with a thread if locking is enabled.

        :return:
        """
        error = None
        feeds = None

        with session_scope() as session:
            mgr = identities.manager_factory.for_session(session)
            catalog_client = internal_client_for(CatalogClient, userId=None)

        try:

            feeds = get_selected_feeds_to_sync(localconfig.get_config())
            if json_obj:
                task = cls.from_json(json_obj)
                if not task:
                    return None
                task.feeds = feeds
            else:
                task = FeedsUpdateTask(feeds_to_sync=feeds, flush=force_flush)

            # Create feed task begin event
            try:
                catalog_client.add_event(
                    FeedSyncStart(groups=feeds if feeds else 'all'))
            except Exception:
                log.exception(
                    'Ignoring event generation error before feed sync')

            result = []
            if cls.locking_enabled:
                run_target_with_lease(
                    account=None,
                    lease_id='feed_sync',
                    ttl=90,
                    target=lambda: result.append(task.execute()))
                # A bit of work-around for the lambda def to get result from thread execution
                if result:
                    result = result[0]
            else:
                result = task.execute()

            return result
        except Exception as e:
            error = e
            log.exception('Error executing feeds update')
            raise e
        finally:
            # log feed sync event
            try:
                if error:
                    catalog_client.add_event(
                        FeedSyncFail(groups=feeds if feeds else 'all',
                                     error=error))
                else:
                    catalog_client.add_event(
                        FeedSyncComplete(groups=feeds if feeds else 'all'))
            except Exception:
                log.exception(
                    'Ignoring event generation error after feed sync')
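
The result.append(...) lambda passed to run_target_with_lease() above is a standard workaround for capturing a return value from a callable executed on another thread; a standalone illustration of the trick:

import threading

def run_on_thread(target):
    t = threading.Thread(target=target)
    t.start()
    t.join()

result = []
run_on_thread(lambda: result.append(21 * 2))  # the closure writes into the list
assert result[0] == 42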
Example #16
def tearDown():
    with session_scope() as session:
        mgr = identities.manager_factory.for_session(session)
        for accnt in mgr.list_accounts():
            logger.info('Deleting accnt: {}'.format(accnt))
            mgr.delete_account(accnt['name'])
Example #17
def add_image(
    image_metadata=None,
    tag=None,
    digest=None,
    created_at=None,
    from_archive=False,
    allow_dockerfile_update=False,
):
    try:
        if image_metadata is None:
            image_metadata = {}

        request_inputs = anchore_engine.apis.do_request_prep(
            connexion.request,
            default_params={
                "tag": tag,
                "digest": digest,
                "created_at": created_at,
                "allow_dockerfile_update": allow_dockerfile_update,
            },
        )
        if image_metadata.get("import_operation_id") and from_archive:
            raise BadRequest(
                'Cannot specify both "from_archive=True" query parameter and include an import manifest in the payload',
                detail={},
            )

        if from_archive:
            # Restore an image from the analysis archive into the working set
            task = archiver.RestoreArchivedImageTask(
                account=ApiRequestContextProxy.namespace(),
                image_digest=digest)
            task.start()

            request_inputs["params"] = {}
            request_inputs["method"] = "GET"

            with db.session_scope() as session:
                (
                    return_object,
                    httpcode,
                ) = anchore_engine.services.catalog.catalog_impl.image_imageDigest(
                    session, request_inputs, digest)

        elif image_metadata.get("import_manifest"):
            # Import an image from the upload API
            try:
                import_manifest = ImportManifest.from_json(
                    image_metadata["import_manifest"])
            except Exception as err:
                logger.debug_exception("Error unmarshalling manifest")
                # If we hit this, it means the swagger spec doesn't match the marshmallow scheme
                raise BadRequest(message="invalid import manifest",
                                 detail={"error": str(err)})

            annotations = image_metadata.get("annotations", {})

            # Don't accept an in-line dockerfile
            if image_metadata.get("dockerfile"):
                raise BadRequest(
                    "Cannot provide dockerfile content directly in import payload. Use the import operation APIs to load the dockerfile before calling this endpoint",
                    detail={},
                )

            with db.session_scope() as session:
                # allow_dockerfile_update is a poor proxy for the 'force' option
                return_object = anchore_engine.services.catalog.importer.import_image(
                    session,
                    account=ApiRequestContextProxy.namespace(),
                    operation_id=import_manifest.operation_uuid,
                    import_manifest=import_manifest,
                    force=allow_dockerfile_update,
                    annotations=annotations,
                )
                httpcode = 200
        else:
            # Regular image-add case: analyze from a registry
            with db.session_scope() as session:
                (
                    return_object,
                    httpcode,
                ) = anchore_engine.services.catalog.catalog_impl.image(
                    session, request_inputs, bodycontent=image_metadata)

    except AnchoreApiError:
        raise
    except ImageConflict as img_err:
        httpcode = 409
        return_object = str(img_err)
    except Exception as err:
        logger.exception("Error processing image add")
        httpcode = 500
        return_object = str(err)

    return return_object, httpcode
Example #18
def registerService(sname, config, enforce_unique=True):
    ret = False
    myconfig = config['services'][sname]

    # TODO add version support/detection here

    service_template = {
        'type': 'anchore',
        'base_url': 'N/A',
        'status_base_url': 'N/A',
        'version': 'v1',
        'short_description': ''
    }

    if myconfig.get('ssl_enable', False) or myconfig.get(
            'external_tls', False):
        hstring = "https"
    else:
        hstring = "http"

    endpoint_hostname = endpoint_port = endpoint_hostport = None

    if 'endpoint_hostname' in myconfig:
        endpoint_hostname = myconfig['endpoint_hostname']
        service_template[
            'base_url'] = hstring + "://" + myconfig['endpoint_hostname']
    if 'port' in myconfig:
        endpoint_port = int(myconfig['port'])
        service_template['base_url'] += ":" + str(endpoint_port)

    if endpoint_hostname:
        endpoint_hostport = endpoint_hostname
        if endpoint_port:
            endpoint_hostport = endpoint_hostport + ":" + str(endpoint_port)

    try:
        service_template['status'] = False
        service_template['status_message'] = taskstate.base_state(
            'service_status')

        with session_scope() as dbsession:
            service_records = db_services.get_byname(sname, session=dbsession)

            # fail if trying to add a service that must be unique in the system, but one already is registered in DB
            if enforce_unique:
                if len(service_records) > 1:
                    raise Exception(
                        "more than one entry for service type (" + str(sname) +
                        ") exists in DB, but service must be unique - manual DB intervention required"
                    )

                for service_record in service_records:
                    if service_record and (service_record['hostid'] !=
                                           config['host_id']):
                        raise Exception(
                            "service type (" + str(sname) +
                            ") already exists in system with different host_id - detail: my_host_id="
                            + str(config['host_id']) + " db_host_id=" +
                            str(service_record['hostid']))

            # if all checks out, then add/update the registration
            ret = db_services.add(config['host_id'],
                                  sname,
                                  service_template,
                                  session=dbsession)

            try:
                my_service_record = {
                    'hostid': config['host_id'],
                    'servicename': sname,
                }
                my_service_record.update(service_template)
                servicestatus.set_my_service_record(my_service_record)
            except Exception as err:
                logger.warn(
                    "could not set local service information - exception: {}".
                    format(str(err)))

    except Exception as err:
        raise err

    return (ret)
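
A usage sketch for the function above: it reads its service block from config['services'][sname] plus the host identity from config['host_id'], so the minimal input shape (all values hypothetical) is:

config = {
    'host_id': 'host-1234',  # hypothetical host identifier
    'services': {
        'catalog': {
            'enabled': True,
            'ssl_enable': False,
            'endpoint_hostname': 'localhost',
            'port': 8082,
        }
    }
}

registered = registerService('catalog', config, enforce_unique=True)
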
Example #19
def makeService(snames, options, bootstrap_db=False, bootstrap_users=False):

    try:
        # config and init
        configfile = configdir = None
        if options['config']:
            configdir = options['config']
            configfile = os.path.join(options['config'], 'config.yaml')

        anchore_engine.configuration.localconfig.load_config(
            configdir=configdir, configfile=configfile)
        localconfig = anchore_engine.configuration.localconfig.get_config()
        localconfig['myservices'] = []
        logger.spew("localconfig=" +
                    json.dumps(localconfig, indent=4, sort_keys=True))
    except Exception as err:
        log.err("cannot load configuration: exception - " + str(err))
        raise err

    # get versions of things
    try:
        versions = anchore_engine.configuration.localconfig.get_versions()
    except Exception as err:
        log.err("cannot detect versions of service: exception - " + str(err))
        raise err

    logger.info("initializing database")

    # connect to DB
    try:
        db.initialize(versions=versions,
                      bootstrap_db=bootstrap_db,
                      bootstrap_users=bootstrap_users)
    except Exception as err:
        log.err("cannot connect to configured DB: exception - " + str(err))
        raise err

    # credential bootstrap
    with session_scope() as dbsession:
        system_user = db_users.get('anchore-system', session=dbsession)
        localconfig['system_user_auth'] = (system_user['userId'],
                                           system_user['password'])

    # application object
    application = service.Application("multi-service-" + '-'.join(snames))

    # multi-service
    retservice = service.MultiService()
    retservice.setServiceParent(application)

    success = False
    try:
        scount = 0
        for sname in snames:
            if sname in localconfig['services'] and localconfig['services'][
                    sname]['enabled']:

                smodule = importlib.import_module("anchore_engine.services." +
                                                  sname)

                s = smodule.createService(sname, localconfig)
                s.setServiceParent(retservice)

                rc = smodule.initializeService(sname, localconfig)
                if not rc:
                    raise Exception("failed to initialize service")

                rc = smodule.registerService(sname, localconfig)
                if not rc:
                    raise Exception("failed to register service")

                logger.debug("starting service: " + sname)
                success = True
                scount += 1
                localconfig['myservices'].append(sname)
            else:
                log.err(
                    "service not enabled in config, not starting service: " +
                    sname)

        if scount == 0:
            log.err(
                "no services/subservices were enabled/started on this host")
            success = False
    except Exception as err:
        log.err("cannot create/init/register service: " + sname +
                " - exception: " + str(err))
        success = False

    if not success:
        log.err("cannot start service (see above for information)")
        traceback.print_exc()
        raise Exception("cannot start service (see above for information)")

    return (retservice)
Example #20
def makeService(snames,
                options,
                db_connect=True,
                require_system_user_auth=True,
                module_name="anchore_engine.services",
                validate_params={}):

    try:
        logger.enable_bootstrap_logging(service_name=','.join(snames))

        try:
            # config and init
            configfile = configdir = None
            if options['config']:
                configdir = options['config']
                configfile = os.path.join(options['config'], 'config.yaml')

            anchore_engine.configuration.localconfig.load_config(
                configdir=configdir,
                configfile=configfile,
                validate_params=validate_params)
            localconfig = anchore_engine.configuration.localconfig.get_config()
            localconfig['myservices'] = []
            logger.spew("localconfig=" +
                        json.dumps(localconfig, indent=4, sort_keys=True))
        except Exception as err:
            logger.error("cannot load configuration: exception - " + str(err))
            raise err

        # get versions of things
        try:
            versions = anchore_engine.configuration.localconfig.get_versions()
        except Exception as err:
            logger.error("cannot detect versions of service: exception - " +
                         str(err))
            raise err

        if db_connect:
            logger.info("initializing database")

            # connect to DB
            try:
                db.initialize(localconfig=localconfig, versions=versions)
            except Exception as err:
                logger.error("cannot connect to configured DB: exception - " +
                             str(err))
                raise err

            # credential bootstrap
            localconfig['system_user_auth'] = (None, None)
            if require_system_user_auth:
                gotauth = False
                max_retries = 60
                for count in range(1, max_retries):
                    if gotauth:
                        break
                    try:
                        with session_scope() as dbsession:
                            localconfig[
                                'system_user_auth'] = get_system_user_auth(
                                    session=dbsession)
                        if localconfig['system_user_auth'] != (None, None):
                            gotauth = True
                        else:
                            logger.error(
                                "cannot get system user auth credentials yet, retrying ("
                                + str(count) + " / " + str(max_retries) + ")")
                            time.sleep(5)
                    except Exception as err:
                        logger.error(
                            "cannot get system-user auth credentials - service may not have system level access"
                        )
                        localconfig['system_user_auth'] = (None, None)

                if not gotauth:
                    raise Exception(
                        "service requires system user auth to start")

        # application object
        application = service.Application("multi-service-" + '-'.join(snames))

        # multi-service
        retservice = service.MultiService()
        retservice.setServiceParent(application)

        success = False
        try:
            scount = 0
            for sname in snames:
                if sname in localconfig['services'] and localconfig[
                        'services'][sname]['enabled']:

                    smodule = importlib.import_module(module_name + "." +
                                                      sname)

                    s = smodule.createService(sname, localconfig)
                    s.setServiceParent(retservice)

                    rc = smodule.initializeService(sname, localconfig)
                    if not rc:
                        raise Exception("failed to initialize service")

                    rc = smodule.registerService(sname, localconfig)
                    if not rc:
                        raise Exception("failed to register service")

                    logger.debug("starting service: " + sname)
                    success = True
                    scount += 1
                    localconfig['myservices'].append(sname)
                else:
                    logger.error(
                        "service not enabled in config, not starting service: "
                        + sname)

            if scount == 0:
                logger.error(
                    "no services/subservices were enabled/started on this host"
                )
                success = False
        except Exception as err:
            logger.error("cannot create/init/register service: " + sname +
                         " - exception: " + str(err))
            success = False

        if not success:
            logger.error("cannot start service (see above for information)")
            traceback.print_exc()
            raise Exception("cannot start service (see above for information)")

        return (retservice)
    finally:
        logger.disable_bootstrap_logging()
Example #21
def test_get_most_recent_active_grypedb_no_active_Db(anchore_db):
    with session_scope() as session:
        with pytest.raises(NoActiveGrypeDB):
            get_most_recent_active_grypedb(session)
Example #22
def init_oauth(app, grant_types, expiration_config):
    """
    Configure the oauth routes and handlers via authlib
    :return:
    """
    logger.debug("Initializing oauth routes")
    try:
        tok_mgr = token_manager()
        logger.info("Initialized the token manager")
    except OauthNotConfiguredError:
        logger.info("OAuth support not configured, cannot initialize it")
        return None
    except InvalidOauthConfigurationError:
        logger.error("OAuth has invalid configuration, cannot initialize it")
        raise

    def query_client(client_id):
        db = get_session()
        c = db.query(OAuth2Client).filter_by(client_id=client_id).first()
        return c

    def do_not_save_token(token, request):
        return None

    # Not used yet: tokens are signed, so the system can verify them without persisting them
    def save_token(token, request):
        try:
            if request.user:
                user_id = request.user.username
            else:
                user_id = None

            client = request.client
            tok = OAuth2Token(client_id=client.client_id,
                              user_id=user_id,
                              **token)

            db = get_session()
            db.add(tok)
            db.commit()
        except:
            logger.exception("Exception saving token")
            raise

    try:
        # Initialize an anonymous client record
        with session_scope() as db:
            f = db.query(OAuth2Client).filter_by(client_id="anonymous").first()
            if not f:
                c = OAuth2Client()
                c.client_id = "anonymous"
                c.user_id = None
                c.client_secret = None
                c.issued_at = time.time() - 100
                c.expires_at = time.time() + 1000
                c.grant_type = "password"
                c.token_endpoint_auth_method = "none"
                c.client_name = "anonymous"
                db.add(c)
    except Exception as e:
        logger.debug("Default client record init failed: {}".format(e))

    app.config["OAUTH2_JWT_ENABLED"] = True
    app.config["OAUTH2_ACCESS_TOKEN_GENERATOR"] = generate_token
    app.config["OAUTH2_REFRESH_TOKEN_GENERATOR"] = False

    # Only the password grant type is used, others can stay defaults
    app.config["OAUTH2_TOKEN_EXPIRES_IN"] = expiration_config

    app.config["OAUTH2_JWT_KEY"] = tok_mgr.default_issuer().signing_key
    app.config["OAUTH2_JWT_ISS"] = tok_mgr.default_issuer().issuer
    app.config["OAUTH2_JWT_ALG"] = tok_mgr.default_issuer().signing_alg

    authz = AuthorizationServer(app,
                                query_client=query_client,
                                save_token=do_not_save_token)
    # Support only the password grant for now
    for grant in grant_types:
        logger.debug("Registering oauth grant handler: {}".format(
            getattr(grant, "GRANT_TYPE", "unknown")))
        authz.register_grant(grant)

    logger.debug("Oauth init complete")
    return authz
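
For context, a sketch of how init_oauth might be wired into a Flask app. The grant class comes from authlib's rfc6749 grants module (in practice it would be subclassed to implement authenticate_user); the app and expiration values are assumptions:

from flask import Flask
from authlib.oauth2.rfc6749 import grants

app = Flask(__name__)

# one-hour access tokens for the password grant, in the shape expected by
# authlib's OAUTH2_TOKEN_EXPIRES_IN setting
expiration = {'password': 3600}

authz = init_oauth(app, [grants.ResourceOwnerPasswordCredentialsGrant], expiration)
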
Example #23
def create_account(account):
    """
    POST /accounts

    :param account:
    :return:
    """

    try:
        try:
            can_create_account(account)
        except ValueError as ex:
            return (
                make_response_error("Invalid account request: {}".format(
                    ex.args[0]),
                                    in_httpcode=400),
                400,
            )
        except Exception as ex:
            logger.exception("Unexpected exception in account validation")
            return make_response_error("Invalid account request",
                                       in_httpcode=400), 400

        with session_scope() as session:
            mgr = manager_factory.for_session(session)
            try:
                resp = mgr.create_account(
                    account_name=account["name"],
                    account_type=account.get("type", AccountTypes.user.value),
                    email=account.get("email"),
                )
            except ValueError as ex:
                return (
                    make_response_error("Validation failed: {}".format(ex),
                                        in_httpcode=400),
                    400,
                )

            authorizer.notify(NotificationTypes.domain_created,
                              account["name"])

            # Initialize account stuff
            try:
                _init_policy(account["name"], config=get_config())
            except Exception:
                logger.exception(
                    "Could not initialize policy bundle for new account: {}".
                    format(account["name"]))
                raise

        return account_db_to_msg(resp), 200
    except AccountAlreadyExistsError as ex:
        return (
            make_response_error(errmsg="Account already exists",
                                in_httpcode=400),
            400,
        )
    except Exception as ex:
        logger.exception("Unexpected Error creating account")
        return make_response_error("Error creating account",
                                   in_httpcode=500), 500
Example #24
def db_upgrade_007_008():
    from anchore_engine.db import session_scope, ImagePackage, ImageNpm, ImageGem
    if True:
        engine = anchore_engine.db.entities.common.get_engine()

        file_path_length = 512
        hash_length = 80

        new_columns = [
            {
                'table_name': 'image_packages',
                'columns': [
                    Column('pkg_path', String(file_path_length), primary_key=True),
                    Column('pkg_path_hash', String(hash_length)),
                    Column('metadata_json', StringJSON),
                ]
            },
            {
                'table_name': 'image_package_vulnerabilities',
                'columns': [
                    Column('pkg_path', String(file_path_length), primary_key=True),
                ]
            },
            {
                'table_name': 'image_package_db_entries',
                'columns': [
                    Column('pkg_path', String(file_path_length), primary_key=True),
                ]
            }
        ]

        log.err("creating new table columns")
        for table in new_columns:
            for column in table['columns']:
                log.err("creating new column ({}) in table ({})".format(
                    column.name, table.get('table_name', "")))
                try:
                    cn = column.compile(dialect=engine.dialect)
                    ct = column.type.compile(engine.dialect)
                    engine.execute(
                        'ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s' %
                        (table['table_name'], cn, ct))
                except Exception as e:
                    log.err(
                        'failed to perform DB upgrade on {} adding column - exception: {}'
                        .format(table, str(e)))
                    raise Exception(
                        'failed to perform DB upgrade on {} adding column - exception: {}'
                        .format(table, str(e)))

        # populate the new columns
        for table in [
                'image_packages', 'image_package_vulnerabilities',
                'image_package_db_entries'
        ]:
            log.err("updating table ({}) column (pkg_path)".format(table))
            engine.execute(
                "UPDATE {} set pkg_path='pkgdb' where pkg_path is null".format(
                    table))

        exec_commands = [
            'ALTER TABLE image_package_vulnerabilities DROP CONSTRAINT IF EXISTS image_package_vulnerabilities_pkg_image_id_fkey',
            'ALTER TABLE image_package_db_entries DROP CONSTRAINT IF EXISTS image_package_db_entries_image_id_fkey',
            'ALTER TABLE image_packages DROP CONSTRAINT IF EXISTS image_packages_pkey',
            'ALTER TABLE image_package_db_entries DROP CONSTRAINT IF EXISTS image_package_db_entries_pkey',
            'ALTER TABLE image_package_vulnerabilities DROP CONSTRAINT IF EXISTS image_package_vulnerabilities_pkey',
            'ALTER TABLE image_packages ADD PRIMARY KEY (image_id,image_user_id,name,version,pkg_type,arch,pkg_path)',
            'ALTER TABLE image_package_vulnerabilities ADD PRIMARY KEY (pkg_user_id,pkg_image_id,pkg_name,pkg_version,pkg_type,pkg_arch,vulnerability_id,pkg_path)',
            'ALTER TABLE image_package_db_entries ADD PRIMARY KEY (image_id, image_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path,file_path)',
            'ALTER TABLE image_package_vulnerabilities ADD CONSTRAINT image_package_vulnerabilities_pkg_image_id_fkey FOREIGN KEY (pkg_image_id, pkg_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path) REFERENCES image_packages (image_id, image_user_id, name, version, pkg_type, arch, pkg_path) MATCH SIMPLE',
            'ALTER TABLE image_package_db_entries ADD CONSTRAINT image_package_db_entries_image_id_fkey FOREIGN KEY (image_id, image_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path) REFERENCES image_packages (image_id, image_user_id, name, version, pkg_type, arch, pkg_path) MATCH SIMPLE',
        ]

        log.err(
            "updating primary key / foreign key relationships for new column")
        cmdcount = 1
        for command in exec_commands:
            log.err("running update operation {} of {}: {}".format(
                cmdcount, len(exec_commands), command))
            engine.execute(command)
            cmdcount = cmdcount + 1

        log.err(
            "converting ImageNpm and ImageGem records into ImagePackage records"
        )
        # migrate ImageNpm and ImageGem records into ImagePackage records
        # NOTE: these Query objects are lazy; they are iterated below, after
        # this session scope exits
        with session_scope() as dbsession:
            db_npms = dbsession.query(ImageNpm)
            db_gems = dbsession.query(ImageGem)

        gems = []
        npms = []
        try:
            for n in db_npms:
                np = ImagePackage()

                # primary keys
                np.name = n.name
                # guard against empty version lists (as in example #28)
                np.version = n.versions_json[0] if n.versions_json else 'N/A'
                np.pkg_type = 'npm'
                np.arch = 'N/A'
                np.image_user_id = n.image_user_id
                np.image_id = n.image_id
                np.pkg_path = n.path

                # other
                np.pkg_path_hash = n.path_hash
                np.distro_name = 'npm'
                np.distro_version = 'N/A'
                np.like_distro = 'npm'
                np.fullversion = np.version
                np.license = ' '.join(n.licenses_json)
                np.origin = ' '.join(n.origins_json)
                fullname = np.name
                np.normalized_src_pkg = fullname
                np.src_pkg = fullname
                npms.append(np)
        except Exception as err:
            raise err

        try:
            for n in db_gems:
                np = ImagePackage()

                # primary keys
                np.name = n.name
                # guard against empty version lists (as in example #28)
                np.version = n.versions_json[0] if n.versions_json else 'N/A'
                np.pkg_type = 'gem'
                np.arch = 'N/A'
                np.image_user_id = n.image_user_id
                np.image_id = n.image_id
                np.pkg_path = n.path

                # other
                np.pkg_path_hash = n.path_hash
                np.distro_name = 'gem'
                np.distro_version = 'N/A'
                np.like_distro = 'gem'
                np.fullversion = np.version
                np.license = ' '.join(n.licenses_json)
                np.origin = ' '.join(n.origins_json)
                fullname = np.name
                np.normalized_src_pkg = fullname
                np.src_pkg = fullname
                gems.append(np)
        except Exception as err:
            raise err

        with session_scope() as dbsession:
            log.err("merging npms: {} records to merge".format(len(npms)))
            try:
                for npm in npms:
                    dbsession.merge(npm)
            except Exception as err:
                raise err

        with session_scope() as dbsession:
            log.err("merging gems: {} records to merge".format(len(gems)))
            try:
                for gem in gems:
                    dbsession.merge(gem)
            except Exception as err:
                raise err
Example #25
def init_oauth(app, grant_types, expiration_config):
    """
    Configure the oauth routes and handlers via authlib
    :return:
    """
    logger.debug('Initializing oauth routes')
    conf = localconfig.get_config()
    if not conf.get('user_authentication', {}).get('oauth', {}).get('enabled'):
        # Not enabled in configuration
        return None

    def query_client(client_id):
        logger.debug('Looking up client: {}'.format(client_id))
        db = get_session()
        c = db.query(OAuth2Client).filter_by(client_id=client_id).first()
        logger.debug('Found client record for client_id: {}'.format(client_id))
        return c

    def do_not_save_token(token, request):
        return None

    # Not used yet: tokens are signed, so the system can verify them without persisting them
    def save_token(token, request):
        try:
            if request.user:
                user_id = request.user.username
            else:
                user_id = None

            client = request.client
            tok = OAuth2Token(client_id=client.client_id,
                              user_id=user_id,
                              **token)

            db = get_session()
            db.add(tok)
            db.commit()
            logger.info('Saved new token')
        except:
            logger.exception('Exception saving token')
            raise

    try:
        # Initialize an anonymous client record
        with session_scope() as db:
            f = db.query(OAuth2Client).filter_by(client_id='anonymous').first()
            if not f:
                c = OAuth2Client()
                c.client_id = 'anonymous'
                c.user_id = None
                c.client_secret = None
                c.issued_at = time.time() - 100
                c.expires_at = time.time() + 1000
                c.grant_type = 'password'
                c.token_endpoint_auth_method = 'none'
                c.client_name = 'anonymous'
                db.add(c)
    except Exception as e:
        logger.debug('Default client record init failed: {}'.format(e))

    app.config['OAUTH2_JWT_ENABLED'] = True
    app.config['OAUTH2_ACCESS_TOKEN_GENERATOR'] = generate_token
    app.config['OAUTH2_REFRESH_TOKEN_GENERATOR'] = False

    # Only the password grant type is used, others can stay defaults
    app.config['OAUTH2_TOKEN_EXPIRES_IN'] = expiration_config

    tok_mgr = token_manager()
    app.config['OAUTH2_JWT_KEY'] = tok_mgr.default_issuer().signing_key
    app.config['OAUTH2_JWT_ISS'] = tok_mgr.default_issuer().issuer
    app.config['OAUTH2_JWT_ALG'] = tok_mgr.default_issuer().signing_alg

    authz = AuthorizationServer(app,
                                query_client=query_client,
                                save_token=do_not_save_token)
    # Support only the password grant for now
    for grant in grant_types:
        logger.debug('Registering oauth grant handler: {}'.format(
            getattr(grant, 'GRANT_TYPE', 'unknown')))
        authz.register_grant(grant)

    logger.debug('Oauth init complete')

    return authz
Example #26
def do_db_bootstrap(localconfig=None):
    with upgrade_context(my_module_upgrade_id) as ctx:

        from anchore_engine.db import db_users, session_scope
        with session_scope() as dbsession:
            # system user
            try:
                system_user_record = db_users.get('anchore-system',
                                                  session=dbsession)
                if not system_user_record:
                    rc = db_users.add('anchore-system',
                                      str(uuid.uuid4()), {'active': True},
                                      session=dbsession)
                else:
                    db_users.update(system_user_record['userId'],
                                    system_user_record['password'],
                                    {'active': True},
                                    session=dbsession)

            except Exception as err:
                raise Exception(
                    "Initialization failed: could not fetch/add anchore-system user from/to DB - exception: "
                    + str(err))

            if localconfig:
                try:
                    for userId in localconfig['credentials']['users']:
                        if not localconfig['credentials']['users'][userId]:
                            localconfig['credentials']['users'][userId] = {}

                        cuser = localconfig['credentials']['users'][userId]

                        password = cuser.pop('password', None)
                        email = cuser.pop('email', None)
                        if password and email:
                            db_users.add(userId,
                                         password, {
                                             'email': email,
                                             'active': True
                                         },
                                         session=dbsession)
                        else:
                            raise Exception(
                                "user defined but has empty password/email: " +
                                str(userId))

                    user_records = db_users.get_all(session=dbsession)
                    for user_record in user_records:
                        if user_record['userId'] == 'anchore-system':
                            continue
                        if user_record['userId'] not in localconfig[
                                'credentials']['users']:
                            logger.info(
                                "flagging user '" +
                                str(user_record['userId']) +
                                "' as inactive (in DB, not in configuration)")
                            db_users.update(user_record['userId'],
                                            user_record['password'],
                                            {'active': False},
                                            session=dbsession)

                except Exception as err:
                    raise Exception(
                        "Initialization failed: could not add users from config into DB - exception: "
                        + str(err))
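
The bootstrap above reads user credentials out of the local config; the shape it expects (values are placeholders) is roughly:

localconfig = {
    'credentials': {
        'users': {
            'admin': {
                'password': 'foobar',  # placeholder
                'email': 'admin@example.com',
            },
        }
    }
}

do_db_bootstrap(localconfig=localconfig)
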
Example #27
def db_upgrade_003_004():
    engine = anchore_engine.db.entities.common.get_engine()

    from anchore_engine.db import db_catalog_image, db_archivedocument, session_scope
    import anchore_engine.common

    newcolumns = [
        Column('arch', String, primary_key=False),
        Column('distro', String, primary_key=False),
        Column('distro_version', String, primary_key=False),
        Column('dockerfile_mode', String, primary_key=False),
        Column('image_size', BigInteger, primary_key=False),
        Column('layer_count', Integer, primary_key=False)
    ]
    for column in newcolumns:
        try:
            table_name = 'catalog_image'
            cn = column.compile(dialect=engine.dialect)
            ct = column.type.compile(engine.dialect)
            engine.execute('ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s' %
                           (table_name, cn, ct))
        except Exception as e:
            log.err(
                'failed to perform DB upgrade on catalog_image adding column - exception: {}'
                .format(str(e)))
            raise Exception(
                'failed to perform DB upgrade on catalog_image adding column - exception: {}'
                .format(str(e)))

    with session_scope() as dbsession:
        image_records = db_catalog_image.get_all(session=dbsession)

    for image_record in image_records:
        userId = image_record['userId']
        imageDigest = image_record['imageDigest']

        log.err("upgrade: processing image " + str(imageDigest) + " : " +
                str(userId))
        try:

            # get the image analysis data from archive
            image_data = None
            with session_scope() as dbsession:
                result = db_archivedocument.get(userId,
                                                'analysis_data',
                                                imageDigest,
                                                session=dbsession)
            if result and 'jsondata' in result:
                image_data = json.loads(result['jsondata'])['document']

            if image_data:
                # update the record and store
                anchore_engine.common.helpers.update_image_record_with_analysis_data(
                    image_record, image_data)
                with session_scope() as dbsession:
                    db_catalog_image.update_record(image_record,
                                                   session=dbsession)
            else:
                raise Exception(
                    "upgrade: no analysis data found in archive for image: " +
                    str(imageDigest))
        except Exception as err:
            log.err(
                "upgrade: failed to populate new columns with existing data for image ("
                + str(imageDigest) + "), record may be incomplete: " +
                str(err))

    return True
Example #28
def policy_engine_packages_upgrade_007_008():
    from anchore_engine.db import session_scope, ImagePackage, ImageNpm, ImageGem, Image
    if True:
        engine = anchore_engine.db.entities.common.get_engine()

        file_path_length = 512
        hash_length = 80

        new_columns = [
            {
                'table_name': 'image_packages',
                'columns': [
                    Column('pkg_path', String(file_path_length), primary_key=True),
                    Column('pkg_path_hash', String(hash_length)),
                    Column('metadata_json', StringJSON),
                ]
            },
            {
                'table_name': 'image_package_vulnerabilities',
                'columns': [
                    Column('pkg_path', String(file_path_length), primary_key=True),
                ]
            },
            {
                'table_name': 'image_package_db_entries',
                'columns': [
                    Column('pkg_path', String(file_path_length), primary_key=True),
                ]
            }
        ]

        log.err("creating new table columns")
        for table in new_columns:
            for column in table['columns']:
                log.err("creating new column ({}) in table ({})".format(column.name, table.get('table_name', "")))
                try:
                    cn = column.compile(dialect=engine.dialect)
                    ct = column.type.compile(engine.dialect)
                    engine.execute('ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s' % (table['table_name'], cn, ct))
                except Exception as e:
                    log.err('failed to perform DB upgrade on {} adding column - exception: {}'.format(table, str(e)))
                    raise Exception('failed to perform DB upgrade on {} adding column - exception: {}'.format(table, str(e)))


        # populate the new columns
        log.err("updating new column (pkg_path) - this may take a while")
        for table in ['image_packages', 'image_package_vulnerabilities']:
            log.err("updating table ({}) column (pkg_path)".format(table))
            done = False
            while not done:
                startts = time.time()
                rc = engine.execute("UPDATE {} set pkg_path='pkgdb' where pkg_path is null".format(table))
                log.err("updated {} records in {} (time={}), performing next range".format(rc.rowcount, table, time.time() - startts))
                done = True


        with session_scope() as dbsession:
            db_image_ids = dbsession.query(Image.id).distinct().all()

        total_records = len(db_image_ids)
        record_count = 0
        for record in db_image_ids:
            db_image_id = record[0]
            startts = time.time()
            rc = engine.execute("UPDATE image_package_db_entries set pkg_path='pkgdb' where image_id='{}' and pkg_path is null".format(db_image_id))
            record_count = record_count + 1
            log.err("updated {} image ({} / {}) in {} (time={}), performing next image update".format(db_image_id, record_count, total_records, 'image_package_db_entries', time.time() - startts))

        exec_commands = [
            'ALTER TABLE image_package_vulnerabilities DROP CONSTRAINT IF EXISTS image_package_vulnerabilities_pkg_image_id_fkey',
            'ALTER TABLE image_package_db_entries DROP CONSTRAINT IF EXISTS image_package_db_entries_image_id_fkey',
            'ALTER TABLE image_packages DROP CONSTRAINT IF EXISTS image_packages_pkey',
            'ALTER TABLE image_package_db_entries DROP CONSTRAINT IF EXISTS image_package_db_entries_pkey',
            'ALTER TABLE image_package_vulnerabilities DROP CONSTRAINT IF EXISTS image_package_vulnerabilities_pkey',
        ]

        log.err("dropping primary key / foreign key relationships for new column")
        cmdcount = 1
        for command in exec_commands:
            log.err("running update operation {} of {}: {}".format(cmdcount, len(exec_commands), command))
            engine.execute(command)
            cmdcount = cmdcount + 1

        exec_commands = [
            'ALTER TABLE image_packages ADD PRIMARY KEY (image_id,image_user_id,name,version,pkg_type,arch,pkg_path)',
            'ALTER TABLE image_package_vulnerabilities ADD PRIMARY KEY (pkg_user_id,pkg_image_id,pkg_name,pkg_version,pkg_type,pkg_arch,vulnerability_id,pkg_path)',
            'ALTER TABLE image_package_db_entries ADD PRIMARY KEY (image_id, image_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path,file_path)',
            'ALTER TABLE image_package_vulnerabilities ADD CONSTRAINT image_package_vulnerabilities_pkg_image_id_fkey FOREIGN KEY (pkg_image_id, pkg_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path) REFERENCES image_packages (image_id, image_user_id, name, version, pkg_type, arch, pkg_path) MATCH SIMPLE',
            'ALTER TABLE image_package_db_entries ADD CONSTRAINT image_package_db_entries_image_id_fkey FOREIGN KEY (image_id, image_user_id, pkg_name, pkg_version, pkg_type, pkg_arch, pkg_path) REFERENCES image_packages (image_id, image_user_id, name, version, pkg_type, arch, pkg_path) MATCH SIMPLE',
        ]

        log.err("updating primary key / foreign key relationships for new column - this may take a while")
        cmdcount = 1
        for command in exec_commands:
            log.err("running update operation {} of {}: {}".format(cmdcount, len(exec_commands), command))
            engine.execute(command)
            cmdcount = cmdcount + 1


        log.err("converting ImageNpm and ImageGem records into ImagePackage records - this may take a while")
        # migrate ImageNpm and ImageGem records into ImagePackage records
        # NOTE: only the count() calls execute here; the db_npms/db_gems Query
        # objects are lazy and are iterated below, after this scope exits
        with session_scope() as dbsession:
            db_npms = dbsession.query(ImageNpm)
            total_npms = dbsession.query(ImageNpm).count()
            db_gems = dbsession.query(ImageGem)
            total_gems = dbsession.query(ImageGem).count()

        npms = []
        chunk_size = 8192
        record_count = 0
        try:
            for n in db_npms:
                np = ImagePackage()

                # primary keys
                np.name = n.name
                if len(n.versions_json):
                    version = n.versions_json[0]
                else:
                    version = "N/A"
                np.version = version
                np.pkg_type = 'npm'
                np.arch = 'N/A'
                np.image_user_id = n.image_user_id
                np.image_id = n.image_id
                np.pkg_path = n.path

                # other
                np.pkg_path_hash = n.path_hash
                np.distro_name = 'npm'
                np.distro_version = 'N/A'
                np.like_distro = 'npm'
                np.fullversion = np.version
                np.license = ' '.join(n.licenses_json)
                np.origin = ' '.join(n.origins_json)
                fullname = np.name
                np.normalized_src_pkg = fullname
                np.src_pkg = fullname
                npms.append(np)
                if len(npms) >= chunk_size:
                    startts = time.time()
                    try:
                        with session_scope() as dbsession:
                            dbsession.bulk_save_objects(npms)
                            record_count = record_count + chunk_size
                    except:
                        log.err("skipping duplicates")
                        record_count = record_count + chunk_size
                    log.err("merged {} / {} npm records (time={}), performing next range".format(record_count, total_npms, time.time() - startts))

                    npms = []

            if len(npms):
                startts = time.time()
                try:
                    with session_scope() as dbsession:
                        dbsession.bulk_save_objects(npms)
                        record_count = record_count + len(npms)
                except:
                    log.err("skipping duplicates")
                    record_count = record_count + len(npms)
                log.err("final merged {} / {} npm records (time={})".format(record_count, total_npms, time.time() - startts))

        except Exception as err:
            raise err

        gems = []
        chunk_size = 8192
        record_count = 0
        try:
            for n in db_gems:

                np = ImagePackage()

                # primary keys
                np.name = n.name
                if len(n.versions_json):
                    version = n.versions_json[0]
                else:
                    version = "N/A"
                np.version = version
                np.pkg_type = 'gem'
                np.arch = 'N/A'
                np.image_user_id = n.image_user_id
                np.image_id = n.image_id
                np.pkg_path = n.path

                # other
                np.pkg_path_hash = n.path_hash
                np.distro_name = 'gem'
                np.distro_version = 'N/A'
                np.like_distro = 'gem'
                np.fullversion = np.version
                np.license = ' '.join(n.licenses_json)
                np.origin = ' '.join(n.origins_json)
                fullname = np.name
                np.normalized_src_pkg = fullname
                np.src_pkg = fullname
                gems.append(np)
                if len(gems) >= chunk_size:
                    startts = time.time()
                    try:
                        with session_scope() as dbsession:
                            dbsession.bulk_save_objects(gems)
                            record_count = record_count + chunk_size
                    except:
                        log.err("skipping duplicates")
                        record_count = record_count + chunk_size
                    log.err("merged {} / {} gem records (time={}), performing next range".format(record_count, total_gems, time.time() - startts))

                    gems = []

            if len(gems):
                startts = time.time()
                try:
                    with session_scope() as dbsession:
                        dbsession.bulk_save_objects(gems)
                        record_count = record_count + len(gems)
                except:
                    log.err("skipping duplicates")
                    record_count = record_count + len(gems)
                log.err("final merged {} / {} gem records (time={})".format(record_count, total_gems, time.time() - startts))

        except Exception as err:
            raise err
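
The npm and gem loops above duplicate the same chunked-flush logic; distilled into a generic sketch (the helper name is ours, not anchore-engine's):

def bulk_save_in_chunks(objects, chunk_size=8192):
    """Flush ORM objects in fixed-size chunks, each in its own session scope."""
    buffered = []
    saved = 0
    for obj in objects:
        buffered.append(obj)
        if len(buffered) >= chunk_size:
            with session_scope() as dbsession:
                dbsession.bulk_save_objects(buffered)
            saved += len(buffered)
            buffered = []
    if buffered:
        with session_scope() as dbsession:
            dbsession.bulk_save_objects(buffered)
        saved += len(buffered)
    return saved
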
Example #29
    def _register(self):
        if not self.is_enabled:
            logger.error(
                'Service not enabled in config, not registering service: ' +
                self.name)
            raise Exception('No service enabled, cannot continue bootstrap')

        logger.info('Registering service: {}'.format(self.name))

        service_template = {
            'type': 'anchore',
            'base_url': 'N/A',
            'status_base_url': 'N/A',
            'version': 'v1',
            'short_description': ''
        }

        hstring = 'http'
        if 'external_tls' in self.configuration:
            if self.configuration.get('external_tls', False):
                hstring = 'https'
        elif 'ssl_enable' in self.configuration:
            if self.configuration.get('ssl_enable', False):
                hstring = 'https'

        endpoint_hostname = endpoint_port = endpoint_hostport = None
        if self.configuration.get('external_hostname', False):
            endpoint_hostname = self.configuration.get('external_hostname')
        elif self.configuration.get('endpoint_hostname', False):
            endpoint_hostname = self.configuration.get('endpoint_hostname')

        if self.configuration.get('external_port', False):
            endpoint_port = int(self.configuration.get('external_port'))
        elif self.configuration.get('port', False):
            endpoint_port = int(self.configuration.get('port'))

        if endpoint_hostname:
            endpoint_hostport = endpoint_hostname
            if endpoint_port:
                endpoint_hostport = endpoint_hostport + ":" + str(
                    endpoint_port)

        if endpoint_hostport:
            service_template['base_url'] = "{}://{}".format(
                hstring, endpoint_hostport)
        else:
            raise Exception(
                "could not construct service base_url - please check service configuration for hostname/port settings"
            )

        try:
            service_template['status'] = False
            service_template['status_message'] = taskstate.base_state(
                'service_status')

            with session_scope() as dbsession:
                service_records = db_services.get_byname(self.__service_name__,
                                                         session=dbsession)

                # fail if trying to add a service that must be unique in the system, but one already is registered in DB
                if self.__is_unique_service__:
                    if len(service_records) > 1:
                        raise Exception(
                            'more than one entry for service type (' +
                            str(self.__service_name__) +
                            ') exists in DB, but service must be unique - manual DB intervention required'
                        )

                    for service_record in service_records:
                        if service_record and (service_record['hostid'] !=
                                               self.instance_id):
                            raise Exception(
                                'service type (' + str(self.__service_name__) +
                                ') already exists in system with different host_id - detail: my_host_id='
                                + str(self.instance_id) + ' db_host_id=' +
                                str(service_record['hostid']))

                # if all checks out, then add/update the registration
                ret = db_services.add(self.instance_id,
                                      self.__service_name__,
                                      service_template,
                                      session=dbsession)

                try:
                    my_service_record = {
                        'hostid': self.instance_id,
                        'servicename': self.__service_name__,
                    }
                    my_service_record.update(service_template)
                    servicestatus.set_my_service_record(my_service_record)
                    self.service_record = my_service_record
                except Exception as err:
                    logger.warn(
                        'could not set local service information - exception: {}'
                        .format(str(err)))

        except Exception as err:
            raise err

        service_record = servicestatus.get_my_service_record()
        servicestatus.set_status(service_record,
                                 up=True,
                                 available=True,
                                 update_db=True,
                                 versions=self.versions)
        logger.info('Service registration complete')
        return True
Example #30
    def execute(self):
        log.info('Starting feed update')

        # Feed syncs update images with any new CVEs pulled in by the sync. Any images loaded while the sync is in progress need to be
        # re-scanned for CVEs, since transaction ordering can result in images being loaded with pre-sync data but excluded from the sync itself.

        # Create feed task begin event
        error = None
        with session_scope() as session:
            mgr = identities.manager_factory.for_session(session)
            catalog_client = internal_client_for(CatalogClient, userId=None)

        try:
            catalog_client.add_event(
                FeedSyncTaskStarted(
                    groups=self.feeds if self.feeds else 'all'))
        except:
            log.exception('Ignoring event generation error before feed sync')

        start_time = datetime.datetime.utcnow()
        try:
            f = DataFeeds.instance()
            start_time = datetime.datetime.utcnow()

            f.vuln_fn = FeedsUpdateTask.process_updated_vulnerability
            f.vuln_flush_fn = FeedsUpdateTask.flush_vulnerability_matches

            updated_dict = f.sync(to_sync=self.feeds,
                                  full_flush=self.full_flush,
                                  catalog_client=catalog_client)

            log.info('Feed sync complete. Results = {}'.format(updated_dict))
            return updated_dict
        except Exception as e:
            error = e
            log.exception('Failure refreshing and syncing feeds')
            raise
        finally:
            end_time = datetime.datetime.utcnow()
            # log feed sync event
            try:
                if error:
                    catalog_client.add_event(
                        FeedSyncTaskFailed(
                            groups=self.feeds if self.feeds else 'all',
                            error=error))
                else:
                    catalog_client.add_event(
                        FeedSyncTaskCompleted(
                            groups=self.feeds if self.feeds else 'all'))
            except:
                log.exception(
                    'Ignoring event generation error after feed sync')

            try:
                self.rescan_images_created_between(from_time=start_time,
                                                   to_time=end_time)
            except:
                log.exception(
                    'Unexpected exception rescanning vulns for images added during the feed sync'
                )
                raise
            finally:
                end_session()
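
The start/finish event bracketing in execute() generalizes; a compact sketch of the pattern (the helper name is ours; the event classes and catalog_client are the ones used above):

def run_with_sync_events(catalog_client, groups, fn):
    """Emit a started event, run fn, then emit completed or failed."""
    try:
        catalog_client.add_event(FeedSyncTaskStarted(groups=groups))
    except Exception:
        log.exception('Ignoring event generation error before feed sync')
    try:
        result = fn()
    except Exception as e:
        catalog_client.add_event(FeedSyncTaskFailed(groups=groups, error=e))
        raise
    else:
        catalog_client.add_event(FeedSyncTaskCompleted(groups=groups))
        return result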