Example 1
    def initialize_system_identities(self):
        """
        Ensure basic system identities are present
        :param session: DB session to use to query/update. Tx managed externally
        :return: boolean status
        """

        # system user
        try:
            if not self.mgr.get_account(localconfig.SYSTEM_ACCOUNT_NAME):
                self.mgr.create_account(
                    localconfig.SYSTEM_ACCOUNT_NAME,
                    AccountTypes.service,
                    "system@system",
                )

            if not self.mgr.get_user(localconfig.SYSTEM_USERNAME):
                self.mgr.create_user(localconfig.SYSTEM_ACCOUNT_NAME,
                                     localconfig.SYSTEM_USERNAME)
                self.mgr.add_user_credential(
                    username=localconfig.SYSTEM_USERNAME,
                    credential_type=UserAccessCredentialTypes.password,
                )

        except Exception as err:
            logger.exception("Error initializing system identities")
            raise Exception(
                "Initialization failed: could not fetch/add anchore-system user from/to DB - exception: "
                + str(err))

        # admin user
        try:
            if not self.mgr.get_account(localconfig.ADMIN_ACCOUNT_NAME):
                init_email = localconfig.get_config().get(
                    localconfig.DEFAULT_ADMIN_EMAIL_KEY, "admin@myanchore")
                self.mgr.create_account(localconfig.ADMIN_ACCOUNT_NAME,
                                        AccountTypes.admin, init_email)

            if not self.mgr.get_user(localconfig.ADMIN_USERNAME):
                self.mgr.create_user(localconfig.ADMIN_ACCOUNT_NAME,
                                     localconfig.ADMIN_USERNAME)

                init_password = localconfig.get_config().get(
                    localconfig.DEFAULT_ADMIN_PASSWORD_KEY, )

                if not init_password:
                    raise Exception("No default admin password provided")

                self.mgr.add_user_credential(
                    username=localconfig.ADMIN_USERNAME,
                    credential_type=UserAccessCredentialTypes.password,
                    value=init_password,
                )
            return True
        except Exception as err:
            logger.exception("Error initializing system identities")
            raise Exception(
                "Initialization failed: could not fetch/add anchore-system user from/to DB - exception: "
                + str(err))
Example 2
def oauth_config_loader():
    """
    Loads the key configuration from the default location

    :return:
    """

    return localconfig.get_config().get('user_authentication', {}).get('oauth'), localconfig.get_config().get('keys')
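The loader returns two values: the user_authentication.oauth section (None when absent) and the keys section. A minimal usage sketch; the local names here are assumptions:

oauth_config, keys_config = oauth_config_loader()
if oauth_config is None:
    # OAuth is not configured; keys_config may still hold signing/verification keys
    pass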
Example 3
def migrate(from_driver_configpath, to_driver_configpath, nodelete=False, dontask=False):
    """
    Migrate the objects in the document archive from one driver backend to the other. This may be a long running operation depending on the number of objects and amount of data to migrate.

    The migration process expects that the source and destination configurations are provided by config files passed in as arguments. The source configuration generally should be the same
    as the configuration in the anchore engine config.yaml.

    The general flow for a migration is:
    1. Stop anchore-engine services (shut down the entire cluster to ensure no data modifications during migration)
    2. Create a new configuration yaml with, at minimum, the services.catalog.archive section configured as you would like it when the migration is complete
    3. Run the migration
    4. Update the config.yaml for your anchore-engine system to use the new driver.
    5. Start anchore-engine again

    A minimal invocation sketch follows this function.
    """
    global localconfig

    ecode = 0
    do_migrate = False
    try:
        logger.info('Loading configs')
        from_raw = copy.deepcopy(load_config(configfile=from_driver_configpath))
        get_config().clear()

        to_raw = copy.deepcopy(load_config(configfile=to_driver_configpath))
        get_config().clear()

        from_config = operations.normalize_config(from_raw['services']['catalog'])
        to_config = operations.normalize_config(to_raw['services']['catalog'])

        logger.info('Migration from config: {}'.format(json.dumps(from_config, indent=2)))
        logger.info('Migration to config: {}'.format(json.dumps(to_config, indent=2)))

        if dontask:
            do_migrate = True
        else:
            try:
                answer = input("Performing this operation requires *all* anchore-engine services to be stopped - proceed? (y/N)")
            except:
                answer = "n"
            if 'y' == answer.lower():
                do_migrate = True

        if do_migrate:
            migration.initiate_migration(from_config, to_config, remove_on_source=(not nodelete), do_lock=True)
            logger.info("After this migration, your anchore-engine config.yaml MUST have the following configuration options added before starting up again:")
            if 'archive_data_dir' in to_config:
                logger.info("\tNOTE: for archive_data_dir, the value must be set to the location that is accessible within your anchore-engine container")

            print(yaml.dump(to_config, default_flow_style=False))
        else:
            logger.info("Skipping conversion.")
    except Exception as err:
        logger.error(utils.format_error_output(config, 'dbupgrade', {}, err))
        if not ecode:
            ecode = 2

    utils.doexit(ecode)
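A minimal invocation sketch for the flow above. The config paths are assumptions; each file is expected to be a full engine config.yaml containing a services.catalog section. Note that the function ends the process via utils.doexit() rather than returning:

# Hypothetical paths, for illustration only
migrate('/config/from-config.yaml', '/config/to-config.yaml',
        nodelete=False,  # objects are removed from the source backend as they migrate
        dontask=True)    # skip the interactive y/N confirmation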
Example 4
def handle_metrics(*args, **kwargs):
    """
    Update resource usage metrics

    :param args:
    :param kwargs:
    :return:
    """
    cycle_timer = kwargs["mythread"]["cycle_timer"]

    while True:
        try:
            conf = localconfig.get_config()
            try:
                tmpdir = conf["tmp_dir"]
                svfs = os.statvfs(tmpdir)
                available_bytes = svfs.f_bsize * svfs.f_bavail
                metrics.gauge_set("anchore_tmpspace_available_bytes", available_bytes)
            except Exception as err:
                logger.warn(
                    "unable to detect available bytes probe - exception: " + str(err)
                )
        except Exception as err:
            logger.warn("handler failed - exception: " + str(err))

        time.sleep(cycle_timer)

    return True
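The handler reads its cycle timer from kwargs['mythread'] and loops forever; a hypothetical wiring sketch (the 60-second value is an assumption). The same kwargs contract appears in the other monitor handlers among these examples:

import threading

t = threading.Thread(target=handle_metrics,
                     kwargs={"mythread": {"cycle_timer": 60}})
t.daemon = True  # the monitor loop never returns, so don't block process exit
t.start()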
Example 5
    def run_feeds_update(cls, json_obj=None, force_flush=False):
        """
        Creates a task and runs it, optionally with a thread if locking is enabled.

        :return:
        """
        error = None
        feeds = None

        with session_scope() as session:
            mgr = identities.manager_factory.for_session(session)
            system_user = mgr.get_system_credentials()

        catalog_client = CatalogClient(user=system_user[0], password=system_user[1])

        try:

            feeds = get_selected_feeds_to_sync(localconfig.get_config())
            if json_obj:
                task = cls.from_json(json_obj)
                if not task:
                    return None
                task.feeds = feeds
            else:
                task = FeedsUpdateTask(feeds_to_sync=feeds, flush=force_flush)

            # Create feed task begin event
            try:
                catalog_client.add_event(FeedSyncStart(groups=feeds if feeds else 'all'))
            except:
                log.exception('Ignoring event generation error before feed sync')

            result = []
            if cls.locking_enabled:
                # system_user = get_system_user_auth()
                run_target_with_lease(user_auth=system_user, lease_id='feed_sync', ttl=90, target=lambda: result.append(task.execute()))
                # A bit of work-around for the lambda def to get result from thread execution
                if result:
                    result = result[0]
            else:
                result = task.execute()

            return result
        except LeaseAcquisitionFailedError as ex:
            error = ex
            log.exception('Could not acquire lock on feed sync, likely another sync already in progress')
            raise Exception('Cannot execute feed sync, lock is held by another feed sync in progress')
        except Exception as e:
            error = e
            log.exception('Error executing feeds update')
            raise e
        finally:
            # log feed sync event
            try:
                if error:
                    catalog_client.add_event(FeedSyncFail(groups=feeds if feeds else 'all', error=error))
                else:
                    catalog_client.add_event(FeedSyncComplete(groups=feeds if feeds else 'all'))
            except:
                log.exception('Ignoring event generation error after feed sync')
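The result.append(...) lambda above is a closure idiom for recovering a return value from a target run on another thread, since (per the in-line comment) the return value is not propagated by run_target_with_lease. A standalone illustration of the idiom; all names are local to this sketch:

import threading

def run_detached(target):
    t = threading.Thread(target=target)
    t.start()
    t.join()

result = []
run_detached(lambda: result.append(2 + 2))  # capture the value via the closed-over list
value = result[0] if result else None       # value == 4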
Example 6
def handle_feed_sync_trigger(*args, **kwargs):
    """
    Checks to see if there is a task for a feed sync in the queue and if not, adds one.
    Interval for firing this should be longer than the expected feed sync duration.

    :param args:
    :param kwargs:
    :return:
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:
        config = localconfig.get_config()
        feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)
        if feed_sync_enabled:
            logger.info('Feed Sync task creator activated')
            try:
                push_sync_task(system_user)
                logger.info('Feed Sync Trigger done, waiting for next cycle.')
            except Exception as e:
                logger.error('Error caught in feed sync trigger handler after all retries. Will wait for next cycle')
            finally:
                logger.info('Feed Sync task creator complete')
        else:
            logger.info("sync_enabled is set to false in config - skipping feed sync trigger")

        time.sleep(cycle_time)

    return True
Example 7
def handle_feed_sync(*args, **kwargs):
    """
    Initiates a feed sync in the system in response to a message from the queue

    :param args:
    :param kwargs:
    :return:
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:
        config = localconfig.get_config()
        feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)
        if feed_sync_enabled:
            logger.info("Feed sync task executor activated")
            try:
                run_feed_sync(system_user)
            except Exception as e:
                logger.error(
                    'Caught escaped error in feed sync handler: {}'.format(e))
            finally:
                logger.info('Feed sync task executor complete')
        else:
            logger.info(
                "sync_enabled is set to false in config - skipping feed sync")

        time.sleep(cycle_time)

    return True
Example 8
def get_endpoints(service_name):
    """
    Return a list of endpoint urls for the given service name.
    :param service_name:
    :return: list of url strings
    """

    local_conf = localconfig.get_config()
    urls = []

    try:
        if service_name + '_endpoint' in local_conf:
            urls = [re.sub("/+$", "", local_conf[service_name + '_endpoint'])]
        else:
            with session_scope() as dbsession:
                service_reports = db_services.get_byname(service_name, session=dbsession)
                if service_reports:
                    for service in service_reports:
                        base_url = service.get('base_url')
                        if base_url:
                            apiversion = service.get('version', '')
                            urls.append('/'.join([base_url, apiversion]))
                        else:
                            raise Exception("cannot load valid endpoint from DB for service {}".format(service_name))

            if not urls:
                raise Exception("cannot locate registered service in DB: " + service_name)
    except Exception as err:
        logger.exception('Error during endpoint lookup for service {}'.format(service_name))
        raise Exception("could not find valid endpoint - exception: " + str(err))

    return urls
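A usage sketch; the service name and override value are assumptions. A '<service_name>_endpoint' key in the config short-circuits the DB lookup entirely:

# If config.yaml contains, e.g., catalog_endpoint: http://catalog:8228/v1 (assumed value),
# that single URL is returned; otherwise registered services are looked up in the DB.
urls = get_endpoints('catalog')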
Example 9
def test_load_policy_bundle_paths(mock_default_config, tmpdir,
                                  config_filenames):
    # setup files to read
    input_dir = tmpdir.mkdir(INPUT_BUNDLES_DIR)
    mock_test_files(input_dir, config_filenames)
    output_dir_name = tmpdir.strpath + "/bundles"

    # setup the default config
    load_defaults(configdir=tmpdir)

    # function under test
    load_policy_bundle_paths(src_dir=input_dir.strpath)

    # get and validate the relevant config bits
    config = get_config()
    assert config["policy_bundles"] is not None
    assert len(config["policy_bundles"]) == len(config_filenames)
    for config_filename in config_filenames:
        policy_bundle = next(
            policy_bundle for policy_bundle in config["policy_bundles"]
            if policy_bundle["bundle_path"] == output_dir_name + "/" +
            config_filename)
        assert policy_bundle is not None
        if config_filename == "anchore_default_bundle.json":
            assert policy_bundle["active"]
        else:
            assert not policy_bundle["active"]
        assert os.path.exists(policy_bundle["bundle_path"])
Example 10
def do_feed_sync(msg):
    if 'FeedsUpdateTask' not in locals():
        from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask

    if 'get_selected_feeds_to_sync' not in locals():
        from anchore_engine.services.policy_engine.engine.feeds import get_selected_feeds_to_sync

    handler_success = False
    timer = time.time()
    logger.info("FIRING: feed syncer")
    try:
        feeds = get_selected_feeds_to_sync(localconfig.get_config())
        logger.info('Syncing configured feeds: {}'.format(feeds))
        result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get('data'))

        if result is not None:
            handler_success = True
        else:
            logger.warn('Feed sync task marked as disabled, so skipping')
    except ValueError as e:
        logger.warn('Received msg of wrong type')
    except Exception as err:
        logger.warn("failure in feed sync handler - exception: " + str(err))

    if handler_success:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer, function='do_feed_sync', status="success")
    else:
        anchore_engine.subsys.metrics.summary_observe('anchore_monitor_runtime_seconds', time.time() - timer, function='do_feed_sync', status="fail")
Example 11
def create_feed_update(notification):
    """
    Creates a feed data update notification.

    :param notification:
    :return:
    """
    if not connexion.request.is_json:
        abort(400)

    notification = FeedUpdateNotification.from_dict(notification)
    result = []
    try:
        feeds = get_selected_feeds_to_sync(localconfig.get_config())
        task = FeedsUpdateTask(feeds_to_sync=feeds)
        result = task.execute()
    except HTTPException:
        raise
    except Exception as e:
        log.exception('Error executing feed update task')
        abort(
            Response(status=500,
                     response=json.dumps({
                         'error': 'feed sync failure',
                         'details': 'Failure syncing feed: {}'.format(e)
                     }),
                     mimetype='application/json'))

    return jsonify(['{}/{}'.format(x[0], x[1]) for x in result]), 200
Example 12
def perform_analyze(
    account,
    manifest,
    image_record,
    registry_creds,
    layer_cache_enable=False,
    parent_manifest=None,
):
    ret_analyze = {}

    loaded_config = get_config()
    tmpdir = get_tempdir(loaded_config)

    use_cache_dir = None
    if layer_cache_enable:
        use_cache_dir = os.path.join(tmpdir, "anchore_layercache")

    # choose the first image detail record; TODO: possibly a more complex selection here
    try:
        image_detail = image_record["image_detail"][0]
        registry_manifest = manifest
        registry_parent_manifest = parent_manifest
        pullstring = (image_detail["registry"] + "/" + image_detail["repo"] +
                      "@" + image_detail["imageDigest"])
        fulltag = (image_detail["registry"] + "/" + image_detail["repo"] +
                   ":" + image_detail["tag"])
        logger.debug("using pullstring (" + str(pullstring) +
                     ") and fulltag (" + str(fulltag) + ") to pull image data")
    except Exception as err:
        image_detail = pullstring = fulltag = None
        raise Exception(
            "failed to extract requisite information from image_record - exception: "
            + str(err))

    timer = int(time.time())
    logger.spew("timing: analyze start: " + str(int(time.time()) - timer))
    logger.info("performing analysis on image: " +
                str([account, pullstring, fulltag]))

    logger.debug("obtaining anchorelock..." + str(pullstring))
    with anchore_engine.clients.localanchore_standalone.get_anchorelock(
            lockId=pullstring, driver="nodocker"):
        logger.debug("obtaining anchorelock successful: " + str(pullstring))
        logger.info("analyzing image: %s", pullstring)
        analyzed_image_report, manifest_raw = localanchore_standalone.analyze_image(
            account,
            registry_manifest,
            image_record,
            tmpdir,
            loaded_config,
            registry_creds=registry_creds,
            use_cache_dir=use_cache_dir,
            parent_manifest=registry_parent_manifest,
        )
        ret_analyze = analyzed_image_report

    logger.info("performing analysis on image complete: " + str(pullstring))

    return ret_analyze
Example 13
def keys_config_loader():
    """
    Loads the key configuration from the default location

    :return:
    """

    return localconfig.get_config().get('keys')
Example 14
def _system_creds():
    global system_user_auth

    if not system_user_auth:
        config = localconfig.get_config()
        system_user_auth = config['system_user_auth']

    return system_user_auth
Example 15
def archive_data_upgrade_005_006():
    """
    Upgrade the document archive data schema and move the data appropriately.
    Assumes both tables are in place (archive_document, archive_document_reference, object_storage)

    :return:
    """

    from anchore_engine.db import LegacyArchiveDocument, session_scope, ObjectStorageMetadata
    from anchore_engine.subsys import object_store
    from anchore_engine.subsys.object_store.config import DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY
    from anchore_engine.configuration import localconfig

    config = localconfig.get_config()
    object_store.initialize(config.get('services', {}).get('catalog', {}),
                            manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
                            config_keys=(DEFAULT_OBJECT_STORE_MANAGER_ID,
                                         ALT_OBJECT_STORE_CONFIG_KEY),
                            allow_legacy_fallback=True)
    client = anchore_engine.subsys.object_store.manager.get_manager().primary_client

    session_counter = 0
    max_pending_session_size = 10000

    with session_scope() as db_session:
        for doc in db_session.query(LegacyArchiveDocument.userId,
                                    LegacyArchiveDocument.bucket,
                                    LegacyArchiveDocument.archiveId,
                                    LegacyArchiveDocument.documentName,
                                    LegacyArchiveDocument.created_at,
                                    LegacyArchiveDocument.last_updated,
                                    LegacyArchiveDocument.record_state_key,
                                    LegacyArchiveDocument.record_state_val):
            meta = ObjectStorageMetadata(userId=doc[0],
                                         bucket=doc[1],
                                         archiveId=doc[2],
                                         documentName=doc[3],
                                         is_compressed=False,
                                         document_metadata=None,
                                         content_url=client.uri_for(
                                             userId=doc[0],
                                             bucket=doc[1],
                                             key=doc[2]),
                                         created_at=doc[4],
                                         last_updated=doc[5],
                                         record_state_key=doc[6],
                                         record_state_val=doc[7])

            db_session.add(meta)

            session_counter += 1

            if session_counter >= max_pending_session_size:
                db_session.flush()
                session_counter = 0
Example 16
def test_empty_src_dirs(mock_default_config, tmpdir):
    # setup the default config
    load_defaults(configdir=tmpdir)

    # function under test
    load_policy_bundle_paths(src_dirs=[])

    # get and validate the relevant config bits
    config = get_config()
    assert config["policy_bundles"] is None
Example 17
    def init_oauth(self):
        # Initialize the oauth stuff as needed.
        expiration_config = {
            'authorization_code': 864000,
            'implicit': 3600,
            'password': int(localconfig.get_config()['user_authentication']['oauth'].get('default_token_expiration_seconds')),
            'client_credentials': 864000
        }

        self._oauth_app = init_oauth(self._application.app, [PasswordGrant], expiration_config)
Example 18
def token_manager(config=None):
    global _token_manager
    if _token_manager is None:
        if config is None:
            config = localconfig.get_config()

        assert config is not None
        oauth_config, keys_config = oauth_config_loader(config)
        _token_manager = JwtTokenManager(oauth_config, keys_config)

    return _token_manager
Example 19
def get_tempdir(config=None):
    """
    Return the configured temp dir from the provided config or loaded from global if no config provided

    """
    c = get_config() if not config else config
    try:
        return c["tmp_dir"]
    except Exception as err:
        logger.warn("could not get tmp_dir from localconfig - exception: " +
                    str(err))
        return "/tmp"
Example 20
def handle_feed_sync_trigger(*args, **kwargs):
    """
    Checks to see if there is a task for a feed sync in the queue and if not, adds one.
    Interval for firing this should be longer than the expected feed sync duration.

    :param args:
    :param kwargs:
    :return:
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:

        config = localconfig.get_config()
        feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)

        if feed_sync_enabled:
            try:
                all_ready = anchore_engine.clients.common.check_services_ready(
                    ['simplequeue'])
                if not all_ready:
                    logger.info(
                        "simplequeue service not yet ready, will retry")
                else:
                    logger.info('Feed Sync Trigger activated')
                    if not simplequeue.is_inqueue(userId=system_user,
                                                  name=feed_sync_queuename,
                                                  inobj=feed_sync_msg):
                        try:
                            simplequeue.enqueue(userId=system_user,
                                                name=feed_sync_queuename,
                                                inobj=feed_sync_msg)
                        except:
                            logger.exception(
                                'Could not enqueue message for a feed sync')
                    logger.info(
                        'Feed Sync Trigger done, waiting for next cycle.')
            except Exception as e:
                logger.exception(
                    'Error caught in feed sync trigger handler. Will continue. Exception: {}'
                    .format(e))
        else:
            logger.debug(
                "sync_enabled is set to false in config - skipping feed sync trigger"
            )

        time.sleep(cycle_time)

    return True
Example 21
def create_account(account):
    """
    POST /accounts

    :param account:
    :return:
    """

    try:
        try:
            can_create_account(account)
        except ValueError as ex:
            return make_response_error('Invalid account request: {}'.format(
                ex.args[0]),
                                       in_httpcode=400), 400
        except Exception as ex:
            logger.exception('Unexpected exception in account validation')
            return make_response_error('Invalid account request',
                                       in_httpcode=400), 400

        with session_scope() as session:
            mgr = manager_factory.for_session(session)
            try:
                resp = mgr.create_account(account_name=account['name'],
                                          account_type=account.get(
                                              'type', AccountTypes.user.value),
                                          email=account.get('email'))
            except ValueError as ex:
                return make_response_error('Validation failed: {}'.format(ex),
                                           in_httpcode=400), 400

            authorizer.notify(NotificationTypes.domain_created,
                              account['name'])

            # Initialize account stuff
            try:
                _init_policy(account['name'], config=get_config())
            except Exception:
                logger.exception(
                    'Could not initialize policy bundle for new account: {}'.
                    format(account['name']))
                raise

        return account_db_to_msg(resp), 200
    except AccountAlreadyExistsError as ex:
        return make_response_error(errmsg='Account already exists',
                                   in_httpcode=400), 400
    except Exception as ex:
        logger.exception('Unexpected Error creating account')
        return make_response_error('Error creating account',
                                   in_httpcode=500), 500
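The handler reads 'name' (required) plus optional 'type' and 'email' from the request body; a minimal payload sketch with assumed values:

account = {
    "name": "devteam",           # required; hypothetical account name
    "type": "user",              # optional; defaults to AccountTypes.user.value
    "email": "dev@example.com",  # optional
}
response_body, http_code = create_account(account)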
Example 22
def handle_layer_cache():
    """
    Do layer cache cleanup

    :return:
    """

    localconfig = get_config()
    myconfig = localconfig["services"]["analyzer"]

    cachemax_gbs = int(myconfig.get("layer_cache_max_gigabytes", 1))
    cachemax = cachemax_gbs * 1000000000

    try:
        tmpdir = localconfig["tmp_dir"]
    except Exception as err:
        logger.warn("could not get tmp_dir from localconfig - exception: " +
                    str(err))
        tmpdir = "/tmp"
    use_cache_dir = os.path.join(tmpdir, "anchore_layercache")
    if os.path.exists(use_cache_dir):
        totalsize = 0
        layertimes = {}
        layersizes = {}

        for f in os.listdir(os.path.join(use_cache_dir, "sha256")):
            layerfile = os.path.join(use_cache_dir, "sha256", f)
            layerstat = os.stat(layerfile)
            totalsize = totalsize + layerstat.st_size
            layersizes[layerfile] = layerstat.st_size
            layertimes[layerfile] = max(
                [layerstat.st_mtime, layerstat.st_ctime, layerstat.st_atime])

        if totalsize > cachemax:
            logger.debug("layer cache total size (" + str(totalsize) +
                         ") exceeds configured cache max (" + str(cachemax) +
                         ") - performing cleanup")
            currsize = totalsize
            sorted_layers = sorted(list(layertimes.items()),
                                   key=operator.itemgetter(1))
            while currsize > cachemax:
                rmlayer = sorted_layers.pop(0)
                logger.debug("removing cached layer: " + str(rmlayer))
                os.remove(rmlayer[0])
                currsize = currsize - layersizes[rmlayer[0]]
                logger.debug("currsize after remove: " + str(currsize))

    return True
Example 23
def archive_data_upgrade_005_006():
    """
    Upgrade the document archive data schema and move the data appropriately.
    Assumes both tables are in place (archive_document, archive_document_reference, object_storage)

    :return:
    """

    from anchore_engine.db import ArchiveDocument, session_scope, ArchiveMetadata
    from anchore_engine.subsys import archive
    from anchore_engine.subsys.archive import operations
    from anchore_engine.configuration import localconfig

    config = localconfig.get_config()
    archive.initialize(config.get('services', {}).get('catalog', {}))
    client = operations.get_archive().primary_client

    session_counter = 0
    max_pending_session_size = 10000

    with session_scope() as db_session:
        for doc in db_session.query(
                ArchiveDocument.userId, ArchiveDocument.bucket,
                ArchiveDocument.archiveId, ArchiveDocument.documentName,
                ArchiveDocument.created_at, ArchiveDocument.last_updated,
                ArchiveDocument.record_state_key,
                ArchiveDocument.record_state_val):
            meta = ArchiveMetadata(userId=doc[0],
                                   bucket=doc[1],
                                   archiveId=doc[2],
                                   documentName=doc[3],
                                   is_compressed=False,
                                   document_metadata=None,
                                   content_url=client.uri_for(userId=doc[0],
                                                              bucket=doc[1],
                                                              key=doc[2]),
                                   created_at=doc[4],
                                   last_updated=doc[5],
                                   record_state_key=doc[6],
                                   record_state_val=doc[7])

            db_session.add(meta)

            session_counter += 1

            if session_counter >= max_pending_session_size:
                db_session.flush()
                session_counter = 0
Example 24
def _load_config(config_option, validate_params=None):
    try:
        # config and init
        configfile = configdir = None
        if config_option:
            configdir = config_option
            configfile = os.path.join(config_option, 'config.yaml')

        localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
        my_config = localconfig.get_config()
        my_config['myservices'] = []
        logger.spew("localconfig=" + json.dumps(my_config, indent=4, sort_keys=True))
        return my_config
    except Exception as err:
        logger.error("cannot load configuration: exception - " + str(err))
        raise err
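A call sketch; the directory is an assumption and must contain a config.yaml, per the os.path.join above:

my_config = _load_config("/config")  # loads /config/config.yaml and returns the config dict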
Example 25
    def init_oauth(self):
        # Initialize the oauth stuff as needed.
        expiration_config = {
            "authorization_code": 864000,
            "implicit": 3600,
            "password": int(
                localconfig.get_config()["user_authentication"]["oauth"].get(
                    "default_token_expiration_seconds")),
            "client_credentials": 864000,
        }

        self._oauth_app = init_oauth(self._application.app, [PasswordGrant],
                                     expiration_config)
Example 26
def handle_feed_sync(*args, **kwargs):
    """
    Initiates a feed sync in the system in response to a message from the queue

    :param args:
    :param kwargs:
    :return:
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:

        config = localconfig.get_config()
        feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)

        if feed_sync_enabled:
            try:
                all_ready = anchore_engine.clients.common.check_services_ready(
                    ['simplequeue'])
                if not all_ready:
                    logger.info(
                        "simplequeue service not yet ready, will retry")
                else:
                    try:
                        simplequeue.run_target_with_queue_ttl(
                            system_user,
                            queue=feed_sync_queuename,
                            target=do_feed_sync,
                            max_wait_seconds=30,
                            visibility_timeout=180)
                    except Exception as err:
                        logger.warn("failed to process task this cycle: " +
                                    str(err))
            except Exception as e:
                logger.error(
                    'Caught escaped error in feed sync handler: {}'.format(e))
        else:
            logger.debug(
                "sync_enabled is set to false in config - skipping feed sync")

        time.sleep(cycle_time)

    return True
Example 27
def _init_feeds():
    """
    Perform an initial feed sync using a bulk-sync if no sync has been done yet.

    :return:
    """

    image_count_bulk_sync_threshold = 0  # More than this many images will result in the system doing a regular sync instead of a bulk sync.

    logger.info('Initializing feeds if necessary')
    from anchore_engine.services.policy_engine.engine import vulnerabilities, feeds
    from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask, InitialFeedSyncTask

    feeds = feeds.get_selected_feeds_to_sync(localconfig.get_config())
    task = InitialFeedSyncTask(feeds_to_sync=feeds)
    task.execute()

    return True
Example 28
def test_load_filepath_to_config(mock_default_config, tmpdir, config_key,
                                 config_filename):
    # setup files to read
    input_dir = tmpdir.mkdir(INPUT_CONFIG_DIR)
    mock_test_file(input_dir, config_filename)
    output_dir_name = tmpdir.strpath

    # setup the default config
    load_defaults(configdir=tmpdir)

    load_filepath_to_config(config_key,
                            config_filename,
                            src_dir=input_dir.strpath)
    config = get_config()
    assert config["anchore_scanner_analyzer_config_file"] is not None
    assert (config["anchore_scanner_analyzer_config_file"] == output_dir_name +
            "/" + config_filename)
    assert os.path.exists(config["anchore_scanner_analyzer_config_file"])
Example 29
    def get_system_credentials(self):
        """
        Get system credentials, from the local cache if available first
        :return: (username, password) tuple
        """
        lc = localconfig.get_config()
        if 'system_user_auth' in lc and lc['system_user_auth'] != (None, None):
            creds = lc['system_user_auth']
            logger.debug('Using creds found in config: {}'.format(creds))

            if type(creds) in [tuple, list]:
                return HttpBasicCredential(creds[0], creds[1])
            elif type(creds) == str:
                # Assume its a bearer token
                return HttpBearerCredential(token=creds, expiration=None)
            else:
                return creds

        return self._get_system_user_credentials()
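The branches above accept three shapes for system_user_auth in the config, summarized here as comments (values are placeholders):

# system_user_auth: ("anchore-system", "<password>")  -> wrapped in HttpBasicCredential
# system_user_auth: "<bearer-token-string>"           -> wrapped in HttpBearerCredential
# any other non-(None, None) value                    -> returned as-is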
Example 30
def process_preflight():
    """
    Execute the preflight functions, aborting service startup if any throw uncaught exceptions or return False return value

    :return:
    """

    config = localconfig.get_config()

    # read the global feed disable parameter
    feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)

    # get the list of feeds if they have been explicitly configured in config.yaml
    feed_enabled_status = config.get('feeds', {}).get('selective_sync',
                                                      {}).get('feeds', {})

    # check to see if the engine is configured to sync at least one data feed
    at_least_one = any(feed_enabled_status.values())

    # toggle credential validation based on whether or not any feeds are configured to sync
    skip_credential_validate = False
    if not feed_sync_enabled or not at_least_one:
        logger.info(
            "Engine is configured to skip data feed syncs - skipping feed sync client check"
        )
        skip_credential_validate = True

    preflight_check_functions = [_init_db_content]
    if not skip_credential_validate:
        preflight_check_functions.append(_check_feed_client_credentials)

    for fn in preflight_check_functions:
        try:
            fn()
        except Exception as e:
            logger.exception(
                'Preflight checks failed with error: {}. Aborting service startup'
                .format(e))
            sys.exit(1)
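For reference, the config shape the preflight inspects, inferred from the .get() chains above (the feed name is a placeholder):

config_fragment = {
    "feeds": {
        "sync_enabled": True,
        "selective_sync": {
            "feeds": {
                "vulnerabilities": True,  # placeholder feed name; True means sync it
            },
        },
    },
}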