def migrate(from_driver_configpath, to_driver_configpath, nodelete=False, dontask=False):
    """
    Migrate the objects in the document archive from one driver backend to another. This may be a long-running operation depending on the number of objects and the amount of data to migrate.

    The migration process expects that the source and destination configurations are provided by config files passed in as arguments. The source configuration generally should be the same
    as the configuration in the anchore engine config.yaml.

    The general flow for a migration is:
    1. Stop anchore-engine services (shutdown the entire cluster to ensure no data modifications during migration)
    2. Create a new configuration yaml with at minimum the services.catalog.archive section configured as you would like it when migration is complete
    3. Run migration
    4. Update the config.yaml for your anchore-engine system to use the new driver.
    5. Start anchore-engine again

    """
    global localconfig

    ecode = 0
    do_migrate = False
    try:
        logger.info('Loading configs')
        from_raw = copy.deepcopy(load_config(configfile=from_driver_configpath))
        get_config().clear()

        to_raw = copy.deepcopy(load_config(configfile=to_driver_configpath))
        get_config().clear()

        from_config = operations.normalize_config(from_raw['services']['catalog'])
        to_config = operations.normalize_config(to_raw['services']['catalog'])

        logger.info('Migration from config: {}'.format(json.dumps(from_config, indent=2)))
        logger.info('Migration to config: {}'.format(json.dumps(to_config, indent=2)))

        if dontask:
            do_migrate = True
        else:
            try:
                # raw_input is the Python 2 builtin; the later revision of this
                # example (below) uses the Python 3 input() instead
                answer = raw_input("Performing this operation requires *all* anchore-engine services to be stopped - proceed? (y/N)")
            except Exception:
                answer = "n"
            if 'y' == answer.lower():
                do_migrate = True

        if do_migrate:
            migration.initiate_migration(from_config, to_config, remove_on_source=(not nodelete), do_lock=True)
            logger.info("After this migration, your anchore-engine config.yaml MUST have the following configuration options added before starting up again:")
            if 'archive_data_dir' in to_config:
                logger.info("\tNOTE: for archive_data_dir, the value must be set to the location that is accessible within your anchore-engine container")

            print(yaml.dump(to_config, default_flow_style=False))
        else:
            logger.info("Skipping conversion.")
    except Exception as err:
        logger.error(utils.format_error_output(config, 'dbupgrade', {}, err))
        if not ecode:
            ecode = 2

    utils.doexit(ecode)
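
For reference, the destination file named by to_driver_configpath needs a services.catalog section with the archive driver configured. A rough sketch of that shape, written as a Python dict mirroring the YAML (the driver name and fields here are illustrative; the exact schema depends on the backend you choose):

# Illustrative only: rough shape of the services.catalog.archive section the
# destination config is expected to carry. Real driver fields vary by backend.
to_config_sketch = {
    'services': {
        'catalog': {
            'archive': {
                'compression': {'enabled': True},
                'storage_driver': {
                    'name': 'db',   # e.g. 'db', 's3', 'swift' (backend-specific)
                    'config': {},
                },
            }
        }
    }
}

# With both YAML files written out, the migration is a single call; dontask=True
# skips the confirmation prompt. Note the function exits the process via
# utils.doexit() when it finishes.
migrate('/config/from_config.yaml', '/config/to_config.yaml', dontask=True)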
Example 2
def _load_config(config_option, validate_params=None):
    try:
        # config and init
        configfile = configdir = None
        if config_option:
            configdir = config_option
            configfile = os.path.join(config_option, 'config.yaml')

        localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
        my_config = localconfig.get_config()
        my_config['myservices'] = []
        logger.spew("localconfig=" + json.dumps(my_config, indent=4, sort_keys=True))
        return my_config
    except Exception as err:
        logger.error("cannot load configuration: exception - " + str(err))
        # re-raise without wrapping so the original traceback is preserved
        raise
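
A minimal usage sketch for the helper above, assuming the layout it implies (a config.yaml inside the supplied directory); the validate_params content shown is a hypothetical placeholder, not a documented schema:

# Hypothetical usage: point at a directory containing config.yaml.
my_config = _load_config('/config', validate_params={'services': True})
print(my_config['myservices'])  # initialized to [] by _load_config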
Example 3
def analyzers(ctx_config):
    global config, module
    config = localconfig.load_config(configdir=ctx_config['configdir'])

    try:
        # do some DB connection/pre-checks here
        log_level = 'DEBUG' if ctx_config['debug'] else 'INFO'
        logger.set_log_level(log_level, log_to_stdout=True)
    except Exception as err:
        logger.error(anchore_manager.cli.utils.format_error_output(ctx_config, 'db', {}, err))
        sys.exit(2)
Example 4
def migrate(from_driver_configpath,
            to_driver_configpath,
            from_analysis_archive=False,
            to_analysis_archive=False,
            nodelete=False,
            dontask=False,
            bucket=None):
    """
    Migrate the objects in the document archive from one driver backend to another. This may be a long-running operation depending on the number of objects and the amount of data to migrate.

    The migration process expects that the source and destination configurations are provided by config files passed in as arguments. The source configuration generally should be the same
    as the configuration in the anchore engine config.yaml.

    The general flow for a migration is:
    1. Stop anchore-engine services (shutdown the entire cluster to ensure no data modifications during migration)
    2. Create a new configuration yaml with at minimum the services.catalog.archive section configured as you would like it when migration is complete
    3. Run migration
    4. Update the config.yaml for your anchore-engine system to use the new driver.
    5. Start anchore-engine again

    """

    ecode = 0
    do_migrate = False
    try:
        logger.info('Loading configs')
        from_raw = copy.deepcopy(
            load_config(configfile=from_driver_configpath))
        get_config().clear()

        to_raw = copy.deepcopy(load_config(configfile=to_driver_configpath))

        if from_analysis_archive:
            # Only use the specific key for the source, fail if not found
            from_config = obj_config.extract_config(
                from_raw['services']['catalog'],
                config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID])
        else:
            from_config = obj_config.extract_config(
                from_raw['services']['catalog'],
                config_keys=[
                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                    ALT_OBJECT_STORE_CONFIG_KEY
                ])

        if from_config:
            from_config = obj_config.normalize_config(from_config,
                                                      legacy_fallback=False)
            logger.info('Migration from config: {}'.format(
                json.dumps(from_config, indent=2)))
        else:
            if from_analysis_archive:
                config_key = '"' + ANALYSIS_ARCHIVE_MANAGER_ID + '"'
            else:
                config_key = '"' + DEFAULT_OBJECT_STORE_MANAGER_ID + '" or "' + ALT_OBJECT_STORE_CONFIG_KEY + '"'
            raise Exception(
                'No valid source configuration found. Needed a configuration section with key {} in the catalog service configuration'
                .format(config_key))

        if to_analysis_archive:
            # Only use the specific key if set, fail if not found
            to_config = obj_config.extract_config(
                to_raw['services']['catalog'],
                config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID])
        else:
            to_config = obj_config.extract_config(
                to_raw['services']['catalog'],
                config_keys=[
                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                    ALT_OBJECT_STORE_CONFIG_KEY
                ])

        if to_config:
            to_config = obj_config.normalize_config(to_config,
                                                    legacy_fallback=False)
            logger.info('Migration to config: {}'.format(
                json.dumps(to_config, indent=2)))
        else:
            if to_analysis_archive:
                config_key = '"' + ANALYSIS_ARCHIVE_MANAGER_ID + '"'
            else:
                config_key = '"' + DEFAULT_OBJECT_STORE_MANAGER_ID + '" or "' + ALT_OBJECT_STORE_CONFIG_KEY + '"'
            raise Exception(
                'No valid destination configuration found. Needed a configuration section with key {} in the catalog service configuration'
                .format(config_key))

        if dontask:
            do_migrate = True
        else:
            try:
                answer = input(
                    "Performing this operation requires *all* anchore-engine services to be stopped - proceed? (y/N)"
                )
            except Exception:
                answer = "n"
            if 'y' == answer.lower():
                do_migrate = True

        if do_migrate:
            migration.initiate_migration(from_config,
                                         to_config,
                                         remove_on_source=(not nodelete),
                                         do_lock=True,
                                         buckets_to_migrate=bucket)
            logger.info(
                "After this migration, your anchore-engine config.yaml MUST have the following configuration options added before starting up again:"
            )
            if 'archive_data_dir' in to_config:
                logger.info(
                    "\tNOTE: for archive_data_dir, the value must be set to the location that is accessible within your anchore-engine container"
                )

            logger.info(yaml.dump(to_config, default_flow_style=False))
        else:
            logger.info("Skipping conversion.")
    except Exception as err:
        logger.error(utils.format_error_output(config, 'dbupgrade', {}, err))
        if not ecode:
            ecode = 2

    utils.doexit(ecode)
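
The extract_config calls above imply a simple contract: given the catalog service config and an ordered list of candidate keys, return the first matching section, or a falsy value when none is present. A sketch of that implied behavior (an illustration, not the real obj_config.extract_config implementation):

def extract_config_sketch(catalog_config, config_keys):
    # Return the first non-empty section found under any of config_keys,
    # or None when no candidate key is present.
    for key in config_keys:
        section = catalog_config.get(key)
        if section:
            return section
    return None

# Mirrors the source-config lookup above: prefer the default object-store
# key, then fall back to the legacy/alternate key.
# from_config = extract_config_sketch(from_raw['services']['catalog'],
#                                     [DEFAULT_OBJECT_STORE_MANAGER_ID,
#                                      ALT_OBJECT_STORE_CONFIG_KEY])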
Example 5
def check(configfile, analysis_archive):
    """
    Test the configuration in the expected anchore-engine config location or override that and use the configuration file provided as an option.

    To test, the system will read and write a very small data document to the driver and then delete it on completion.

    :param configfile:
    :param analysis_archive:
    :return:
    """

    logger.info('Using config file {}'.format(configfile))
    sys_config = load_config(configfile=configfile)

    if sys_config:
        service_config = sys_config['services']['catalog']
    else:
        service_config = None

    if not service_config:
        logger.error(
            'No configuration file or content available. Cannot test archive driver configuration'
        )
        utils.doexit(2)

    if analysis_archive:
        try:
            object_store.initialize(service_config,
                                    manager_id=ANALYSIS_ARCHIVE_MANAGER_ID,
                                    config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID])
        except Exception:
            logger.error(
                'No "analysis_archive" configuration section found in the configuration. To check a config that uses the default backend for analysis archive data, use the regular object storage check'
            )
            utils.doexit(2)

        mgr = object_store.get_manager(ANALYSIS_ARCHIVE_MANAGER_ID)
    else:
        object_store.initialize(service_config,
                                manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
                                config_keys=[
                                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                                    ALT_OBJECT_STORE_CONFIG_KEY
                                ])
        mgr = object_store.get_manager()

    test_user_id = 'test'
    test_bucket = 'anchorecliconfigtest'
    test_archive_id = 'cliconfigtest'
    test_data = 'clitesting at {}'.format(
        datetime.datetime.utcnow().isoformat())

    logger.info(
        'Checking existence of test document with user_id = {}, bucket = {} and archive_id = {}'
        .format(test_user_id, test_bucket, test_archive_id))
    if mgr.exists(test_user_id, test_bucket, test_archive_id):
        test_archive_id = 'cliconfigtest2'
        if mgr.exists(test_user_id, test_bucket, test_archive_id):
            logger.error(
                'Found existing records for archive doc to test, aborting test to avoid overwriting any existing data'
            )
            utils.doexit(1)

    logger.info(
        'Creating test document with user_id = {}, bucket = {} and archive_id = {}'
        .format(test_user_id, test_bucket, test_archive_id))
    result = mgr.put(test_user_id,
                     test_bucket,
                     test_archive_id,
                     data=test_data)
    if not result:
        logger.warn(
            'Got empty response from archive PUT operation: {}'.format(result))

    logger.info('Checking document fetch')
    loaded = str(mgr.get(test_user_id, test_bucket, test_archive_id), 'utf-8')
    if not loaded:
        logger.error(
            'Failed retrieving the written document. Got: {}'.format(loaded))
        utils.doexit(5)

    if str(loaded) != test_data:
        logger.error(
            'Failed retrieving the written document. Got something other than expected. Expected: "{}" Got: "{}"'
            .format(test_data, loaded))
        utils.doexit(5)

    logger.info('Removing test object')
    mgr.delete(test_user_id, test_bucket, test_archive_id)

    if mgr.exists(test_user_id, test_bucket, test_archive_id):
        logger.error('Found archive object after it should have been removed')
        utils.doexit(5)

    logger.info('Archive config check completed successfully')
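
Everything check() does reduces to an exists/put/get/delete round trip against the selected manager. That contract as a compact sketch, built from the same mgr.* calls used above (raising instead of calling utils.doexit, so it could be reused in tests):

def roundtrip_check(mgr, user_id, bucket, archive_id, data):
    # Sketch of the cycle check() performs; raises on any failure.
    if mgr.exists(user_id, bucket, archive_id):
        raise RuntimeError('test object already exists; refusing to overwrite')
    mgr.put(user_id, bucket, archive_id, data=data)
    loaded = str(mgr.get(user_id, bucket, archive_id), 'utf-8')
    if loaded != data:
        raise RuntimeError('read-back mismatch: {!r} != {!r}'.format(loaded, data))
    mgr.delete(user_id, bucket, archive_id)
    if mgr.exists(user_id, bucket, archive_id):
        raise RuntimeError('object still present after delete')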
Example 6
def check(configfile):
    """
    Test the configuration in the expected anchore-engine config location or override that and use the configuration file provided as an option.

    To test, the system will read and write a very small data document to the driver and then delete it on completion.

    :param configfile:
    :return:
    """

    logger.info('Using config file {}'.format(configfile))
    sys_config = load_config(configfile=configfile)

    if sys_config:
        service_config = sys_config['services']['catalog']
    else:
        service_config = None

    if not service_config:
        logger.error(
            'No configuration file or content available. Cannot test archive driver configuration'
        )
        utils.doexit(2)

    archive.initialize(service_config)

    test_user_id = 'test'
    test_bucket = 'anchorecliconfigtest'
    test_archive_id = 'cliconfigtest'
    test_data = 'clitesting at {}'.format(
        datetime.datetime.utcnow().isoformat())

    logger.info(
        'Checking existence of test document with user_id = {}, bucket = {} and archive_id = {}'
        .format(test_user_id, test_bucket, test_archive_id))
    if archive.exists(test_user_id, test_bucket, test_archive_id):
        test_archive_id = 'cliconfigtest2'
        if archive.exists(test_user_id, test_bucket, test_archive_id):
            logger.error(
                'Found existing records for archive doc to test, aborting test to avoid overwriting any existing data'
            )
            utils.doexit(1)

    logger.info(
        'Creating test document with user_id = {}, bucket = {} and archive_id = {}'
        .format(test_user_id, test_bucket, test_archive_id))
    result = archive.put(test_user_id,
                         test_bucket,
                         test_archive_id,
                         data=test_data)
    if not result:
        logger.warn(
            'Got empty response from archive PUT operation: {}'.format(result))

    logger.info('Checking document fetch')
    loaded = str(archive.get(test_user_id, test_bucket, test_archive_id),
                 'utf-8')
    if not loaded:
        logger.error(
            'Failed retrieving the written document. Got: {}'.format(loaded))
        utils.doexit(5)

    if str(loaded) != test_data:
        logger.error(
            'Failed retrieving the written document. Got something other than expected. Expected: "{}" Got: "{}"'
            .format(test_data, loaded))
        utils.doexit(5)

    logger.info('Removing test object')
    archive.delete(test_user_id, test_bucket, test_archive_id)

    if archive.exists(test_user_id, test_bucket, test_archive_id):
        logger.error('Found archive object after it should have been removed')
        utils.doexit(5)

    logger.info('Archive config check completed successfully')
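Example 7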
import sys
from anchore_engine.configuration import localconfig
from anchore_engine import db
from anchore_engine.db.entities import upgrade
from anchore_engine.subsys import logger

logger.enable_bootstrap_logging()

if __name__ == '__main__':
    conf = sys.argv[1]
    localconfig.load_config(conf)
    db.initialize(localconfig.get_config())
    logger.info("Running upgrade test...")
    logger.info("Found version: {}".format(upgrade.get_versions()))
    upgrade.run_upgrade()
    logger.info("Found version: {}".format(upgrade.get_versions()))
    logger.info("Upgrade complete")
Example 8
def analyzers(ctx_config):
    global config, module, click_config

    config = localconfig.load_config(configdir=ctx_config['configdir'])
    click_config = ctx_config
Example 9
def check(configfile, analysis_archive):
    """
    Test the configuration in the expected anchore-engine config location or override that and use the configuration file provided as an option.

    To test, the system will read and write a very small data document to the driver and then delete it on completion.
    """

    db_conf = db_context()
    db_preflight(db_conf["params"], db_conf["retries"])

    logger.info("Using config file {}".format(configfile))
    sys_config = load_config(configfile=configfile)

    if sys_config:
        service_config = sys_config["services"]["catalog"]
    else:
        service_config = None

    if not service_config:
        logger.error(
            "No configuration file or content available. Cannot test archive driver configuration"
        )
        fail_exit()

    if analysis_archive:
        try:
            object_store.initialize(
                service_config,
                manager_id=ANALYSIS_ARCHIVE_MANAGER_ID,
                config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID],
            )
        except Exception:
            logger.error(
                'No "analysis_archive" configuration section found in the configuration. To check a config that uses the default backend for analysis archive data, use the regular object storage check'
            )
            fail_exit()

        mgr = object_store.get_manager(ANALYSIS_ARCHIVE_MANAGER_ID)
    else:
        object_store.initialize(
            service_config,
            manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
            config_keys=[
                DEFAULT_OBJECT_STORE_MANAGER_ID, ALT_OBJECT_STORE_CONFIG_KEY
            ],
        )
        mgr = object_store.get_manager()

    test_user_id = "test"
    test_bucket = "anchorecliconfigtest"
    test_archive_id = "cliconfigtest"
    test_data = "clitesting at {}".format(
        datetime.datetime.utcnow().isoformat())

    logger.info(
        "Checking existence of test document with user_id = {}, bucket = {} and archive_id = {}"
        .format(test_user_id, test_bucket, test_archive_id))
    if mgr.exists(test_user_id, test_bucket, test_archive_id):
        test_archive_id = "cliconfigtest2"
        if mgr.exists(test_user_id, test_bucket, test_archive_id):
            logger.error(
                "Found existing records for archive doc to test, aborting test to avoid overwritting any existing data"
            )
            doexit(1)

    logger.info(
        "Creating test document with user_id = {}, bucket = {} and archive_id = {}"
        .format(test_user_id, test_bucket, test_archive_id))
    result = mgr.put(test_user_id,
                     test_bucket,
                     test_archive_id,
                     data=test_data)
    if not result:
        logger.warn(
            "Got empty response form archive PUT operation: {}".format(result))

    logger.info("Checking document fetch")
    loaded = str(mgr.get(test_user_id, test_bucket, test_archive_id), "utf-8")
    if not loaded:
        logger.error(
            "Failed retrieving the written document. Got: {}".format(loaded))
        doexit(ExitCode.obj_store_failed)

    if str(loaded) != test_data:
        logger.error(
            'Failed retrieving the written document. Got something other than expected. Expected: "{}" Got: "{}"'
            .format(test_data, loaded))
        doexit(ExitCode.obj_store_failed)

    logger.info("Removing test object")
    mgr.delete(test_user_id, test_bucket, test_archive_id)

    if mgr.exists(test_user_id, test_bucket, test_archive_id):
        logger.error("Found archive object after it should have been removed")
        doexit(ExitCode.obj_store_failed)

    logger.info("Archive config check completed successfully")