Example #1
def initialize(service_config, force=False, check_db=False, manager_id=None, config_keys=None, allow_legacy_fallback=False):
    """
    Initialize a global object storage manager for service usage, registered under the given manager_id
    for later retrieval via get_manager().

    Not thread-safe: call only during single-threaded service bootstrap/init, never on a request hot-path.

    This performs convenience config extraction/normalization; to initialize directly from an
    already-prepared object storage configuration, use initialize_direct() instead.

    :param service_config: catalog service configuration from which to extract the archive configuration
    :param force: re-initialize even if a manager with this id already exists
    :param check_db: evaluate the existing db to verify drivers are present to support the data in the db
    :param manager_id: id to register the manager under; the same id is used in get_manager() lookups
    :param config_keys: tuple of keys, in precedence order, searched in service_config to find the manager config
    :param allow_legacy_fallback: support very old (pre 0.2.4) object store configuration formats
    :return: True if a new manager was initialized, False if one was already present (no-op)
    """

    global manager_singleton

    existing = manager_singleton.get(manager_id)
    if existing is not None and not force:
        # A manager is already registered under this id and no re-init was requested
        return False

    raw_config = object_store.config.extract_config(service_config, config_keys=config_keys)
    normalized = normalize_config(raw_config, legacy_fallback=allow_legacy_fallback, service_config=service_config)

    return initialize_direct(normalized, manager_id=manager_id, check_db=check_db)
Example #2
def _extract_migration_config(raw_config, use_analysis_archive, direction):
    """
    Extract and normalize the object store configuration for one side of a migration.

    :param raw_config: full loaded service configuration dict (expects raw_config['services']['catalog'])
    :param use_analysis_archive: if True, only the analysis-archive config key is accepted
    :param direction: 'source' or 'destination', used in error messages only
    :return: normalized object store configuration dict
    :raises Exception: if no matching configuration section is found
    """
    if use_analysis_archive:
        config_keys = [ANALYSIS_ARCHIVE_MANAGER_ID]
        config_key_desc = '"' + ANALYSIS_ARCHIVE_MANAGER_ID + '"'
    else:
        config_keys = [
            DEFAULT_OBJECT_STORE_MANAGER_ID,
            ALT_OBJECT_STORE_CONFIG_KEY
        ]
        config_key_desc = '"' + DEFAULT_OBJECT_STORE_MANAGER_ID + '" or "' + ALT_OBJECT_STORE_CONFIG_KEY + '"'

    extracted = obj_config.extract_config(
        raw_config['services']['catalog'], config_keys=config_keys)

    if not extracted:
        raise Exception(
            'No valid {} configuration found. Needed a configuration section with key {} in the catalog service key'
            .format(direction, config_key_desc))

    return obj_config.normalize_config(extracted, legacy_fallback=False)


def migrate(from_driver_configpath,
            to_driver_configpath,
            from_analysis_archive=False,
            to_analysis_archive=False,
            nodelete=False,
            dontask=False,
            bucket=None):
    """
    Migrate the objects in the document archive from one driver backend to the other. This may be a long running operation depending on the number of objects and amount of data to migrate.

    The migration process expects that the source and destination configurations are provided by config files passed in as arguments. The source configuration generally should be the same
    as the configuration in the anchore engine config.yaml.

    The general flow for a migration is:
    1. Stop anchore-engine services (shutdown the entire cluster to ensure no data modifications during migration)
    2. Create a new configuration yaml with at minimum the services.catalog.archive section configured as you would like it when migration is complete
    3. Run migration
    4. Update the config.yaml for your anchore-engine system to use the new driver.
    5. Start anchore-engine again

    :param from_driver_configpath: path to a config yaml describing the source (current) object store driver
    :param to_driver_configpath: path to a config yaml describing the destination object store driver
    :param from_analysis_archive: read the source config from the analysis-archive key only
    :param to_analysis_archive: read the destination config from the analysis-archive key only
    :param nodelete: if True, leave objects in the source store after copying (default is to remove them)
    :param dontask: skip the interactive confirmation prompt
    :param bucket: optional restriction of which bucket(s) to migrate
    """

    ecode = 0
    do_migrate = False
    try:
        logger.info('Loading configs')
        from_raw = copy.deepcopy(
            load_config(configfile=from_driver_configpath))
        # Clear the global config so loading the destination file does not merge with the source
        get_config().clear()

        to_raw = copy.deepcopy(load_config(configfile=to_driver_configpath))

        # Extract + normalize both sides identically (the original code logged the destination
        # config before normalization and the source after; now both log the normalized form)
        from_config = _extract_migration_config(
            from_raw, from_analysis_archive, 'source')
        logger.info('Migration from config: {}'.format(
            json.dumps(from_config, indent=2)))

        to_config = _extract_migration_config(
            to_raw, to_analysis_archive, 'destination')
        logger.info('Migration to config: {}'.format(
            json.dumps(to_config, indent=2)))

        if dontask:
            do_migrate = True
        else:
            try:
                answer = input(
                    "Performing this operation requires *all* anchore-engine services to be stopped - proceed? (y/N)"
                )
            except (EOFError, KeyboardInterrupt):
                # Non-interactive stdin or user interrupt: treat as "no"
                # (narrowed from a bare except, which also hid unrelated errors)
                answer = "n"
            if 'y' == answer.lower():
                do_migrate = True

        if do_migrate:
            migration.initiate_migration(from_config,
                                         to_config,
                                         remove_on_source=(not nodelete),
                                         do_lock=True,
                                         buckets_to_migrate=bucket)
            logger.info(
                "After this migration, your anchore-engine config.yaml MUST have the following configuration options added before starting up again:"
            )
            if 'archive_data_dir' in to_config:
                logger.info(
                    "\tNOTE: for archive_data_dir, the value must be set to the location that is accessible within your anchore-engine container"
                )

            logger.info((yaml.dump(to_config, default_flow_style=False)))
        else:
            logger.info("Skipping conversion.")
    except Exception as err:
        # NOTE(review): 'config' here is presumably a module-level global, and the
        # 'dbupgrade' operation tag looks copy-pasted from another command — confirm upstream
        logger.error(utils.format_error_output(config, 'dbupgrade', {}, err))
        if not ecode:
            ecode = 2

    utils.doexit(ecode)