Example 1
def list_migrations():
    db_conf = db_context()
    db_preflight(db_conf['params'], db_conf['retries'])

    with session_scope() as db:
        tasks = db_tasks.get_all(task_type=ArchiveMigrationTask,
                                 session=db,
                                 json_safe=True)

    fields = [
        'id', 'state', 'started_at', 'ended_at', 'migrate_from_driver',
        'migrate_to_driver', 'archive_documents_migrated',
        'archive_documents_to_migrate', 'last_updated'
    ]

    headers = [
        'id', 'state', 'start time', 'end time', 'from', 'to',
        'migrated count', 'total to migrate', 'last updated'
    ]

    tbl = PrettyTable(field_names=headers)
    tbl.set_style(PLAIN_COLUMNS)
    for t in tasks:
        tbl.add_row([t[x] for x in fields])

    logger.info(tbl.get_string(sortby='id'))
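
The table rendering above leans on PrettyTable's PLAIN_COLUMNS style, which drops the ASCII borders so the output logs cleanly. A minimal, self-contained sketch of the same pattern, using hypothetical task rows:

from prettytable import PrettyTable, PLAIN_COLUMNS

tbl = PrettyTable(field_names=['id', 'state'])
tbl.set_style(PLAIN_COLUMNS)        # plain columns, no border characters
tbl.add_row(['2', 'complete'])      # hypothetical migration task rows
tbl.add_row(['1', 'running'])
print(tbl.get_string(sortby='id'))  # get_string can sort on any declared field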
Example 2
def startup_service(service, configdir):
    pidfile = "/var/run/anchore/" + service + ".pid"
    logfile = "/var/log/anchore/" + service + ".log"
    # os.environ['ANCHORE_LOGFILE'] = logfile

    logger.info("cleaning up service: {}".format(str(service)))
    terminate_service(service, flush_pidfile=True)

    twistd_cmd = 'twistd'
    for f in ['/bin/twistd', '/usr/local/bin/twistd']:
        if os.path.exists(f):
            twistd_cmd = f

    cmd = [
        twistd_cmd, '--logger=anchore_engine.subsys.twistd_logger.logger',
        '--pidfile', pidfile, "-n", service, '--config', configdir
    ]
    logger.info("starting service: {}".format(str(service)))
    logger.info("\t {}".format(' '.join(cmd)))

    try:
        newenv = os.environ.copy()
        newenv['ANCHORE_LOGFILE'] = logfile
        pipes = subprocess.Popen(cmd, env=newenv)
        sout, serr = pipes.communicate()
        rc = pipes.returncode
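        # twistd runs in the foreground (-n), so communicate() only returns
        # once the service process has exited; that is always treated as a failure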
        raise Exception("process exited: " + str(rc))
    except Exception as err:
        logger.exception("service process exited at ({}): {}".format(
            str(time.ctime()), str(err)))
        logger.fatal('Could not start service due to: {}'.format(str(err)))

    logger.info("exiting service thread")

    return False
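
The launcher copies os.environ and sets ANCHORE_LOGFILE on the copy rather than mutating the global environment (note the commented-out assignment near the top), so the override is visible only to the child process. A standalone sketch of that pattern, with a placeholder command and log path:

import os
import subprocess

newenv = os.environ.copy()                      # leave the parent's environment untouched
newenv['ANCHORE_LOGFILE'] = '/tmp/example.log'  # hypothetical log path
proc = subprocess.Popen(['env'], env=newenv)    # 'env' stands in for the real twistd command
proc.communicate()                              # block until the child exits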
Example 3
def list_drivers():
    """
    List the available drivers installed locally
    """

    ecode = ExitCode.ok

    try:
        drivers = object_store.manager.get_driver_list()
        logger.info("Supported object storage drivers: " + str(drivers))
    except Exception as err:
        log_error('list-drivers', err)
        if not ecode:
            ecode = ExitCode.failed

    doexit(ecode)
Example 4
def start(services, no_auto_upgrade, anchore_module, skip_config_validate,
          skip_db_compat_check, all):
    """
    Start up and monitor service processes. Specify a list of service names, or none to start all.
    """

    global config
    ecode = ExitCode.ok

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK',
                      str(skip_db_compat_check)).lower() in [
                          'true', 't', 'y', 'yes'
                      ]:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    if services:
        input_services = list(services)
    else:
        input_services = os.getenv('ANCHORE_ENGINE_SERVICES',
                                   '').strip().split()

    if not input_services and not all:
        raise click.exceptions.BadArgumentUsage(
            'No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option'
        )

    try:
        validate_params = {
            'services': True,
            'webhooks': True,
            'credentials': True
        }
        if skip_config_validate:
            for item in skip_config_validate.split(','):
                validate_params[item] = False

        # find/set up configuration
        configdir = config['configdir']
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(
                    configdir=configdir,
                    configfile=configfile,
                    validate_params=validate_params)
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception(
                "cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info(
                "Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name +
                                             ".db.entities.upgrade")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) +
                            ") cannot be found/imported - exception: " +
                            str(err))

        # get the list of local services to start
        startFailed = False
        if not input_services:
            config_services = localconfig.get('services', {})
            if not config_services:
                logger.warn(
                    'could not find any services to execute in the config file'
                )
                sys.exit(1)

            input_services = [
                name for name, srv_conf in list(config_services.items())
                if srv_conf.get('enabled')
            ]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in list(service_map.values()):
                svc = service_conf_name
            else:
                svc = service_map.get(service_conf_name)

            if svc:
                services.append(svc)
            else:
                logger.warn(
                    'specified service {} not found in list of available services {} - removing from list of services to start'
                    .format(service_conf_name, list(service_map.keys())))

        if 'anchore-catalog' in services:
            services.remove('anchore-catalog')
            services.insert(0, 'anchore-catalog')

        if not services:
            logger.error(
                "No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting"
            )
            sys.exit(1)

        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(
                localconfig)

            # override db_timeout since upgrade might require longer db session timeout setting
            try:
                if 'timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['timeout'] = 86400
                elif 'connect_timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['connect_timeout'] = 86400
            except Exception:
                pass

            anchore_manager.util.db.connect_database(db_params, db_retries=300)
            code_versions, db_versions = anchore_manager.util.db.init_database(
                upgrade_module=module,
                localconfig=localconfig,
                do_db_compatibility_check=(not skip_db_compat_check))

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions['db_version'] != db_versions['db_version']:
                        if not no_auto_upgrade and 'anchore-catalog' in services:
                            logger.info("Performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info(
                                        "No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn(
                                "this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds."
                                .format(
                                    str(code_versions['db_version']),
                                    str(db_versions['db_version']),
                                    str(max_timeout -
                                        int(time.time() - timer))))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn(
                        'no existing anchore DB data can be discovered, assuming bootstrap'
                    )
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception(
                    "this version of anchore-engine requires the anchore DB version ("
                    + str(code_versions['db_version']) +
                    ") but we discovered anchore DB version (" +
                    str(db_versions['db_version']) +
                    ") in the running DB - please perform the DB upgrade process and retry\n"
                    "See: https://docs.anchore.com/current/docs/engine/engine_installation/upgrade/#advanced--manual-upgrade-procedure"
                )

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info('Starting services: {}'.format(services))

        for supportdir in ["/var/log/anchore", "/var/run/anchore"]:
            try:
                if not os.path.exists(supportdir):
                    os.makedirs(supportdir, 0o755)
            except Exception as err:
                logger.error(
                    "cannot create log directory {} - exception: {}".format(
                        supportdir, str(err)))
                raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/anchore/" + service + ".pid"
            try:
                terminate_service(service, flush_pidfile=True)

                service_thread = ServiceThread(startup_service,
                                               (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                alive = True
                while not os.path.exists(pidfile) and tries < max_tries:
                    logger.info(
                        "waiting for service pidfile {} to exist {}/{}".format(
                            pidfile, tries, max_tries))

                    try:
                        alive = service_thread.thread.is_alive()
                    except Exception:
                        pass
                    if not alive:
                        logger.info(
                            "service thread has stopped {}".format(service))
                        break

                    time.sleep(1)
                    tries += 1

                logger.info("auto_restart_services setting: {}".format(
                    localconfig.get('auto_restart_services', False)))
                if not localconfig.get('auto_restart_services', False):
                    logger.info(
                        "checking for startup failure pidfile={}, is_alive={}".
                        format(os.path.exists(pidfile), alive))
                    if not os.path.exists(pidfile) or not alive:
                        raise Exception(
                            "service thread for ({}) failed to start".format(
                                service))

                time.sleep(1)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(
                    str(err)))
                break

        if startFailed:
            logger.fatal(
                "one or more services failed to start. cleanly terminating the others"
            )
            for service in services:
                terminate_service(service, flush_pidfile=True)
            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(),
                                  path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if localconfig.get('auto_restart_services', False):
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info(
                                        "restarting service: {}".format(
                                            service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error(
                    "failed to startup log watchers - exception: {}".format(
                        str(err)))
                raise err

    except Exception as err:
        log_error('servicestart', err)
        ecode = ExitCode.failed

    doexit(ecode)
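
The skip_db_compat_check handling at the top of start() uses a common truthy-flag idiom: read the environment variable with the CLI value as the default, lowercase it, and compare against a small set of accepted spellings. A standalone sketch (env_flag is a hypothetical helper name):

import os

def env_flag(name, default=False):
    # 'true', 't', 'y', 'yes' in any case count as truthy, as in start() above
    return os.environ.get(name, str(default)).lower() in ('true', 't', 'y', 'yes')

print(env_flag('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK'))  # False unless the env var says otherwise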
Example 5
def terminate_service(service, flush_pidfile=False):
    pidfile = "/var/run/anchore/" + service + ".pid"
    try:
        logger.info(
            "Looking for pre-existing service ({}) pid from pidfile ({})".
            format(service, pidfile))
        thepid = None
        if os.path.exists(pidfile):
            with open(pidfile, 'r') as FH:
                thepid = int(FH.read())

        if thepid:
            # get some additional information about the pid to determine whether or not to run the kill operations
            thepid_is_theservice = False
            try:
                running_pid = psutil.Process(thepid)
                cmdline = running_pid.cmdline()
                if pidfile in cmdline:
                    thepid_is_theservice = True
                    logger.info(
                        "Found existing service ({}) running with pid ({})".
                        format(service, thepid))
                else:
                    logger.info(
                        "Found pid running but belongs to unrelated process - skipping terminate"
                    )
            except Exception as err:
                thepid_is_theservice = False

            if thepid_is_theservice:
                try:
                    logger.info(
                        "Checking whether existing service ({}) with pid ({}) is alive using signal 0"
                        .format(service, thepid))
                    os.kill(thepid, 0)
                except OSError:
                    pass
                else:
                    logger.info(
                        "Terminating existing service ({}) with pid ({}) using signal 9"
                        .format(service, thepid))
                    os.kill(thepid, 9)

            if flush_pidfile:
                logger.info(
                    "Removing stale pidfile ({}) for service ({})".format(
                        pidfile, service))
                os.remove(pidfile)
    except Exception as err:
        logger.info(
            "Could not detect/shut down running service ({}) - exception: {}".
            format(service, str(err)))
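
The two os.kill calls above do different jobs: signal 0 delivers nothing and only checks that the pid exists and is signalable, while signal 9 (SIGKILL) actually terminates it. A standalone liveness probe built on the same idea (pid_running is a hypothetical helper name):

import os

def pid_running(pid):
    # Signal 0 performs the existence/permission check without delivering a signal
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True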
Example 6
def exec(oci_dir, anchore_archive, digest, parent_digest, image_id, tag,
         account_id, manifest, dockerfile, created_at, annotation):
    """
    Analyze a local image stored as an OCI image directory, and generate an anchore image archive tarball ready for import into an anchore engine.

    OCI_DIR : Location of input OCI image directory
    ANCHORE_ARCHIVE : Location of output anchore image archive to write

    """

    global config

    # this could be improved to allow users to input timestamps (created_at, analyzed_at, etc)
    now = int(time.time())
    try:
        try:
            imageDigest = None
            input_manifest_data = None
            rawmanifest = None

            if (not manifest and not digest) or (manifest and digest):
                raise Exception(
                    "must supply either an image digest or a valid manifest, but not both"
                )

            if os.path.exists(anchore_archive):
                raise Exception(
                    "the supplied anchore archive file ({}) already exists, please remove and try again"
                    .format(anchore_archive))

            if manifest:
                try:
                    with open(manifest, 'r') as FH:
                        # TODO implement manifest validator for anchore requirements, specifically
                        rawmanifest = FH.read()
                        input_manifest_data = json.loads(rawmanifest)
                        imageDigest = manifest_to_digest(rawmanifest)
                except Exception as err:
                    raise ValueError(
                        "cannot calculate digest from supplied manifest - exception: {}"
                        .format(err))

            if digest:
                if re.match("^sha256:[\d|a-f]{64}$", digest):
                    imageDigest = digest
                else:
                    raise ValueError(
                        "input digest does not validate - must be sha256:<64 hex characters>"
                    )

            if parent_digest:
                if re.match("^sha256:[\d|a-f]{64}$", parent_digest):
                    parentDigest = parent_digest
                else:
                    raise ValueError(
                        "input parent_digest does not validate - must be sha256:<64 hex characters>"
                    )
            else:
                parentDigest = imageDigest

            if image_id:
                if re.match("^[\d|a-f]{64}$", image_id):
                    imageId = image_id
                else:
                    raise ValueError("input image_id does not validate")
            else:
                # TODO this could be improved to generate imageId from configuration hash
                imageId = "{}".format(''.join(
                    [random.choice('0123456789abcdef') for x in range(0, 64)]))

            if account_id:
                userId = account_id
            else:
                userId = 'admin'

            if created_at:
                if int(created_at) < 0 or int(created_at) > now + 1:
                    raise ValueError(
                        "created_at must by a unix timestamp between 0 and now ({})"
                        .format(now))
            else:
                created_at = now

            try:
                inputTag = tag
                image_info = parse_dockerimage_string(inputTag)
                if not inputTag.startswith("docker.io/") and image_info.get(
                        'registry', '') == 'docker.io':
                    # undo the auto-fill of 'docker.io' for input that doesn't specify registry
                    image_info['registry'] = 'localbuild'
                fulltag = "{}/{}:{}".format(image_info['registry'],
                                            image_info['repo'],
                                            image_info['tag'])
                fulldigest = "{}/{}@{}".format(image_info['registry'],
                                               image_info['repo'], imageDigest)
                logger.info("using fulltag={} fulldigest={}".format(
                    fulltag, fulldigest))
            except Exception as err:
                raise ValueError(
                    "input tag does not validate - exception: {}".format(err))

            dockerfile_mode = "Guessed"
            dockerfile_contents = None
            if dockerfile:
                with open(dockerfile, 'r') as FH:
                    dockerfile_contents = ensure_str(
                        base64.b64encode(ensure_bytes(FH.read())))
                    dockerfile_mode = "Actual"

            annotations = {}
            if annotation:
                for a in annotation:
                    try:
                        (k, v) = a.split('=', 1)
                        if k and v:
                            annotations[k] = v
                        else:
                            raise Exception("found null in key or value")
                    except Exception:
                        raise ValueError(
                            "annotation format error - annotations must be of the form (--annotation key=value), found: {}"
                            .format(a))

            workspace_root = config['tmp_dir']
        except Exception as err:
            # input setup/validation failure
            raise err

        logger.debug(
            "input has been prepared: imageDigest={} parentDigest={} imageId={} inputTag={} fulltag={} fulldigest={} userId={} annotations={} created_at={}"
            .format(imageDigest, parentDigest, imageId, inputTag, fulltag,
                    fulldigest, userId, annotations, created_at))

        # create an image record
        try:
            image_record = make_image_record(userId,
                                             'docker',
                                             None,
                                             image_metadata={
                                                 'tag': fulltag,
                                                 'digest': fulldigest,
                                                 'imageId': imageId,
                                                 'parentdigest': parentDigest,
                                                 'created_at': created_at,
                                                 'dockerfile':
                                                 dockerfile_contents,
                                                 'dockerfile_mode':
                                                 dockerfile_mode,
                                                 'annotations': annotations
                                             },
                                             registry_lookup=False,
                                             registry_creds=(None, None))
            image_record['created_at'] = created_at
            image_record['last_updated'] = created_at
            image_record['analyzed_at'] = now
            image_record['analysis_status'] = 'analyzed'
            image_record['image_status'] = 'active'
            image_record['record_state_key'] = 'active'
            for image_detail in image_record['image_detail']:
                image_detail['created_at'] = created_at
                image_detail['last_updated'] = created_at
                image_detail['tag_detected_at'] = created_at
                image_detail['record_state_key'] = 'active'
        except Exception as err:
            # image record setup fail
            raise err

        # perform analysis
        image_data, analyzed_manifest_data = analyze_image(oci_dir,
                                                           userId,
                                                           rawmanifest,
                                                           image_record,
                                                           workspace_root,
                                                           config,
                                                           registry_creds=[],
                                                           use_cache_dir=None)

        image_content_data = {}
        for content_type in anchore_engine.common.image_content_types + anchore_engine.common.image_metadata_types:
            try:
                image_content_data[
                    content_type] = anchore_engine.common.helpers.extract_analyzer_content(
                        image_data, content_type, manifest=input_manifest_data)
            except Exception:
                logger.exception(
                    "Unable to extract content for type {}, falling back to empty"
                    .format(content_type))
                image_content_data[content_type] = {}

        anchore_engine.common.helpers.update_image_record_with_analysis_data(
            image_record, image_data)
        image_record['image_size'] = int(image_record['image_size'])

        # generate an output image archive tarball
        archive_file = anchore_archive
        try:
            with ImageArchive.for_writing(archive_file) as img_archive:

                img_archive.account = userId
                img_archive.image_digest = imageDigest
                img_archive.manifest.metadata = {
                    'versions': localconfig.get_versions(),
                    'image_id': imageId,
                    'image_record': json.dumps(image_record, sort_keys=True)
                }

                pack_data = {'document': image_data}
                data = ensure_bytes(json.dumps(pack_data, sort_keys=True))
                img_archive.add_artifact('analysis',
                                         source=ObjectStoreLocation(
                                             bucket='analysis_data',
                                             key=imageDigest),
                                         data=data,
                                         metadata=None)

                pack_data = {'document': image_content_data}
                data = ensure_bytes(json.dumps(pack_data, sort_keys=True))
                img_archive.add_artifact('image_content',
                                         source=ObjectStoreLocation(
                                             bucket='image_content_data',
                                             key=imageDigest),
                                         data=data,
                                         metadata=None)

                pack_data = {'document': input_manifest_data}
                data = ensure_bytes(json.dumps(pack_data, sort_keys=True))
                img_archive.add_artifact('image_manifest',
                                         source=ObjectStoreLocation(
                                             bucket='manifest_data',
                                             key=imageDigest),
                                         data=data,
                                         metadata=None)
        except Exception as err:
            # archive tarball generate fail
            raise err

    except Exception as err:
        log_error('db', err)
        fail_exit()

    click.echo(
        "Analysis complete for image {} - archive file is located at {}".
        format(imageDigest, archive_file))
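
The digest and image-id checks above validate against the sha256 shape: the literal algorithm prefix followed by exactly 64 hex characters. A minimal sketch of that validation:

import re

DIGEST_RE = re.compile(r"^sha256:[0-9a-f]{64}$")  # 64 lowercase hex chars after the prefix

print(bool(DIGEST_RE.match("sha256:" + "a" * 64)))   # True
print(bool(DIGEST_RE.match("sha256:not-a-digest")))  # False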
Example 7
def check(configfile, analysis_archive):
    """
    Test the configuration in the expected anchore-engine config location or override that and use the configuration file provided as an option.

    To test, the system will read and write a very small data document to the driver and then delete it on completion.
    """

    db_conf = db_context()
    db_preflight(db_conf['params'], db_conf['retries'])

    logger.info('Using config file {}'.format(configfile))
    sys_config = load_config(configfile=configfile)

    if sys_config:
        service_config = sys_config['services']['catalog']
    else:
        service_config = None

    if not service_config:
        logger.info(
            'No configuration file or content available. Cannot test archive driver configuration'
        )
        fail_exit()

    if analysis_archive:
        try:
            object_store.initialize(service_config,
                                    manager_id=ANALYSIS_ARCHIVE_MANAGER_ID,
                                    config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID])
        except Exception:
            logger.error(
                'No "analysis_archive" configuration section found in the configuration. To check a config that uses the default backend for analysis archive data, use the regular object storage check'
            )
            fail_exit()

        mgr = object_store.get_manager(ANALYSIS_ARCHIVE_MANAGER_ID)
    else:
        object_store.initialize(service_config,
                                manager_id=DEFAULT_OBJECT_STORE_MANAGER_ID,
                                config_keys=[
                                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                                    ALT_OBJECT_STORE_CONFIG_KEY
                                ])
        mgr = object_store.get_manager()

    test_user_id = 'test'
    test_bucket = 'anchorecliconfigtest'
    test_archive_id = 'cliconfigtest'
    test_data = 'clitesting at {}'.format(
        datetime.datetime.utcnow().isoformat())

    logger.info(
        'Checking existence of test document with user_id = {}, bucket = {} and archive_id = {}'
        .format(test_user_id, test_bucket, test_archive_id))
    if mgr.exists(test_user_id, test_bucket, test_archive_id):
        test_archive_id = 'cliconfigtest2'
        if mgr.exists(test_user_id, test_bucket, test_archive_id):
            logger.error(
                'Found existing records for archive doc to test, aborting test to avoid overwriting any existing data'
            )
            doexit(1)

    logger.info(
        'Creating test document with user_id = {}, bucket = {} and archive_id = {}'
        .format(test_user_id, test_bucket, test_archive_id))
    result = mgr.put(test_user_id,
                     test_bucket,
                     test_archive_id,
                     data=test_data)
    if not result:
        logger.warn(
            'Got empty response from archive PUT operation: {}'.format(result))

    logger.info('Checking document fetch')
    loaded = str(mgr.get(test_user_id, test_bucket, test_archive_id), 'utf-8')
    if not loaded:
        logger.error(
            'Failed retrieving the written document. Got: {}'.format(loaded))
        doexit(ExitCode.obj_store_failed)

    if str(loaded) != test_data:
        logger.error(
            'Failed retrieving the written document. Got something other than expected. Expected: "{}" Got: "{}"'
            .format(test_data, loaded))
        doexit(ExitCode.obj_store_failed)

    logger.info('Removing test object')
    mgr.delete(test_user_id, test_bucket, test_archive_id)

    if mgr.exists(test_user_id, test_bucket, test_archive_id):
        logger.error('Found archive object after it should have been removed')
        doexit(ExitCode.obj_store_failed)

    logger.info('Archive config check completed successfully')
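
The check walks a full round trip - exists, put, get, compare, delete, exists - so any driver misconfiguration surfaces before real data is written. A toy in-memory stand-in showing the same sequence (MemStore is a hypothetical class, not part of anchore-engine):

class MemStore:
    def __init__(self):
        self.data = {}
    def put(self, user, bucket, key, data):
        self.data[(user, bucket, key)] = data
        return True
    def get(self, user, bucket, key):
        return self.data[(user, bucket, key)]
    def exists(self, user, bucket, key):
        return (user, bucket, key) in self.data
    def delete(self, user, bucket, key):
        self.data.pop((user, bucket, key), None)

mgr = MemStore()
assert not mgr.exists('test', 'bkt', 'doc')   # nothing there yet
mgr.put('test', 'bkt', 'doc', 'hello')
assert mgr.get('test', 'bkt', 'doc') == 'hello'
mgr.delete('test', 'bkt', 'doc')
assert not mgr.exists('test', 'bkt', 'doc')   # cleaned up after the test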
Example 8
def migrate(from_driver_configpath,
            to_driver_configpath,
            from_analysis_archive=False,
            to_analysis_archive=False,
            nodelete=False,
            dontask=False,
            bucket=None):
    """
    Migrate the objects in the document archive from one driver backend to the other. This may be a long running operation depending on the number of objects and amount of data to migrate.

    The migration process expects that the source and destination configurations are provided by config files passed in as arguments. The source configuration generally should be the same
    as the configuration in the anchore engine config.yaml.

    The general flow for a migration is:
    1. Stop anchore-engine services (shut down the entire cluster to ensure no data modifications during migration)
    2. Create a new configuration yaml with at minimum the services.catalog.archive section configured as you would like it when migration is complete
    3. Run migration
    4. Update the config.yaml for your anchore-engine system to use the new driver.
    5. Start anchore-engine again

    """

    ecode = ExitCode.ok

    do_migrate = False
    try:
        db_conf = db_context()
        db_preflight(db_conf['params'], db_conf['retries'])

        logger.info('Loading configs')
        from_raw = copy.deepcopy(
            load_config(configfile=from_driver_configpath))
        get_config().clear()

        to_raw = copy.deepcopy(load_config(configfile=to_driver_configpath))

        if from_analysis_archive:
            # Only use the specific key for the source, fail if not found
            from_config = obj_config.extract_config(
                from_raw['services']['catalog'],
                config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID])
        else:
            from_config = obj_config.extract_config(
                from_raw['services']['catalog'],
                config_keys=[
                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                    ALT_OBJECT_STORE_CONFIG_KEY
                ])

        if from_config:
            from_config = obj_config.normalize_config(from_config,
                                                      legacy_fallback=False)
            logger.info('Migration from config: {}'.format(
                json.dumps(from_config, indent=2)))
        else:
            if from_analysis_archive:
                config_key = ANALYSIS_ARCHIVE_MANAGER_ID
            else:
                config_key = '"' + DEFAULT_OBJECT_STORE_MANAGER_ID + '" or "' + ALT_OBJECT_STORE_CONFIG_KEY + '"'
            raise Exception(
                'No valid source configuration found. Needed a configuration section with key {} in the catalog service key'
                .format(config_key))

        if to_analysis_archive:
            # Only use the specific key if set, fail if not found
            to_config = obj_config.extract_config(
                to_raw['services']['catalog'],
                config_keys=[ANALYSIS_ARCHIVE_MANAGER_ID])
        else:
            to_config = obj_config.extract_config(
                to_raw['services']['catalog'],
                config_keys=[
                    DEFAULT_OBJECT_STORE_MANAGER_ID,
                    ALT_OBJECT_STORE_CONFIG_KEY
                ])

        if to_config:
            logger.info('Migration to config: {}'.format(
                json.dumps(to_config, indent=2)))
            to_config = obj_config.normalize_config(to_config,
                                                    legacy_fallback=False)
        else:
            if to_analysis_archive:
                config_key = '"' + ANALYSIS_ARCHIVE_MANAGER_ID + '"'
            else:
                config_key = '"' + DEFAULT_OBJECT_STORE_MANAGER_ID + '" or "' + ALT_OBJECT_STORE_CONFIG_KEY + '"'
            raise Exception(
                'No valid destination configuration found. Needed a configuration section with key {} in the catalog service key'
                .format(config_key))

        if dontask:
            do_migrate = True
        else:
            try:
                answer = input(
                    "Performing this operation requires *all* anchore-engine services to be stopped - proceed? (y/N)"
                )
            except Exception:
                answer = "n"
            if answer.lower() == 'y':
                do_migrate = True

        if do_migrate:
            migration.initiate_migration(from_config,
                                         to_config,
                                         remove_on_source=(not nodelete),
                                         do_lock=True,
                                         buckets_to_migrate=bucket)
            logger.info(
                "After this migration, your anchore-engine config.yaml MUST have the following configuration options added before starting up again:"
            )
            if 'archive_data_dir' in to_config:
                logger.info(
                    "\tNOTE: for archive_data_dir, the value must be set to the location that is accessible within your anchore-engine container"
                )

            logger.info((yaml.dump(to_config, default_flow_style=False)))
        else:
            logger.info("Skipping conversion.")
    except Exception as err:
        log_error('migrate', err)
        fail_exit()

    doexit(ecode)
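
The destructive-operation prompt above defaults to "no" whenever reading input fails (for example on EOF in a non-interactive shell), which is the safe default before a migration. A standalone sketch of that pattern (confirm is a hypothetical helper name):

def confirm(prompt):
    # Any failure to read an answer is treated as a 'no'
    try:
        answer = input(prompt + " (y/N) ")
    except (EOFError, KeyboardInterrupt):
        answer = "n"
    return answer.strip().lower() == "y"

if confirm("Proceed with migration?"):
    print("migrating...")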