Example #1
def manifest_to_digest_shellout(rawmanifest):
    ret = None
    tmpmanifest = None
    try:
        fd,tmpmanifest = tempfile.mkstemp()
        os.write(fd, rawmanifest.encode('utf-8'))
        os.close(fd)

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get('skopeo_global_timeout', 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        cmd = "skopeo {} manifest-digest {}".format(global_timeout_str, tmpmanifest)
        rc, sout, serr = run_command(cmd)
        if rc == 0 and re.match("^sha256:.*", str(sout, 'utf-8')):
            ret = sout.strip()
        else:
            logger.warn("failed to calculate digest from schema v1 manifest: cmd={} rc={} sout={} serr={}".format(cmd, rc, sout, serr))
            raise SkopeoError(cmd=cmd, rc=rc, err=serr, out=sout, msg='Failed to calculate digest from schema v1 manifest', )
    except Exception as err:
        raise err
    finally:
        if tmpmanifest:
            os.remove(tmpmanifest)

    return(ret)
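
A minimal usage sketch for the helper above. It assumes anchore-engine is installed and configured, that skopeo is on PATH, and that the function is importable from anchore_engine.clients.skopeo_wrapper (the import path is an assumption); the manifest content is a hypothetical placeholder.

import json

# Assumed import path; adjust to wherever manifest_to_digest_shellout is defined.
from anchore_engine.clients.skopeo_wrapper import manifest_to_digest_shellout

# Hypothetical schema v1 manifest body (placeholder, not a real manifest).
raw_manifest = json.dumps({"schemaVersion": 1, "name": "library/nginx", "tag": "latest"})

digest = manifest_to_digest_shellout(raw_manifest)
print(digest)  # on success, looks like b"sha256:<hex digest>"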
Example #2
def update_policy(bundle, policyId, active=False):
    request_inputs = anchore_engine.apis.do_request_prep(
        request, default_params={'active': active})
    method = request_inputs['method']
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']
    userId = request_inputs['userId']

    return_object = {}
    httpcode = 500

    try:
        logger.debug("Updating policy")
        client = internal_client_for(CatalogClient, request_inputs['userId'])

        if not bodycontent:
            bodycontent = '{}'

        jsondata = json.loads(bodycontent)

        if not jsondata:
            jsondata['policyId'] = policyId

        if active:
            jsondata['active'] = True
        elif 'active' not in jsondata:
            jsondata['active'] = False

        try:
            policy_record = client.get_policy(policyId=policyId)
        except Exception as err:
            logger.warn("unable to get policy_records for user (" +
                        str(userId) + ") - exception: " + str(err))
            raise err

        if policy_record:
            if policy_record['active'] and not jsondata['active']:
                httpcode = 500
                raise Exception(
                    "cannot deactivate an active policy - can only activate an inactive policy"
                )
            elif policyId != jsondata['policyId']:
                httpcode = 500
                raise Exception(
                    "policyId in route is different from policyId in payload: {} != {}"
                    .format(policyId, jsondata['policyId']))

            policy_record.update(jsondata)
            policy_record['policyId'] = policyId

            # schema check
            try:
                localconfig = anchore_engine.configuration.localconfig.get_config(
                )
                user_auth = localconfig['system_user_auth']
                verify = localconfig.get('internal_ssl_verify', True)
                p_client = internal_client_for(PolicyEngineClient, userId)
                response = p_client.validate_bundle(jsondata['policybundle'])
                if not response.get('valid', False):
                    httpcode = 400
                    return_object = anchore_engine.common.helpers.make_response_error(
                        'Bundle failed validation',
                        in_httpcode=400,
                        detail={
                            'validation_details':
                            response.get('validation_details')
                        })
                    return (return_object, httpcode)

            except Exception as err:
                raise Exception(
                    'Error response from policy service during bundle validation. Validation could not be performed: {}'
                    .format(err))

            return_policy_record = client.update_policy(
                policyId, policy_record=policy_record)
            return_object = [
                make_response_policy(return_policy_record, params)
            ]
            httpcode = 200
        else:
            httpcode = 404
            raise Exception("cannot locate specified policyId")
    except Exception as err:
        return_object = anchore_engine.common.helpers.make_response_error(
            err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']
    return (return_object, httpcode)
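
For reference, a sketch of the JSON body this handler reads, inferred only from the keys accessed above; all values are hypothetical placeholders.

# Request body shape inferred from the handler above; values are hypothetical.
example_body = {
    "policyId": "2c53a13c-1765-11e8-82ef-23527761d060",  # must equal the policyId in the route
    "active": True,              # forced to True when the 'active' query parameter is set
    "policybundle": {},          # bundle document handed to PolicyEngineClient.validate_bundle()
}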
Example #3
def add_policy(bundle):
    request_inputs = anchore_engine.apis.do_request_prep(request,
                                                         default_params={})
    bodycontent = request_inputs['bodycontent']
    params = request_inputs['params']

    return_object = []
    httpcode = 500
    userId = request_inputs['userId']

    try:
        logger.debug('Adding policy')
        client = internal_client_for(CatalogClient, request_inputs['userId'])
        jsondata = json.loads(bodycontent)

        # schema check
        try:
            localconfig = anchore_engine.configuration.localconfig.get_config()
            user_auth = localconfig['system_user_auth']
            verify = localconfig.get('internal_ssl_verify', True)

            p_client = internal_client_for(PolicyEngineClient, userId=userId)
            response = p_client.validate_bundle(jsondata)
            if not response.get('valid', False):
                httpcode = 400
                return_object = anchore_engine.common.helpers.make_response_error(
                    'Bundle failed validation',
                    in_httpcode=400,
                    detail={
                        'validation_details':
                        response.get('validation_details')
                    })
                return (return_object, httpcode)

        except Exception as err:
            raise Exception(
                'Error response from policy service during bundle validation. Validation could not be performed: {}'
                .format(err))

        if 'id' in jsondata and jsondata['id']:
            policyId = jsondata['id']
        else:
            policyId = hashlib.md5(
                str(userId + ":" +
                    jsondata['name']).encode('utf8')).hexdigest()
            jsondata['id'] = policyId

        try:
            policybundle = jsondata
            policy_record = client.add_policy(policybundle)
        except Exception as err:
            raise Exception(
                "cannot store policy data to catalog - exception: " + str(err))

        if policy_record:
            return_object = make_response_policy(policy_record, params)
            httpcode = 200
        else:
            raise Exception('failed to add policy to catalog DB')
    except Exception as err:
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.common.helpers.make_response_error(
            err, in_httpcode=httpcode)
        httpcode = return_object['httpcode']

    return (return_object, httpcode)
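
A standalone illustration of the fallback policyId derivation used above when the submitted bundle carries no 'id' field; the account and bundle names are hypothetical.

import hashlib

userId = "admin"                # hypothetical account name
bundle_name = "default-policy"  # hypothetical bundle 'name' field

# Same derivation as above: md5 of "<userId>:<bundle name>".
policyId = hashlib.md5((userId + ":" + bundle_name).encode("utf8")).hexdigest()
print(policyId)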
Example #4
def start(auto_upgrade, anchore_module, skip_config_validate, skip_db_compat_check, service=None):
    """
    Startup and monitor service processes. Specify a list of service names or empty for all.
    """

    global config
    ecode = 0

    auto_upgrade = True

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK', str(skip_db_compat_check)).lower() in ['true', 't', 'y', 'yes']:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    if service:
        input_services = list(service)
    else:
        input_services = []

    try:
        validate_params = {
            'services': True,
            'webhooks': True,
            'credentials': True
        }
        if skip_config_validate:
            try:
                items = skip_config_validate.split(',')
                for item in items:
                    validate_params[item] = False
            except Exception as err:
                raise Exception(err)

        # find/set up configuration        
        configdir = config['configdir']
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception("cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info("Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name + ".db.entities.upgrade")
        except TableNotFoundError as ex:
            logger.info("Initialized DB not found.")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) + ") cannot be found/imported - exception: " + str(err))

        # get the list of local services to start
        startFailed = False
        if not input_services:
            config_services = localconfig.get('services', {})
            if not config_services:
                logger.warn('could not find any services to execute in the config file')
                sys.exit(1)

            input_services = [ name for name, srv_conf in list(config_services.items()) if srv_conf.get('enabled')]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in list(service_map.values()):
                svc = service_conf_name
            else:
                svc = service_map.get(service_conf_name)

            if svc:
                services.append(svc)
            else:
                logger.warn('specified service {} not found in list of available services {} - removing from list of services to start'.format(service_conf_name, list(service_map.keys())))

        if 'anchore-catalog' in services:
            services.remove('anchore-catalog')
            services.insert(0, 'anchore-catalog')

        if not services:
            logger.error("No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting")
            sys.exit(1)


        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(localconfig)
            #override db_timeout since upgrade might require longer db session timeout setting
            try:
                db_params['db_connect_args']['timeout'] = 86400
            except Exception as err:
                pass
            
            anchore_manager.cli.utils.connect_database(config, db_params, db_retries=300)
            code_versions, db_versions = anchore_manager.cli.utils.init_database(upgrade_module=module, localconfig=localconfig, do_db_compatibility_check=(not skip_db_compat_check))

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions['db_version'] != db_versions['db_version']:
                        if auto_upgrade and 'anchore-catalog' in services:
                            logger.info("Auto-upgrade is set - performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info("No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn("this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds.".format(str(code_versions['db_version']), str(db_versions['db_version']), str(max_timeout - int(time.time() - timer))))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn('no existing anchore DB data can be discovered, assuming bootstrap')
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception("this version of anchore-engine requires the anchore DB version ("+str(code_versions['db_version'])+") but we discovered anchore DB version ("+str(db_versions['db_version'])+") in the running DB - please perform the DB upgrade process and retry")

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info('Starting services: {}'.format(services))
        try:
            if not os.path.exists("/var/log/anchore"):
                os.makedirs("/var/log/anchore/", 0o755)
        except Exception as err:
            logger.error("cannot create log directory /var/log/anchore - exception: {}".format(str(err)))
            raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/" + service + ".pid"
            try:
                service_thread = ServiceThread(startup_service, (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                while not os.path.exists(pidfile) and tries < max_tries:
                    time.sleep(1)
                    tries = tries + 1

                time.sleep(2)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(str(err)))

        if startFailed:
            logger.error("one or more services failed to start. cleanly terminating the others")
            for service in services:
                terminate_service(service, flush_pidfile=True)

            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(), path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if 'auto_restart_services' in localconfig and localconfig['auto_restart_services']:
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info("restarting service: {}".format(service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error("failed to startup log watchers - exception: {}".format(str(err)))
                raise err

    except Exception as err:
        logger.error(anchore_manager.cli.utils.format_error_output(config, 'servicestart', {}, err))
        if not ecode:
            ecode = 2
            
    anchore_manager.cli.utils.doexit(ecode)
Example #5
def update_policy(bundle, policyId, active=False):
    request_inputs = anchore_engine.apis.do_request_prep(
        request, default_params={"active": active})
    method = request_inputs["method"]
    bodycontent = request_inputs["bodycontent"]
    params = request_inputs["params"]
    userId = request_inputs["userId"]

    return_object = {}
    httpcode = 500

    try:
        logger.debug("Updating policy")
        client = internal_client_for(CatalogClient, request_inputs["userId"])

        if not bodycontent:
            bodycontent = "{}"

        jsondata = json.loads(bodycontent)

        if not jsondata:
            jsondata["policyId"] = policyId

        if active:
            jsondata["active"] = True
        elif "active" not in jsondata:
            jsondata["active"] = False

        try:
            policy_record = client.get_policy(policyId=policyId)
        except Exception as err:
            logger.warn("unable to get policy_records for user (" +
                        str(userId) + ") - exception: " + str(err))
            raise err

        if policy_record:
            if policy_record["active"] and not jsondata["active"]:
                httpcode = 500
                raise Exception(
                    "cannot deactivate an active policy - can only activate an inactive policy"
                )
            elif policyId != jsondata["policyId"]:
                httpcode = 500
                raise Exception(
                    "policyId in route is different from policyId in payload: {} != {}"
                    .format(policyId, jsondata["policyId"]))

            policy_record.update(jsondata)
            policy_record["policyId"] = policyId

            # schema check
            try:
                localconfig = anchore_engine.configuration.localconfig.get_config(
                )
                user_auth = localconfig["system_user_auth"]
                verify = localconfig.get("internal_ssl_verify", True)
                p_client = internal_client_for(PolicyEngineClient, userId)
                response = p_client.validate_bundle(jsondata["policybundle"])
                if not response.get("valid", False):
                    httpcode = 400
                    return_object = anchore_engine.common.helpers.make_response_error(
                        "Bundle failed validation",
                        in_httpcode=400,
                        details={
                            "validation_details":
                            response.get("validation_details")
                        },
                    )
                    return return_object, httpcode

            except Exception as err:
                raise Exception(
                    "Error response from policy service during bundle validation. Validation could not be performed: {}"
                    .format(err))

            return_policy_record = client.update_policy(
                policyId, policy_record=policy_record)
            return_object = [
                make_response_policy(return_policy_record, params)
            ]
            httpcode = 200
        else:
            httpcode = 404
            raise Exception("cannot locate specified policyId")
    except Exception as err:
        return_object = anchore_engine.common.helpers.make_response_error(
            err, in_httpcode=httpcode)
        httpcode = return_object["httpcode"]
    return return_object, httpcode
Example #6
def start(
    services,
    no_auto_upgrade,
    anchore_module,
    skip_config_validate,
    skip_db_compat_check,
    all,
):
    """
    Startup and monitor service processes. Specify a list of service names or empty for all.
    """

    global config
    ecode = ExitCode.ok

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get("ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK",
                      str(skip_db_compat_check)).lower() in [
                          "true", "t", "y", "yes"
                      ]:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    if services:
        input_services = list(services)
    else:
        input_services = os.getenv("ANCHORE_ENGINE_SERVICES",
                                   "").strip().split()

    if not input_services and not all:
        raise click.exceptions.BadArgumentUsage(
            "No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option"
        )

    try:
        validate_params = {
            "services": True,
            "webhooks": True,
            "credentials": True
        }
        if skip_config_validate:
            try:
                items = skip_config_validate.split(",")
                for item in items:
                    validate_params[item] = False
            except Exception as err:
                raise Exception(err)

        # find/set up configuration
        configdir = config["configdir"]
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(
                    configdir=configdir,
                    configfile=configfile,
                    validate_params=validate_params,
                )
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception(
                "cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info(
                "Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name +
                                             ".db.entities.upgrade")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) +
                            ") cannot be found/imported - exception: " +
                            str(err))

        # get the list of local services to start
        startFailed = False
        if not input_services:
            config_services = localconfig.get("services", {})
            if not config_services:
                logger.warn(
                    "could not find any services to execute in the config file"
                )
                sys.exit(1)

            input_services = [
                name for name, srv_conf in list(config_services.items())
                if srv_conf.get("enabled")
            ]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in list(service_map.values()):
                svc = service_conf_name
            else:
                svc = service_map.get(service_conf_name)

            if svc:
                services.append(svc)
            else:
                logger.warn(
                    "specified service {} not found in list of available services {} - removing from list of services to start"
                    .format(service_conf_name, list(service_map.keys())))

        if "anchore-catalog" in services:
            services.remove("anchore-catalog")
            services.insert(0, "anchore-catalog")

        if not services:
            logger.error(
                "No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting"
            )
            sys.exit(1)

        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(
                localconfig)

            # override db_timeout since upgrade might require longer db session timeout setting
            try:
                if "timeout" in db_params.get("db_connect_args", {}):
                    db_params["db_connect_args"]["timeout"] = 86400
                elif "connect_timeout" in db_params.get("db_connect_args", {}):
                    db_params["db_connect_args"]["connect_timeout"] = 86400
            except Exception as err:
                pass

            anchore_manager.util.db.connect_database(db_params, db_retries=300)
            code_versions, db_versions = anchore_manager.util.db.init_database(
                upgrade_module=module,
                localconfig=localconfig,
                do_db_compatibility_check=(not skip_db_compat_check),
            )

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions["db_version"] != db_versions["db_version"]:
                        if not no_auto_upgrade and "anchore-catalog" in services:
                            logger.info("Performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info(
                                        "No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn(
                                "this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds."
                                .format(
                                    str(code_versions["db_version"]),
                                    str(db_versions["db_version"]),
                                    str(max_timeout -
                                        int(time.time() - timer)),
                                ))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn(
                        "no existing anchore DB data can be discovered, assuming bootstrap"
                    )
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception(
                    "this version of anchore-engine requires the anchore DB version ("
                    + str(code_versions["db_version"]) +
                    ") but we discovered anchore DB version (" +
                    str(db_versions["db_version"]) +
                    ") in the running DB - please perform the DB upgrade process and retry\n"
                    "See: https://engine.anchore.io/docs/install/upgrade/#advanced--manual-upgrade-procedure"
                )

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info("Starting services: {}".format(services))

        for supportdir in ["/var/log/anchore", "/var/run/anchore"]:
            try:
                if not os.path.exists(supportdir):
                    os.makedirs(supportdir, 0o755)
            except Exception as err:
                logger.error(
                    "cannot create log directory {} - exception: {}".format(
                        supportdir, str(err)))
                raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/anchore/" + service + ".pid"
            try:
                terminate_service(service, flush_pidfile=True)

                service_thread = ServiceThread(startup_service,
                                               (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                alive = True
                while not os.path.exists(pidfile) and tries < max_tries:
                    logger.info(
                        "waiting for service pidfile {} to exist {}/{}".format(
                            pidfile, tries, max_tries))

                    try:
                        alive = service_thread.thread.is_alive()
                    except:
                        pass
                    if not alive:
                        logger.info(
                            "service thread has stopped {}".format(service))
                        break

                    time.sleep(1)
                    tries = tries + 1

                logger.info("auto_restart_services setting: {}".format(
                    localconfig.get("auto_restart_services", False)))
                if not localconfig.get("auto_restart_services", False):
                    logger.info(
                        "checking for startup failure pidfile={}, is_alive={}".
                        format(os.path.exists(pidfile), alive))
                    if not os.path.exists(pidfile) or not alive:
                        raise Exception(
                            "service thread for ({}) failed to start".format(
                                service))

                time.sleep(1)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(
                    str(err)))
                break

        if startFailed:
            logger.fatal(
                "one or more services failed to start. cleanly terminating the others"
            )
            for service in services:
                terminate_service(service, flush_pidfile=True)
            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(),
                                  path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if localconfig.get(
                                "auto_restart_services", False
                        ):  # 'auto_restart_services' in localconfig and localconfig['auto_restart_services']:
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info(
                                        "restarting service: {}".format(
                                            service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error(
                    "failed to startup log watchers - exception: {}".format(
                        str(err)))
                raise err

    except Exception as err:
        log_error("servicestart", err)
        ecode = ExitCode.failed

    doexit(ecode)
Example #7
def add_policy(bundle):
    request_inputs = anchore_engine.apis.do_request_prep(request,
                                                         default_params={})
    bodycontent = request_inputs["bodycontent"]
    params = request_inputs["params"]

    return_object = []
    httpcode = 500
    userId = request_inputs["userId"]

    try:
        logger.debug("Adding policy")
        client = internal_client_for(CatalogClient, request_inputs["userId"])
        jsondata = json.loads(bodycontent)

        # schema check
        try:
            localconfig = anchore_engine.configuration.localconfig.get_config()
            user_auth = localconfig["system_user_auth"]
            verify = localconfig.get("internal_ssl_verify", True)

            p_client = internal_client_for(PolicyEngineClient, userId=userId)
            response = p_client.validate_bundle(jsondata)

            if not response.get("valid", False):
                httpcode = 400
                return_object = anchore_engine.common.helpers.make_response_error(
                    "Bundle failed validation",
                    in_httpcode=400,
                    details={
                        "validation_details":
                        response.get("validation_details")
                    },
                )
                return return_object, httpcode

        except Exception as err:
            raise Exception(
                "Error response from policy service during bundle validation. Validation could not be performed: {}"
                .format(err))

        if "id" in jsondata and jsondata["id"]:
            policyId = jsondata["id"]
        else:
            policyId = hashlib.md5(
                str(userId + ":" +
                    jsondata["name"]).encode("utf8")).hexdigest()
            jsondata["id"] = policyId

        try:
            policybundle = jsondata
            policy_record = client.add_policy(policybundle)
        except Exception as err:
            raise Exception(
                "cannot store policy data to catalog - exception: " + str(err))

        if policy_record:
            return_object = make_response_policy(policy_record, params)
            httpcode = 200
        else:
            raise Exception("failed to add policy to catalog DB")
    except Exception as err:
        logger.debug("operation exception: " + str(err))
        return_object = anchore_engine.common.helpers.make_response_error(
            err, in_httpcode=httpcode)
        httpcode = return_object["httpcode"]

    return return_object, httpcode
Example #8
def get_image_manifest_skopeo_raw(pullstring, user=None, pw=None, verify=True):
    ret = None
    try:
        proc_env = os.environ.copy()
        if user and pw:
            proc_env["SKOPUSER"] = user
            proc_env["SKOPPASS"] = pw
            credstr = '--creds "${SKOPUSER}":"${SKOPPASS}"'
        else:
            credstr = ""

        if verify:
            tlsverifystr = "--tls-verify=true"
        else:
            tlsverifystr = "--tls-verify=false"

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get("skopeo_global_timeout", 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        os_override_strs = ["", "--override-os windows"]
        try:
            success = False
            for os_override_str in os_override_strs:
                cmd = [
                    "/bin/sh",
                    "-c",
                    "skopeo {} {} inspect --raw {} {} docker://{}".format(
                        global_timeout_str,
                        os_override_str,
                        tlsverifystr,
                        credstr,
                        pullstring,
                    ),
                ]
                cmdstr = " ".join(cmd)
                try:
                    rc, sout, serr = run_command_list(cmd, env=proc_env)
                    if rc != 0:
                        skopeo_error = SkopeoError(cmd=cmd,
                                                   rc=rc,
                                                   out=sout,
                                                   err=serr)
                        if skopeo_error.error_code != AnchoreError.OSARCH_MISMATCH.name:
                            raise SkopeoError(cmd=cmd,
                                              rc=rc,
                                              out=sout,
                                              err=serr)
                    else:
                        logger.debug("command succeeded: cmd=" + str(cmdstr) +
                                     " stdout=" + str(sout).strip() +
                                     " stderr=" + str(serr).strip())
                        success = True
                except Exception as err:
                    logger.error("command failed with exception - " + str(err))
                    raise err

                if success:
                    sout = str(sout, "utf-8") if sout else None
                    ret = sout
                    break

            if not success:
                logger.error("could not retrieve manifest")
                raise Exception("could not retrieve manifest")

        except Exception as err:
            raise err
    except Exception as err:
        raise err

    return ret
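
A minimal usage sketch, assuming the helper is importable from anchore_engine.clients.skopeo_wrapper (an assumption) and that skopeo is on PATH; the pullstring is a hypothetical placeholder.

from anchore_engine.clients.skopeo_wrapper import get_image_manifest_skopeo_raw

raw_manifest = get_image_manifest_skopeo_raw(
    "docker.io/library/alpine:latest",  # hypothetical pullstring
    user=None,      # registry username, if the registry requires auth
    pw=None,        # matching password or token
    verify=True,    # maps to --tls-verify=true
)
print(raw_manifest[:200] if raw_manifest else raw_manifest)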
Example #9
def download_image(
    fulltag,
    copydir,
    user=None,
    pw=None,
    verify=True,
    manifest=None,
    parent_manifest=None,
    use_cache_dir=None,
    remove_signatures=True,
):
    try:
        proc_env = os.environ.copy()
        if user and pw:
            proc_env["SKOPUSER"] = user
            proc_env["SKOPPASS"] = pw
            credstr = '--src-creds "${SKOPUSER}":"${SKOPPASS}"'
        else:
            credstr = ""

        if verify:
            tlsverifystr = "--src-tls-verify=true"
        else:
            tlsverifystr = "--src-tls-verify=false"

        if use_cache_dir and os.path.exists(use_cache_dir):
            cachestr = "--dest-shared-blob-dir " + use_cache_dir
        else:
            cachestr = ""

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get("skopeo_global_timeout", 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        os_overrides = [""]
        blobs_to_fetch = []

        if manifest:
            manifest_data = json.loads(manifest)

            for layer in manifest_data.get("layers", []):
                if "foreign.diff" in layer.get("mediaType", ""):
                    layer_digest_raw = layer.get("digest", "")
                    layer_digest = get_digest_value(layer_digest_raw)
                    layer_urls = layer.get("urls", [])

                    blobs_to_fetch.append({
                        "digest": layer_digest,
                        "urls": layer_urls
                    })

            if parent_manifest:
                parent_manifest_data = json.loads(parent_manifest)
            else:
                parent_manifest_data = {}

            if parent_manifest_data:
                for mlist in parent_manifest_data.get("manifests", []):
                    imageos = mlist.get("platform", {}).get("os", "")
                    if imageos not in ["", "linux"]:
                        # add a windows os override to the list of override attempts, to complete the options that are supported by skopeo
                        os_overrides.insert(0, "windows")
                        break

        if remove_signatures:
            remove_signatures_string = "--remove-signatures"
        else:
            remove_signatures_string = ""

        for os_override in os_overrides:
            success = False
            if os_override not in ["", "linux"]:
                os_override_str = "--override-os {}".format(os_override)
            else:
                os_override_str = ""

            if manifest:
                with open(os.path.join(copydir, "manifest.json"), "w") as OFH:
                    OFH.write(manifest)

            if parent_manifest:
                with open(os.path.join(copydir, "parent_manifest.json"),
                          "w") as OFH:
                    OFH.write(parent_manifest)

            cmd = [
                "/bin/sh",
                "-c",
                "skopeo {} {} copy {} {} {} {} docker://{} oci:{}:image".
                format(
                    os_override_str,
                    global_timeout_str,
                    remove_signatures_string,
                    tlsverifystr,
                    credstr,
                    cachestr,
                    fulltag,
                    copydir,
                ),
            ]

            cmdstr = " ".join(cmd)
            try:
                rc, sout, serr = run_command_list(cmd, env=proc_env)
                if rc != 0:
                    skopeo_error = SkopeoError(cmd=cmd,
                                               rc=rc,
                                               out=sout,
                                               err=serr)
                    if skopeo_error.error_code != AnchoreError.OSARCH_MISMATCH.name:
                        raise SkopeoError(cmd=cmd, rc=rc, out=sout, err=serr)
                else:
                    logger.debug("command succeeded: cmd=" + str(cmdstr) +
                                 " stdout=" + str(sout).strip() + " stderr=" +
                                 str(serr).strip())
                    success = True

            except Exception as err:
                logger.error("command failed with exception - " + str(err))
                raise err

            if success:
                blobs_dir = os.path.join(copydir, "blobs")

                if use_cache_dir:
                    # syft expects blobs to be nested inside of the oci image directory. If the --dest-shared-blob-dir skopeo option is used we need to
                    # provide access to the blobs via a symlink, as if the blobs were stored within the oci image directory
                    if os.path.exists(blobs_dir) and os.path.isdir(blobs_dir):
                        # if this directory is not empty, there is an issue and we should expect an exception
                        os.rmdir(blobs_dir)

                    os.symlink(use_cache_dir, blobs_dir)

                fetch_oci_blobs(blobs_dir, blobs_to_fetch)

                index_file_path = os.path.join(copydir, "index.json")
                ensure_no_nondistributable_media_types(index_file_path)
                ensure_layer_media_types_are_correct(copydir)

                break
        if not success:
            logger.error("could not download image")
            raise Exception("could not download image")
    except Exception as err:
        raise err

    return True
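
A minimal usage sketch, assuming the same anchore_engine.clients.skopeo_wrapper import path as above (an assumption); the tag and destination directory are placeholders.

import tempfile

from anchore_engine.clients.skopeo_wrapper import download_image

copydir = tempfile.mkdtemp()  # destination for the oci:<copydir>:image copy
download_image(
    "docker.io/library/alpine:latest",  # fulltag to pull
    copydir,
    user=None,
    pw=None,
    verify=True,
    manifest=None,           # optional raw manifest JSON string
    parent_manifest=None,    # optional manifest list, used for --override-os handling
    use_cache_dir=None,      # optional shared blob cache directory
)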
Example #10
def get_repo_tags_skopeo(url, registry, repo, user=None, pw=None, verify=None):
    try:
        proc_env = os.environ.copy()
        if user and pw:
            proc_env["SKOPUSER"] = user
            proc_env["SKOPPASS"] = pw
            credstr = '--creds "${SKOPUSER}":"${SKOPPASS}"'
        else:
            credstr = ""

        if verify:
            tlsverifystr = "--tls-verify=true"
        else:
            tlsverifystr = "--tls-verify=false"

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get("skopeo_global_timeout", 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        pullstring = registry + "/" + repo

        repotags = []

        cmd = [
            "/bin/sh",
            "-c",
            "skopeo {} list-tags {} {} docker://{}".format(
                global_timeout_str, tlsverifystr, credstr, pullstring),
        ]
        cmdstr = " ".join(cmd)
        try:
            rc, sout, serr = run_command_list(cmd, env=proc_env)
            sout = str(sout, "utf-8") if sout else None
            if rc != 0:
                raise SkopeoError(cmd=cmd, rc=rc, out=sout, err=serr)
            else:
                logger.debug("command succeeded: cmd=" + str(cmdstr) +
                             " stdout=" + str(sout).strip() + " stderr=" +
                             str(serr).strip())
        except Exception as err:
            logger.error("command failed with exception - " + str(err))
            raise err

        data = json.loads(sout)
        repotags = data.get("Tags", [])
    except Exception as err:
        raise err

    if not repotags:
        raise Exception("no tags found for input repo from skopeo")

    return repotags
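
A minimal usage sketch, again assuming the anchore_engine.clients.skopeo_wrapper import path; registry and repo values are hypothetical.

from anchore_engine.clients.skopeo_wrapper import get_repo_tags_skopeo

tags = get_repo_tags_skopeo(
    url=None,               # not referenced by the implementation above
    registry="docker.io",
    repo="library/alpine",
    user=None,
    pw=None,
    verify=True,
)
print(tags)  # e.g. a list of tag strings parsed from skopeo list-tags output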
Example #11
def process_analyzer_job(system_user_auth, qobj, layer_cache_enable):
    global servicename  #current_avg, current_avg_count

    timer = int(time.time())
    analysis_events = []
    userId = None
    imageDigest = None

    localconfig = anchore_engine.configuration.localconfig.get_config()
    myconfig = localconfig['services']['analyzer']

    try:
        logger.debug('dequeued object: {}'.format(qobj))

        record = qobj['data']
        userId = record['userId']
        imageDigest = record['imageDigest']
        manifest = record['manifest']
        parent_manifest = record.get('parent_manifest', None)

        # check to make sure image is still in DB
        catalog_client = internal_client_for(CatalogClient, userId)
        try:
            image_record = catalog_client.get_image(imageDigest)
            if not image_record:
                raise Exception("empty image record from catalog")
        except Exception as err:
            logger.warn(
                "dequeued image cannot be fetched from catalog - skipping analysis ("
                + str(imageDigest) + ") - exception: " + str(err))
            return (True)

        logger.info("image dequeued for analysis: " + str(userId) + " : " +
                    str(imageDigest))
        if image_record[
                'analysis_status'] != anchore_engine.subsys.taskstate.base_state(
                    'analyze'):
            logger.debug(
                "dequeued image is not in base state - skipping analysis")
            return (True)

        try:
            logger.spew("TIMING MARK0: " + str(int(time.time()) - timer))

            last_analysis_status = image_record['analysis_status']
            image_record[
                'analysis_status'] = anchore_engine.subsys.taskstate.working_state(
                    'analyze')
            rc = catalog_client.update_image(imageDigest, image_record)

            # actually do analysis
            registry_creds = catalog_client.get_registry()
            try:
                image_data = perform_analyze(
                    userId,
                    manifest,
                    image_record,
                    registry_creds,
                    layer_cache_enable=layer_cache_enable,
                    parent_manifest=parent_manifest)
            except AnchoreException as e:
                event = events.ImageAnalysisFailed(user_id=userId,
                                                   image_digest=imageDigest,
                                                   error=e.to_dict())
                analysis_events.append(event)
                raise

            imageId = None
            try:
                imageId = image_data[0]['image']['imageId']
            except Exception as err:
                logger.warn(
                    "could not get imageId after analysis or from image record - exception: "
                    + str(err))

            logger.info(
                "adding image analysis data to catalog: userid={} imageId={} imageDigest={}"
                .format(userId, imageId, imageDigest))
            try:
                logger.debug("archiving analysis data")
                rc = catalog_client.put_document('analysis_data', imageDigest,
                                                 image_data)
            except Exception as e:
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog', cause=e)
                event = events.SaveAnalysisFailed(user_id=userId,
                                                  image_digest=imageDigest,
                                                  error=err.to_dict())
                analysis_events.append(event)
                raise err

            if rc:
                try:
                    logger.debug("extracting image content data locally")
                    image_content_data = {}
                    all_content_types = localconfig.get(
                        'image_content_types', []) + localconfig.get(
                            'image_metadata_types', [])
                    for content_type in all_content_types:
                        try:
                            image_content_data[
                                content_type] = anchore_engine.common.helpers.extract_analyzer_content(
                                    image_data,
                                    content_type,
                                    manifest=manifest)
                        except Exception as err:
                            logger.warn("ERR: {}".format(err))
                            image_content_data[content_type] = {}

                    if image_content_data:
                        logger.debug("adding image content data to archive")
                        rc = catalog_client.put_document(
                            'image_content_data', imageDigest,
                            image_content_data)

                    try:
                        logger.debug(
                            "adding image analysis data to image_record")
                        anchore_engine.common.helpers.update_image_record_with_analysis_data(
                            image_record, image_data)

                    except Exception as err:
                        raise err

                except Exception as err:
                    import traceback
                    traceback.print_exc()
                    logger.warn(
                        "could not store image content metadata to archive - exception: "
                        + str(err))

                logger.info(
                    "adding image to policy engine: userid={} imageId={} imageDigest={}"
                    .format(userId, imageId, imageDigest))
                try:
                    if not imageId:
                        raise Exception(
                            "cannot add image to policy engine without an imageId"
                        )

                    #localconfig = anchore_engine.configuration.localconfig.get_config()
                    verify = localconfig['internal_ssl_verify']

                    pe_client = internal_client_for(PolicyEngineClient, userId)

                    try:
                        logger.debug(
                            "clearing any existing image record in policy engine: {} / {} / {}"
                            .format(userId, imageId, imageDigest))
                        rc = pe_client.delete_image(user_id=userId,
                                                    image_id=imageId)
                    except Exception as err:
                        logger.warn("exception on pre-delete - exception: " +
                                    str(err))

                    client_success = False
                    last_exception = None
                    for retry_wait in [1, 3, 5, 0]:
                        try:
                            logger.debug(
                                'loading image into policy engine: {} / {} / {}'
                                .format(userId, imageId, imageDigest))
                            image_analysis_fetch_url = 'catalog://' + str(
                                userId) + '/analysis_data/' + str(imageDigest)
                            logger.debug("policy engine request: " +
                                         image_analysis_fetch_url)
                            resp = pe_client.ingress_image(
                                userId, imageId, image_analysis_fetch_url)
                            logger.debug("policy engine image add response: " +
                                         str(resp))
                            client_success = True
                            break
                        except Exception as e:
                            logger.warn(
                                "attempt failed, will retry - exception: {}".
                                format(e))
                            last_exception = e
                            time.sleep(retry_wait)
                    if not client_success:
                        raise last_exception

                except Exception as err:
                    newerr = PolicyEngineClientError(
                        msg='Adding image to policy-engine failed',
                        cause=str(err))
                    event = events.PolicyEngineLoadAnalysisFailed(
                        user_id=userId,
                        image_digest=imageDigest,
                        error=newerr.to_dict())
                    analysis_events.append(event)
                    raise newerr

                logger.debug("updating image catalog record analysis_status")

                last_analysis_status = image_record['analysis_status']
                image_record[
                    'analysis_status'] = anchore_engine.subsys.taskstate.complete_state(
                        'analyze')
                image_record['analyzed_at'] = int(time.time())
                rc = catalog_client.update_image(imageDigest, image_record)

                try:
                    annotations = {}
                    try:
                        if image_record.get('annotations', '{}'):
                            annotations = json.loads(
                                image_record.get('annotations', '{}'))
                    except Exception as err:
                        logger.warn(
                            "could not marshal annotations from json - exception: "
                            + str(err))

                    for image_detail in image_record['image_detail']:
                        fulltag = image_detail['registry'] + "/" + image_detail[
                            'repo'] + ":" + image_detail['tag']
                        last_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': last_analysis_status,
                            'annotations': annotations
                        }
                        curr_payload = {
                            'imageDigest': imageDigest,
                            'analysis_status': image_record['analysis_status'],
                            'annotations': annotations
                        }
                        npayload = {
                            'last_eval': last_payload,
                            'curr_eval': curr_payload,
                        }
                        if annotations:
                            npayload['annotations'] = annotations

                        #original method
                        #rc = anchore_engine.subsys.notifications.queue_notification(userId, fulltag, 'analysis_update', npayload)

                        # new method
                        npayload['subscription_type'] = 'analysis_update'
                        event = events.UserAnalyzeImageCompleted(
                            user_id=userId, full_tag=fulltag, data=npayload)
                        analysis_events.append(event)

                except Exception as err:
                    logger.warn(
                        "failed to enqueue notification on image analysis state update - exception: "
                        + str(err))

            else:
                err = CatalogClientError(
                    msg='Failed to upload analysis data to catalog',
                    cause='Invalid response from catalog API - {}'.format(
                        str(rc)))
                event = events.SaveAnalysisFailed(user_id=userId,
                                                  image_digest=imageDigest,
                                                  error=err.to_dict())
                analysis_events.append(event)
                raise err

            logger.info("analysis complete: " + str(userId) + " : " +
                        str(imageDigest))

            logger.spew("TIMING MARK1: " + str(int(time.time()) - timer))

            try:
                run_time = float(time.time() - timer)

                anchore_engine.subsys.metrics.histogram_observe(
                    'anchore_analysis_time_seconds',
                    run_time,
                    buckets=[
                        1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0,
                        1800.0, 3600.0
                    ],
                    status="success")

            except Exception as err:
                logger.warn(str(err))

        except Exception as err:
            run_time = float(time.time() - timer)
            logger.exception("problem analyzing image - exception: " +
                             str(err))
            anchore_engine.subsys.metrics.histogram_observe(
                'anchore_analysis_time_seconds',
                run_time,
                buckets=[
                    1.0, 5.0, 10.0, 30.0, 60.0, 120.0, 300.0, 600.0, 1800.0,
                    3600.0
                ],
                status="fail")
            image_record['analysis_status'] = anchore_engine.subsys.taskstate.fault_state('analyze')
            image_record['image_status'] = anchore_engine.subsys.taskstate.fault_state('image_status')
            rc = catalog_client.update_image(imageDigest, image_record)

            if userId and imageDigest:
                for image_detail in image_record['image_detail']:
                    fulltag = image_detail['registry'] + "/" + image_detail['repo'] + ":" + image_detail['tag']
                    event = events.UserAnalyzeImageFailed(user_id=userId,
                                                          full_tag=fulltag,
                                                          error=str(err))
                    analysis_events.append(event)
        finally:
            if analysis_events:
                for event in analysis_events:
                    try:
                        catalog_client.add_event(event)
                    except Exception as add_err:
                        logger.error('Ignoring error sending event - exception: ' + str(add_err))

    except Exception as err:
        logger.warn("job processing bailed - exception: " + str(err))
        raise err

    return (True)
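
The excerpt above begins inside a retry loop around the policy-engine image load. A minimal, self-contained sketch of that retry-then-reraise pattern is shown below; the attempt count, wait time, and the load_image callable are illustrative placeholders, not values from the original code.

import time

def retry_call(load_image, attempts=3, retry_wait=5):
    # Try the call a fixed number of times, remembering the most recent failure.
    last_exception = None
    for attempt in range(1, attempts + 1):
        try:
            return load_image()
        except Exception as e:
            # Mirror the excerpt: log, remember the exception, wait, retry.
            print("attempt {} failed, will retry - exception: {}".format(attempt, e))
            last_exception = e
            time.sleep(retry_wait)
    # No attempt succeeded, so surface the last error, as the excerpt does.
    raise last_exception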
Example #12
def download_image(fulltag, copydir, user=None, pw=None, verify=True, manifest=None, parent_manifest=None, use_cache_dir=None, dest_type='oci'):
    try:
        proc_env = os.environ.copy()
        if user and pw:
            proc_env['SKOPUSER'] = user
            proc_env['SKOPPASS'] = pw
            credstr = '--src-creds \"${SKOPUSER}\":\"${SKOPPASS}\"'
        else:
            credstr = ""

        if verify:
            tlsverifystr = "--src-tls-verify=true"
        else:
            tlsverifystr = "--src-tls-verify=false"

        if use_cache_dir and os.path.exists(use_cache_dir):
            cachestr = "--dest-shared-blob-dir " + use_cache_dir
        else:
            cachestr = ""

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get('skopeo_global_timeout', 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        os_overrides = [""]
        if manifest:
            manifest_data = json.loads(manifest)

            # skopeo can't copy manifests that reference foreign layers to an oci destination - if any are found, fall back to the dir dest_type
            for l in manifest_data.get('layers', []):
                if 'foreign.diff' in l.get('mediaType', ""):
                    dest_type = 'dir'

            if parent_manifest:
                parent_manifest_data = json.loads(parent_manifest)
            else:
                parent_manifest_data = {}

            if parent_manifest_data:
                for mlist in parent_manifest_data.get('manifests', []):
                    imageos = mlist.get('platform', {}).get('os', "")
                    if imageos not in ["", 'linux'] and imageos not in os_overrides:
                        dest_type = 'dir'
                        os_overrides.insert(0, imageos)

        for os_override in os_overrides:
            success = False
            if os_override not in ["", 'linux']:
                dest_type = 'dir'
                os_override_str = "--override-os {}".format(os_override)
            else:
                os_override_str = ""
                
            if dest_type == 'oci':
                if manifest:
                    with open(os.path.join(copydir, "manifest.json"), 'w') as OFH:
                        OFH.write(manifest)

                if parent_manifest:
                    with open(os.path.join(copydir, "parent_manifest.json"), 'w') as OFH:
                        OFH.write(parent_manifest)
                        
                cmd = ["/bin/sh", "-c", "skopeo {} {} copy {} {} {} docker://{} oci:{}:image".format(os_override_str, global_timeout_str, tlsverifystr, credstr, cachestr, fulltag, copydir)]
            else:
                cmd = ["/bin/sh", "-c", "skopeo {} {} copy {} {} docker://{} dir:{}".format(os_override_str, global_timeout_str, tlsverifystr, credstr, fulltag, copydir)]

            cmdstr = ' '.join(cmd)
            try:
                rc, sout, serr = run_command_list(cmd, env=proc_env)
                if rc != 0:
                    skopeo_error = SkopeoError(cmd=cmd, rc=rc, out=sout, err=serr)
                    if skopeo_error.error_code != AnchoreError.OSARCH_MISMATCH.name:
                        raise skopeo_error
                else:
                    logger.debug("command succeeded: cmd="+str(cmdstr)+" stdout="+str(sout).strip()+" stderr="+str(serr).strip())
                    success = True                    

            except Exception as err:
                logger.error("command failed with exception - " + str(err))
                raise err

            if success:
                break
        if not success:
            logger.error("could not download image")
            raise Exception("could not download image")
    except Exception as err:
        raise err

    return(True)
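
The core of this example is choosing between an oci: and a dir: destination for skopeo copy. Below is a minimal sketch of just that command-building step, assuming the option strings are passed in pre-formatted; the helper name build_skopeo_copy_cmd is illustrative, not part of the original code.

def build_skopeo_copy_cmd(fulltag, copydir, dest_type='oci',
                          os_override_str='', global_timeout_str='',
                          tlsverifystr='--src-tls-verify=true',
                          credstr='', cachestr=''):
    # Mirrors the branch in the example: only the oci destination gets the
    # shared-blob-cache option, and it stores the image under the ':image' ref.
    if dest_type == 'oci':
        copy_cmd = "skopeo {} {} copy {} {} {} docker://{} oci:{}:image".format(
            os_override_str, global_timeout_str, tlsverifystr, credstr,
            cachestr, fulltag, copydir)
    else:
        copy_cmd = "skopeo {} {} copy {} {} docker://{} dir:{}".format(
            os_override_str, global_timeout_str, tlsverifystr, credstr,
            fulltag, copydir)
    return ["/bin/sh", "-c", copy_cmd]

# For example, build_skopeo_copy_cmd('docker.io/library/alpine:latest', '/tmp/img')
# returns a /bin/sh -c wrapper around a "skopeo ... copy ... oci:/tmp/img:image" string.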
Example #13
def download_image(fulltag,
                   copydir,
                   user=None,
                   pw=None,
                   verify=True,
                   manifest=None,
                   use_cache_dir=None,
                   dest_type='oci'):
    try:
        proc_env = os.environ.copy()
        if user and pw:
            proc_env['SKOPUSER'] = user
            proc_env['SKOPPASS'] = pw
            credstr = '--src-creds \"${SKOPUSER}\":\"${SKOPPASS}\"'
        else:
            credstr = ""

        if verify:
            tlsverifystr = "--src-tls-verify=true"
        else:
            tlsverifystr = "--src-tls-verify=false"

        if use_cache_dir and os.path.exists(use_cache_dir):
            cachestr = "--dest-shared-blob-dir " + use_cache_dir
        else:
            cachestr = ""

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get('skopeo_global_timeout', 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        if dest_type == 'oci':
            if manifest:
                with open(os.path.join(copydir, "manifest.json"), 'w') as OFH:
                    OFH.write(manifest)
            cmd = [
                "/bin/sh", "-c",
                "skopeo {} copy {} {} {} docker://{} oci:{}:image".format(
                    global_timeout_str, tlsverifystr, credstr, cachestr,
                    fulltag, copydir)
            ]
        else:
            cmd = [
                "/bin/sh", "-c",
                "skopeo {} copy {} {} docker://{} dir:{}".format(
                    global_timeout_str, tlsverifystr, credstr, fulltag,
                    copydir)
            ]

        cmdstr = ' '.join(cmd)
        try:
            rc, sout, serr = run_command_list(cmd, env=proc_env)
            if rc != 0:
                raise SkopeoError(cmd=cmd, rc=rc, out=sout, err=serr)
            else:
                logger.debug("command succeeded: cmd=" + str(cmdstr) +
                             " stdout=" + str(sout).strip() + " stderr=" +
                             str(serr).strip())

        except Exception as err:
            logger.error("command failed with exception - " + str(err))
            raise err
    except Exception as err:
        raise err

    return (True)
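
This variant drops the parent_manifest and --override-os handling of the previous example. A hypothetical invocation is shown below; the tag and target directory are placeholders, and the function's anchore_engine dependencies are assumed to be importable.

# Hypothetical usage; the tag and directory are placeholders.
download_image('docker.io/library/alpine:latest',  # fulltag to pull
               '/tmp/alpine-oci',                   # copydir for the oci layout
               verify=True,                         # keep --src-tls-verify=true
               use_cache_dir=None,                  # no shared blob cache
               dest_type='oci')                     # write an oci layout, not a dir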
Example #14
def get_image_manifest_skopeo(url,
                              registry,
                              repo,
                              intag=None,
                              indigest=None,
                              topdigest=None,
                              user=None,
                              pw=None,
                              verify=True):
    manifest = {}
    digest = None
    testDigest = None

    if indigest:
        pullstring = registry + "/" + repo + "@" + indigest
    elif intag:
        pullstring = registry + "/" + repo + ":" + intag
    else:
        raise Exception(
            "invalid input - must supply either an intag or indigest")

    try:
        proc_env = os.environ.copy()
        if user and pw:
            proc_env['SKOPUSER'] = user
            proc_env['SKOPPASS'] = pw
            credstr = '--creds \"${SKOPUSER}\":\"${SKOPPASS}\"'
        else:
            credstr = ""

        if verify:
            tlsverifystr = "--tls-verify=true"
        else:
            tlsverifystr = "--tls-verify=false"

        localconfig = anchore_engine.configuration.localconfig.get_config()
        global_timeout = localconfig.get('skopeo_global_timeout', 0)
        try:
            global_timeout = int(global_timeout)
            if global_timeout < 0:
                global_timeout = 0
        except:
            global_timeout = 0

        if global_timeout:
            global_timeout_str = "--command-timeout {}s".format(global_timeout)
        else:
            global_timeout_str = ""

        try:
            cmd = [
                "/bin/sh", "-c",
                "skopeo {} inspect --raw {} {} docker://{}".format(
                    global_timeout_str, tlsverifystr, credstr, pullstring)
            ]
            cmdstr = ' '.join(cmd)
            try:
                rc, sout, serr = run_command_list(cmd, env=proc_env)
                if rc != 0:
                    raise SkopeoError(cmd=cmd, rc=rc, out=sout, err=serr)
                else:
                    logger.debug("command succeeded: cmd=" + str(cmdstr) +
                                 " stdout=" + str(sout).strip() + " stderr=" +
                                 str(serr).strip())
            except Exception as err:
                logger.error("command failed with exception - " + str(err))
                raise err

            sout = str(sout, 'utf-8') if sout else None
            digest = manifest_to_digest(sout)
            manifest = json.loads(sout)
            if not topdigest:
                topdigest = digest

            if manifest.get('schemaVersion') == 2 and manifest.get('mediaType') == 'application/vnd.docker.distribution.manifest.list.v2+json':
                # Get the arch-specific version for amd64 and linux
                new_digest = None
                for entry in manifest.get('manifests'):
                    platform = entry.get('platform')
                    if platform and platform.get('architecture') in ['amd64'] and platform.get('os') == 'linux':
                        new_digest = entry.get('digest')
                        break

                return get_image_manifest_skopeo(url=url,
                                                 registry=registry,
                                                 repo=repo,
                                                 intag=None,
                                                 indigest=new_digest,
                                                 user=user,
                                                 pw=pw,
                                                 verify=verify,
                                                 topdigest=topdigest)
        except Exception as err:
            logger.warn("CMD failed - exception: " + str(err))
            raise err

    except Exception as err:
        import traceback
        traceback.print_exc()
        raise err

    if not manifest or not digest:
        raise SkopeoError(msg="No digest/manifest from skopeo")

    return (manifest, digest, topdigest)
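
When skopeo returns a manifest list, the example above recurses with the digest of the amd64/linux entry. A minimal sketch of just that selection step is below; the helper name pick_amd64_linux_digest and the sample digests are illustrative.

def pick_amd64_linux_digest(manifest_list):
    # Return the digest of the first amd64/linux entry, or None if absent.
    for entry in manifest_list.get('manifests', []):
        platform = entry.get('platform') or {}
        if platform.get('architecture') == 'amd64' and platform.get('os') == 'linux':
            return entry.get('digest')
    return None

# Example with a trimmed-down manifest list:
sample = {
    'mediaType': 'application/vnd.docker.distribution.manifest.list.v2+json',
    'manifests': [
        {'digest': 'sha256:aaa...', 'platform': {'architecture': 'arm64', 'os': 'linux'}},
        {'digest': 'sha256:bbb...', 'platform': {'architecture': 'amd64', 'os': 'linux'}},
    ],
}
assert pick_amd64_linux_digest(sample) == 'sha256:bbb...'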