コード例 #1
0
    def test_instructionchecktrigger_notin(self):
        """The USER not_in check fires only when the image user is absent from the value list."""
        dockerfile = (
            "RUN apt-get install blah1 balh2 blah2 testuser1\n"
            "RUN echo hi\n"
            "USER testuser\n"
        )

        # "testuser" is not in the blocked list -> expect exactly one fire.
        trigger, gate, ctx = self.get_initialized_trigger(
            InstructionCheckTrigger.__trigger_name__,
            instruction="USER",
            check="not_in",
            value="root",
        )
        test_image.dockerfile_contents = dockerfile
        ctx = gate.prepare_context(test_image, ctx)
        trigger.evaluate(test_image, ctx)
        self.assertEqual(len(trigger.fired), 1)
        logger.info("Fired: {}".format([x.json() for x in trigger.fired]))

        # "testuser" is in the blocked list -> expect no fire.
        trigger, gate, ctx = self.get_initialized_trigger(
            InstructionCheckTrigger.__trigger_name__,
            instruction="USER",
            check="not_in",
            value="root,testuser",
        )
        test_image.dockerfile_contents = dockerfile
        ctx = gate.prepare_context(test_image, ctx)
        trigger.evaluate(test_image, ctx)
        self.assertEqual(len(trigger.fired), 0)
        logger.info("Fired: {}".format([x.json() for x in trigger.fired]))
コード例 #2
0
File: __init__.py — Project: sjambhule/anchore-engine
def handle_feed_sync(*args, **kwargs):
    """
    Initiates a feed sync in the system in response to a message from the queue

    :param args: unused
    :param kwargs: must contain 'mythread' with a 'cycle_timer' interval in seconds
    :return: True (loop never exits in practice)
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:
        config = localconfig.get_config()
        sync_enabled = config.get('feeds', {}).get('sync_enabled', True)

        if sync_enabled:
            try:
                ready = anchore_engine.clients.services.common.check_services_ready(['simplequeue'])
                if ready:
                    try:
                        simplequeue.run_target_with_queue_ttl(system_user, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180)
                    except Exception as err:
                        logger.warn("failed to process task this cycle: " + str(err))
                else:
                    logger.info("simplequeue service not yet ready, will retry")
            except Exception as e:
                logger.error('Caught escaped error in feed sync handler: {}'.format(e))
        else:
            logger.debug("sync_enabled is set to false in config - skipping feed sync")

        time.sleep(cycle_time)

    return True
コード例 #3
0
    def load_retrieved_files(self, analysis_report, image_obj):
        """
        Loads the analyzer retrieved files from the image, saves them in the db

        :param analysis_report: analyzer output dict containing a 'retrieve_files' section
        :param image_obj: image record supplying user_id/id for the artifact rows
        :return: list of AnalysisArtifact records (not yet persisted to the session)
        """
        logger.info('Loading retrieved files')
        retrieve_files_json = analysis_report.get('retrieve_files')
        if not retrieve_files_json:
            return []

        matches = retrieve_files_json.get('file_content.all',
                                          {}).get('base', {})
        records = []

        for filename, match_string in list(matches.items()):
            match = AnalysisArtifact()
            match.image_user_id = image_obj.user_id
            match.image_id = image_obj.id
            match.analyzer_id = 'retrieve_files'
            match.analyzer_type = 'base'
            match.analyzer_artifact = 'file_content.all'
            match.artifact_key = filename
            try:
                # Stored file content is base64-encoded by the analyzer
                match.binary_value = base64.b64decode(
                    ensure_bytes(match_string))
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate
                logger.exception(
                    'Could not b64 decode the file content for {}'.format(
                        filename))
                raise
            records.append(match)

        return records
コード例 #4
0
    def __init__(self, data_dir=None, load_from_file=None):
        """Set up test-data directory paths rooted at data_dir (cwd by default).

        Loading from a tarball (load_from_file) is not implemented yet and raises.
        """
        self.root_dir = data_dir or os.curdir
        self.src = load_from_file

        logger.info('Using local test data dir: {}'.format(self.root_dir))

        if self.src:
            raise NotImplementedError('Load from tarball not yet implemented')

        self.images_dir = os.path.join(self.root_dir, self.IMAGES_DIR)
        self.feeds_dir = os.path.join(self.root_dir, self.FEEDS_DIR)
        self.db_dir = os.path.join(self.root_dir, self.DB_DIR)
        self.bundle_dir = os.path.join(self.root_dir, self.BUNDLES_DIR)
        self.img_meta_path = os.path.join(self.images_dir,
                                          self.IMAGE_METADATA_FILENAME)

        # Image id -> metadata mapping for the local fixtures
        with open(self.img_meta_path) as meta_file:
            self.image_map = json.load(meta_file)

        # Policy bundles keyed by bundle id; numbers kept as strings to avoid
        # precision/representation changes on round-trip
        self.bundles = {}
        for bundle_name in os.listdir(self.bundle_dir):
            with open(os.path.join(self.bundle_dir, bundle_name)) as bundle_file:
                bundle = json.load(bundle_file, parse_int=str, parse_float=str)
            self.bundles[bundle['id']] = bundle
コード例 #5
0
def initialize_direct(obj_store_config, manager_id, check_db=False):
    """
    Given a fully-prepared configuration, initialize the manager and set the id.

    :param obj_store_config: dict, fully ready configuration to use
    :param manager_id: key under which the new manager is registered in the singleton map
    :param check_db: when True, verify the metadata db only references configured drivers
    :return: True on success
    """
    global manager_singleton

    validate_config(obj_store_config)

    manager_singleton[manager_id] = ObjectStorageManager(obj_store_config)

    if check_db:
        # Bug fix: check_drivers() belongs to the newly registered manager
        # instance, not to the manager_singleton mapping itself (the mapping is
        # indexed by manager_id above and has no such method).
        supported, unsupported = manager_singleton[manager_id].check_drivers()
        if unsupported:
            raise Exception(
                'Archive subsys initialization found records in the metadata db that require drivers not configured: {}'
                .format(unsupported))

    logger.info('Archive {} initialization complete'.format(manager_id))
    return True
コード例 #6
0
def do_feed_sync(msg):
    """
    Run a single feed sync task and record its runtime in the monitor metrics.

    :param msg: queue message dict; its 'data' entry is forwarded to FeedsUpdateTask.run_feeds_update
    :return: None
    """
    # The previous "if 'X' not in locals():" guards were always true at function
    # entry (locals() only contains 'msg' here), so the imports ran on every call
    # anyway; import unconditionally and drop the misleading dead guards.
    from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask
    from anchore_engine.services.policy_engine.engine.feeds import get_selected_feeds_to_sync

    handler_success = False
    timer = time.time()
    logger.info("FIRING: feed syncer")
    try:
        feeds = get_selected_feeds_to_sync(localconfig.get_config())
        logger.info('Syncing configured feeds: {}'.format(feeds))
        result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get('data'))

        # A None result means the task was disabled rather than run
        if result is not None:
            handler_success = True
        else:
            logger.warn('Feed sync task marked as disabled, so skipping')
    except ValueError:
        logger.warn('Received msg of wrong type')
    except Exception as err:
        logger.warn("failure in feed sync handler - exception: " + str(err))

    # Single metric emission; only the status label differs by outcome
    anchore_engine.subsys.metrics.summary_observe(
        'anchore_monitor_runtime_seconds',
        time.time() - timer,
        function='do_feed_sync',
        status="success" if handler_success else "fail")
コード例 #7
0
def handle_feed_sync_trigger(*args, **kwargs):
    """
    Checks to see if there is a task for a feed sync in the queue and if not, adds one.
    Interval for firing this should be longer than the expected feed sync duration.

    :param args: unused
    :param kwargs: must contain 'mythread' with a 'cycle_timer' interval in seconds
    :return: True (loop never exits in practice)
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:
        config = localconfig.get_config()
        feed_sync_enabled = config.get('feeds', {}).get('sync_enabled', True)
        if feed_sync_enabled:
            logger.info('Feed Sync task creator activated')
            try:
                push_sync_task(system_user)
                logger.info('Feed Sync Trigger done, waiting for next cycle.')
            except Exception:
                # The caught exception was previously bound but never logged;
                # logger.exception records its message and traceback.
                logger.exception(
                    'Error caught in feed sync trigger handler after all retries. Will wait for next cycle'
                )
            finally:
                logger.info('Feed Sync task creator complete')
        else:
            logger.info(
                "sync_enabled is set to false in config - skipping feed sync trigger"
            )

        time.sleep(cycle_time)

    return True
コード例 #8
0
def test_vuln_image_updates(test_data_env):
    """Sync feeds twice (rolling back the sync timestamp in between) and confirm image vulns update."""
    sync_feeds(test_data_env, up_to=datetime.datetime(2017, 6, 1))
    _load_images(test_data_env)

    # Get the first set
    initial_vulns = _img_vulns(test_data_env.get_images_named('ruby')[0][0])

    # Rollback the sync time to get another sync with data
    db = get_thread_scoped_session()
    try:
        f = reset_feed_sync_time(db, datetime.datetime(2017, 6, 1), feed_name='vulnerabilities')
        db.add(f)
        db.commit()
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate
        logger.exception('Exception commiting update of feed sync timestamps')
        db.rollback()

    # Sync again to get new merged data
    sync_feeds(test_data_env, up_to=datetime.datetime.utcnow())
    check_fix_version(test_data_env)

    rescan_img_id = list(test_data_env.image_map.keys())[0]
    updated_vulns = _img_vulns(test_data_env.get_images_named('ruby')[0][0])
    logger.info(json.dumps(updated_vulns, indent=2))
コード例 #9
0
def query_images_by_package_get(user_id,
                                name=None,
                                version=None,
                                package_type=None):
    """
    API handler: query images containing the given package.

    :param user_id: requesting account id
    :param name: package name filter
    :param version: package version filter
    :param package_type: package type filter
    :return: (response object, http status code) tuple
    """
    log.info("Querying images by package {}".format(name))
    # Bug fix: if get_session() itself raised, 'session' was unbound and the
    # finally block raised NameError, masking the real error. Initialize it
    # first and guard the close.
    session = None
    try:
        session = get_session()
        request_inputs = anchore_engine.apis.do_request_prep(
            connexion.request,
            default_params={
                "name": name,
                "version": version,
                "package_type": package_type,
            },
        )
        return_object, httpcode = query_images_by_package(
            session, request_inputs)
    except Exception as err:
        httpcode = 500
        return_object = str(err)
    finally:
        if session is not None:
            session.close()

    return return_object, httpcode
コード例 #10
0
    def test_fixavailableparam(self):
        """Both fix_available settings should match at least one vulnerability each."""
        cases = (
            # (fix_available, severity) -> expected to match CVE-TEST-TEST3 / CVE-TEST-TEST0
            ('True', 'medium'),
            ('False', 'unknown'),
        )
        for fix_available, severity in cases:
            trigger, gate, ctx = self.get_initialized_trigger(
                VulnerabilityMatchTrigger.__trigger_name__,
                package_type='all',
                severity=severity,
                severity_comparison='>=',
                fix_available=fix_available)
            ctx = gate.prepare_context(self.test_image, ctx)
            trigger.evaluate(self.test_image, ctx)
            logger.info('Fired: {}'.format(trigger.fired))
            self.assertGreaterEqual(len(trigger.fired), 1)
コード例 #11
0
def test_anchore_permissions():
    """
    Test permission comparisons with mixed-case, wild-cards, etc

    :return:
    """

    logger.info(
        "Testing permission wildcard matches and mixed-case comparisions")

    mixed_case = "Account1:listImages:*"
    lower_case = "account1:listImages:*"

    # Default, case-sensitive, exact match
    assert CaseSensitivePermission(wildcard_string=mixed_case).implies(
        CaseSensitivePermission(wildcard_string=mixed_case))

    # Ignore case
    assert CaseSensitivePermission(
        wildcard_string=lower_case, case_sensitive=False).implies(
            CaseSensitivePermission(wildcard_string=mixed_case,
                                    case_sensitive=False))

    # Mixed case, mismatch
    assert not CaseSensitivePermission(wildcard_string=lower_case).implies(
        CaseSensitivePermission(wildcard_string=mixed_case))
コード例 #12
0
def sync_feeds(test_env, up_to=None):
    """Sync the vulnerability and package feeds, optionally capping the feed time at up_to."""
    feeds = DataFeeds.instance()
    if up_to:
        test_env.set_max_feed_time(up_to)

    logger.info('Syncing vuln')
    feeds.vulnerabilities.sync(
        item_processing_fn=FeedsUpdateTask.process_updated_vulnerability)
    logger.info('Syncing packages')
    feeds.packages.sync()
    logger.info('Sync complete')
コード例 #13
0
def test_package_sync(run_legacy_sync_for_feeds):
    """Legacy packages feed sync should populate the npm and gem metadata tables."""

    def _package_counts():
        # Current (npm, gem) row counts in a fresh session
        with session_scope() as db:
            return db.query(NpmMetadata).count(), db.query(GemMetadata).count()

    npm_count, gem_count = _package_counts()
    assert npm_count == 0, "Not starting with empty table"
    assert gem_count == 0, "Not starting with empty table"

    logger.info("Syncing packages")
    started = time.time()
    run_legacy_sync_for_feeds(["packages"])
    elapsed = time.time() - started
    logger.info("Done with packages. Took: {} sec".format(elapsed))

    npm_count, gem_count = _package_counts()
    logger.info("Has {} npm records".format(npm_count))
    logger.info("Has {} gem records".format(gem_count))
コード例 #14
0
def test_vuln_sync(test_data_env):
    """Vulnerability feed sync should populate the Vulnerability table from empty."""
    with session_scope() as db:
        starting_count = db.query(Vulnerability).count()

    logger.info('Starting with {} vuln records'.format(starting_count))
    assert starting_count == 0, 'Not starting with empty table'

    logger.info('Syncing vulnerabilities')
    started = time.time()
    DataFeeds.__scratch_dir__ = '/tmp'
    DataFeeds.sync(to_sync=['vulnerabilities'],
                   feed_client=test_data_env.feed_client)
    elapsed = time.time() - started
    logger.info('Done with vulnerabilities. Took: {} sec'.format(elapsed))

    with session_scope() as db:
        logger.info('Has {} vuln records'.format(
            db.query(Vulnerability).count()))
コード例 #15
0
def test_vuln_sync(test_data_env):
    """Sync the vulnerabilities feed and verify records land in an initially empty table."""
    with session_scope() as db:
        before = db.query(Vulnerability).count()

    logger.info("Starting with {} vuln records".format(before))
    assert before == 0, "Not starting with empty table"

    logger.info("Syncing vulnerabilities")
    sync_start = time.time()
    DataFeeds.__scratch_dir__ = "/tmp"
    DataFeeds.sync(to_sync=["vulnerabilities"],
                   feed_client=test_data_env.feed_client)
    sync_duration = time.time() - sync_start
    logger.info("Done with vulnerabilities. Took: {} sec".format(sync_duration))

    with session_scope() as db:
        after = db.query(Vulnerability).count()
        logger.info("Has {} vuln records".format(after))
コード例 #16
0
def test_basic_whitelist_evaluation(bundle, test_data_env_with_images_loaded):
    """Build the default bundle for a tag and execute it against a loaded node image."""
    default_bundle = bundle()
    logger.info('Building executable bundle from default bundle')
    test_tag = 'docker.io/library/node:latest'
    built = build_bundle(default_bundle, for_tag=test_tag)
    assert not built.init_errors
    logger.info(('Got: {}'.format(built)))

    db = get_session()
    image_id = test_data_env_with_images_loaded.get_images_named('node')[0][0]
    img_obj = db.query(Image).get((image_id, '0'))
    assert img_obj is not None, 'Failed to get an image object to test'

    exec_context = ExecutionContext(db_session=db, configuration={})
    evaluation = built.execute(img_obj, tag=test_tag, context=exec_context)
    assert evaluation is not None, 'Got None eval'

    logger.info((json.dumps(evaluation.json(), indent=2)))
    logger.info((json.dumps(evaluation.as_table_json(), indent=2)))
コード例 #17
0
def check_all_imgs_vuln():
    """Recompute and persist vulnerability matches for every image in the db."""
    session = get_thread_scoped_session()
    try:
        for image in session.query(Image).all():
            logger.info('Checking vulnerabilities for image: {}'.format(image.id))
            # Defensive check; query().all() is not expected to yield falsy rows
            if not image:
                logger.info('No image found with id: {}'.format(image.id))
                raise Exception('Should have image')
            found = vulnerabilities.vulnerabilities_for_image(image)

            for vuln in found:
                session.merge(vuln)
            session.commit()

            logger.info('Found: {}'.format(found))
    except Exception as e:
        logger.info('Error! {}'.format(e))
        end_session()
コード例 #18
0
def test_initialize_users(anchore_db):
    """Bootstrap identities from a config dict, then verify each account and credential exists."""
    try:
        test_creds = {
            "users": {
                "user1": {"password": "******", "email": "user1@email"},
                "user2": {"password": "******", "email": "user2@email"},
            }
        }

        with session_scope() as session:
            bootstrapper = identities.IdentityBootstrapper(
                identities.IdentityManager, session
            )
            bootstrapper.initialize_user_identities_from_config(test_creds)

        # Same checks as before, once per configured user: account lookup,
        # then credential lookup, each in its own session.
        for username in ("user1", "user2"):
            with session_scope() as session:
                mgr = identities.manager_factory.for_session(session)
                account = mgr.get_account(username)

            logger.info(str(account))
            assert account is not None

            with session_scope() as session:
                mgr = identities.manager_factory.for_session(session)
                _, credential = mgr.get_credentials_for_username(username)

            logger.info(str(credential))
            assert credential is not None
    finally:
        tearDown()
コード例 #19
0
File: service.py — Project: ksarabi/anchore-engine
    def _db_connect(self):
        """
        Initialize the db connection and prepare the db

        :return: True when db init is disabled by configuration; otherwise None on success
        :raises: whatever initialize_db raises on connection failure (logged first)
        """
        logger.info('Configuring db connection')
        if not self.db_connect:
            logger.info('DB Connection disabled in configuration for service {}. Skipping db init'.format(self.__service_name__))
            return True

        logger.info('Initializing database')
        # connect to DB
        try:
            initialize_db(localconfig=self.global_configuration, versions=self.versions)
        except Exception as err:
            logger.error('cannot connect to configured DB: exception - ' + str(err))
            # Bare 'raise' re-raises the active exception idiomatically
            # (the previous 'raise err' added a redundant frame to the traceback)
            raise

        logger.info('DB connection initialization complete')
コード例 #20
0
def test_package_sync(test_data_env):
    """DataFeeds package sync should fill the npm/gem metadata tables from empty."""
    with session_scope() as db:
        npm_before = db.query(NpmMetadata).count()
        gem_before = db.query(GemMetadata).count()
    assert npm_before == 0, "Not starting with empty table"
    assert gem_before == 0, "Not starting with empty table"

    logger.info("Syncing packages")
    started = time.time()
    DataFeeds.__scratch_dir__ = "/tmp"
    DataFeeds.sync(to_sync=["packages"], feed_client=test_data_env.feed_client)
    elapsed = time.time() - started
    logger.info("Done with packages. Took: {} sec".format(elapsed))

    with session_scope() as db:
        npm_after = db.query(NpmMetadata).count()
        gem_after = db.query(GemMetadata).count()

    logger.info("Has {} npm records".format(npm_after))
    logger.info("Has {} gem records".format(gem_after))
コード例 #21
0
def test_package_sync(test_data_env):
    """Sync the packages feed and confirm npm/gem metadata tables get populated."""
    with session_scope() as db:
        npm_records, gem_records = (db.query(NpmMetadata).count(),
                                    db.query(GemMetadata).count())
    assert npm_records == 0, 'Not starting with empty table'
    assert gem_records == 0, 'Not starting with empty table'

    logger.info('Syncing packages')
    sync_start = time.time()
    DataFeeds.__scratch_dir__ = '/tmp'
    DataFeeds.sync(to_sync=['packages'], feed_client=test_data_env.feed_client)
    sync_duration = time.time() - sync_start
    logger.info('Done with packages. Took: {} sec'.format(sync_duration))

    with session_scope() as db:
        npm_records, gem_records = (db.query(NpmMetadata).count(),
                                    db.query(GemMetadata).count())

    logger.info('Has {} npm records'.format(npm_records))
    logger.info('Has {} gem records'.format(gem_records))
コード例 #22
0
def test_feed_sync():
    """List feeds/groups from the feed service, then page through one group's data verifying token progress."""
    test_client = get_feeds_client(
        SyncConfig(
            enabled=True,
            url=feed_url,
            username="******",
            password="******",
            connection_timeout_seconds=10,
            read_timeout_seconds=30,
        )
    )
    # Group listing may fail per-feed; just log and continue
    for feed_entry in test_client.list_feeds().feeds:
        try:
            test_client.list_feed_groups(feed_entry.name)
        except Exception as e:
            logger.info(("Caught: {} for feed:  {}".format(e, feed_entry)))

    feed = "vulnerabilities"
    group = "alpine:3.6"
    since_time = None
    next_token = False  # False = first page; None = pagination finished
    last_token = None

    while next_token is not None:
        logger.info("Getting a page of data")
        page_kwargs = {"since": since_time}
        if next_token:
            last_token = next_token
            logger.info("Using token: {}".format(next_token))
            page_kwargs["next_token"] = next_token
        else:
            last_token = None
        data = test_client.get_feed_group_data(feed, group, **page_kwargs)

        next_token = data.next_token
        logger.info(
            "Got {} items and new next token: {}".format(data.record_count, next_token)
        )

        # Token must advance each page, and every page must carry data
        if next_token:
            assert next_token != last_token
        assert len(data.data) > 0
コード例 #23
0
def test_package_sync(test_data_env):
    """Package feed sync via the DataFeeds singleton should populate the npm/gem tables."""
    with session_scope() as db:
        counts = (db.query(NpmMetadata).count(), db.query(GemMetadata).count())
    assert counts[0] == 0, 'Not starting with empty table'
    assert counts[1] == 0, 'Not starting with empty table'

    feeds = DataFeeds.instance()
    logger.info('Syncing packages')
    t0 = time.time()
    feeds.packages.sync()
    duration = time.time() - t0
    logger.info('Done with packages. Took: {} sec'.format(duration))
    with session_scope() as db:
        counts = (db.query(NpmMetadata).count(), db.query(GemMetadata).count())

    logger.info('Has {} npm records'.format(counts[0]))
    logger.info('Has {} gem records'.format(counts[1]))
コード例 #24
0
def cls_anchore_db(connection_str=None, do_echo=False):
    """
    Sets up a db connection to an existing db, and fails if not found/present.

    This is for use in legacy unittest frameworks where it is set once at the class level, not on each function.
    :return:
    """

    from anchore_engine.db.entities.common import (
        get_engine,
        initialize,
        do_disconnect,
        init_thread_session,
        end_session,
    )
    from anchore_engine.db.entities.upgrade import do_create_tables

    conn_str = connection_str or os.getenv("ANCHORE_TEST_DB_URL")

    config = {"credentials": {"database": {"db_connect": conn_str, "db_echo": do_echo}}}

    try:
        logger.info("Initializing connection: {}".format(config))
        ret = initialize(localconfig=config)
        init_thread_session(force_new=True)

        engine = get_engine()
        logger.info("Dropping db if found")
        # Recreate a pristine public schema before anchore init
        for statement in (
            "DROP SCHEMA public CASCADE",
            "CREATE SCHEMA public",
            "GRANT ALL ON SCHEMA public TO postgres",
            "GRANT ALL ON SCHEMA public TO public",
        ):
            engine.execute(statement)

        # Now ready for anchore init (tables etc)
        logger.info("Creating tables")
        do_create_tables()

        yield ret
    finally:
        logger.info("Cleaning up/disconnect")
        end_session()
        do_disconnect()
コード例 #25
0
def test_vulnerable_in(
    vulnerability_with_vulnartifact,
    vulnerable_pkg1,
    nonvulnerable_pkg1,
    monkeypatch_distros,
):
    """
    Test vulnerable in matches
    :return:
    """

    f = vulnerability_with_vulnartifact.vulnerable_in[0]
    logger.info("Testing package %s", vulnerable_pkg1)
    logger.info("Testing vuln %s", f)
    assert isinstance(f, VulnerableArtifact)
    assert f.match_and_vulnerable(vulnerable_pkg1)
    assert not f.match_and_vulnerable(nonvulnerable_pkg1)

    f = vulnerability_with_vulnartifact.vulnerable_in[1]
    logger.info("Testing package %s", vulnerable_pkg1)
    logger.info("Testing vuln %s", f)
    assert isinstance(f, VulnerableArtifact)
    assert not f.match_and_vulnerable(
        vulnerable_pkg1
    )  # Both not vuln now, this entry is for 0.9.x
    assert not f.match_and_vulnerable(nonvulnerable_pkg1)

    pkg_vuln = ImagePackageVulnerability()
    pkg_vuln.package = vulnerable_pkg1
    pkg_vuln.vulnerability = vulnerability_with_vulnartifact
    # NOTE(review): pkg_type is populated from the package *name* here, mirroring
    # the original code — confirm this is intended rather than vulnerable_pkg1.pkg_type
    pkg_vuln.pkg_type = vulnerable_pkg1.name
    pkg_vuln.pkg_version = vulnerable_pkg1.version
    pkg_vuln.pkg_image_id = vulnerable_pkg1.image_id
    pkg_vuln.pkg_user_id = vulnerable_pkg1.image_user_id
    pkg_vuln.pkg_name = vulnerable_pkg1.name
    pkg_vuln.pkg_arch = vulnerable_pkg1.arch
    pkg_vuln.vulnerability_id = vulnerability_with_vulnartifact.id
    pkg_vuln.vulnerability_namespace_name = (
        vulnerability_with_vulnartifact.namespace_name
    )

    # Idiom fix: compare to None with 'is', not '=='
    assert pkg_vuln.fixed_in() is None
コード例 #26
0
    def test_blacklist(self):
        """Blacklist trigger fires on name (and optional version) matches, stays quiet otherwise."""

        def _evaluate(**params):
            # Build the trigger, prime the thread session, prepare context,
            # evaluate against the test image, and return the trigger.
            trigger, gate, ctx = self.get_initialized_trigger(
                BlackListTrigger.__trigger_name__, **params)
            get_thread_scoped_session()
            ctx = gate.prepare_context(self.test_image, ctx)
            trigger.evaluate(self.test_image, ctx)
            logger.info(("Fired: {}".format(trigger.fired)))
            return trigger

        # Match on name + exact version
        self.assertEqual(
            1, len(_evaluate(name="libc6", version="2.24-11+deb9u4").fired))

        # Version mismatch -> no match
        self.assertEqual(
            0, len(_evaluate(name="libc6", version="2.24-10+deb9u4").fired))

        # Match on name only
        self.assertEqual(1, len(_evaluate(name="libc6").fired))

        # Unknown package -> no match
        self.assertEqual(0, len(_evaluate(name="libc-not-real").fired))
コード例 #27
0
def test_bulk_vuln_sync(test_data_env):
    """Bulk vulnerability sync should populate the Vulnerability table from empty."""
    with session_scope() as db:
        existing = db.query(Vulnerability).count()
        logger.info('Starting with {} vuln records'.format(existing))
        assert existing == 0, 'Not starting with empty table'

    data_feeds = DataFeeds.instance()
    started = time.time()
    data_feeds.vulnerabilities.bulk_sync()
    elapsed = time.time() - started
    logger.info('Done with vulnerabilities. Took: {} sec'.format(elapsed))
    with session_scope() as db:
        logger.info('Has {} vuln records'.format(db.query(Vulnerability).count()))
コード例 #28
0
File: test_leases.py — Project: wcc526/anchore-engine
def test_contextmgr(anchore_db):
    """Acquire a TTL lease via context manager, log all leases inside and after the scope."""
    primary_lock = "testlock"
    secondary_lock = "test_lock_2"
    db_locks.init_lease(primary_lock)
    db_locks.init_lease(secondary_lock)
    with db_locks.least_with_ttl(primary_lock, "myid123", ttl=10) as lease:
        logger.info(str(lease))
        with session_scope() as db:
            lease_dump = "\n".join([str(x) for x in db.query(Lease).all()])
            logger.info(("{}".format(lease_dump)))

    # Lease object remains inspectable after release
    logger.info(str(lease))
コード例 #29
0
def test_contextmgr(anchore_db):
    """Exercise the lease context manager: init two leases, hold one with a TTL, dump lease state."""
    for lease_id in ('testlock', 'test_lock_2'):
        db_locks.init_lease(lease_id)

    with db_locks.least_with_ttl('testlock', 'myid123', ttl=10) as held:
        logger.info(str(held))
        with session_scope() as db:
            snapshot = '\n'.join([str(x) for x in db.query(Lease).all()])
            logger.info(('{}'.format(snapshot)))

    # Held lease object is still printable after the context exits
    logger.info(str(held))
コード例 #30
0
File: __init__.py — Project: Sh4d1/anchore-engine
def handle_feed_sync_trigger(*args, **kwargs):
    """
    Checks to see if there is a task for a feed sync in the queue and if not, adds one.
    Interval for firing this should be longer than the expected feed sync duration.

    :param args: unused
    :param kwargs: must contain 'mythread' with a 'cycle_timer' interval in seconds
    :return: True (loop never exits in practice)
    """
    system_user = _system_creds()

    logger.info('init args: {}'.format(kwargs))
    cycle_time = kwargs['mythread']['cycle_timer']

    while True:
        try:
            all_ready = anchore_engine.clients.common.check_services_ready(
                ['simplequeue'])
            if not all_ready:
                logger.info("simplequeue service not yet ready, will retry")
            else:
                logger.info('Feed Sync Trigger activated')
                # Only enqueue if an identical sync message is not already queued
                if not simplequeue.is_inqueue(userId=system_user,
                                              name=feed_sync_queuename,
                                              inobj=feed_sync_msg):
                    try:
                        simplequeue.enqueue(userId=system_user,
                                            name=feed_sync_queuename,
                                            inobj=feed_sync_msg)
                    except Exception:
                        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt propagate
                        logger.exception(
                            'Could not enqueue message for a feed sync')
                logger.info('Feed Sync Trigger done, waiting for next cycle.')
        except Exception as e:
            logger.exception(
                'Error caught in feed sync trigger handler. Will continue. Exception: {}'
                .format(e))

        time.sleep(cycle_time)

    return True