Example 1
def test_json_reads_and_writes_strings():
    c = Config({'greeting': 'Hello'})
    assert c.json.dumps() == '{}'
    assert c.json.dumps(with_defaults=True) == '{\n  "greeting": "Hello"\n}'

    c.json.loads('{"something_nonexistent": 1}')
    assert c.dump_values() == {'greeting': 'Hello'}

    c.json.loads('{"something_nonexistent": 1}', as_defaults=True)
    assert c.dump_values() == {'greeting': 'Hello', 'something_nonexistent': 1}

    c.json.loads('{"greeting": "Hello, world!"}')
    assert c.dump_values() == {'greeting': 'Hello, world!', 'something_nonexistent': 1}
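A minimal follow-up sketch (assuming these tests exercise the same configmanager `Config` API used throughout this page): only explicitly set values are serialised by `dumps()`, which is why the first assertion above yields `'{}'`.

import json

# Hedged sketch: defaults are excluded from dumps() unless with_defaults=True.
c = Config({'greeting': 'Hello'})
assert json.loads(c.json.dumps()) == {}                                        # default only, nothing set
assert json.loads(c.json.dumps(with_defaults=True)) == {'greeting': 'Hello'}

c.greeting.value = 'Hi'                                                        # explicitly set a value
assert json.loads(c.json.dumps()) == {'greeting': 'Hi'}                        # now it is included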
Example 2
def test_config_item_value_can_be_unicode_str(tmpdir):
    config1 = Config({'greeting': u'Hello, {name}', 'name': u'Anonymous'})
    config1.name.value = u'Jānis Bērziņš'
    assert config1.name.type == Types.str

    path = tmpdir.join('config.ini').strpath
    config1.configparser.dump(path, with_defaults=True)

    config2 = Config({'greeting': '', 'name': ''})
    config2.configparser.load(path)
    assert config2.name.value == u'Jānis Bērziņš'
    assert config1.dump_values(with_defaults=True) == config2.dump_values(
        with_defaults=True)
Example 3
def test_to_dict_should_not_include_items_with_no_usable_value():
    config = Config()
    assert config.dump_values() == {}

    config.a = Item()
    config.b = Item()
    config.dummies = Config({'x': Item(), 'y': Item()})
    assert config.dump_values() == {}

    config.dummies.x.value = 'yes'
    assert config.dump_values() == {'dummies': {'x': 'yes'}}

    config.b.value = 'no'
    assert config.dump_values() == {'dummies': {'x': 'yes'}, 'b': 'no'}
Example 4
def test_can_have_a_dict_as_a_config_value_if_wrapped_inside_item():
    # You may want to have a dictionary as a config value if you only
    # change it all together or you only pass it all in one piece.

    config = Config({
        'db': {
            'user': '******',
            'password': '******',
        },
        'aws': Item(default={
            'access_key': '123',
            'secret_key': 'secret',
        })
    })

    assert isinstance(config.aws, Item)
    assert config.aws.name == 'aws'

    with pytest.raises(AttributeError):
        assert config.aws.access_key.value == '123'

    assert config.aws.value['access_key'] == '123'

    # This should have no effect because it is working on a copy of the default
    # value, not the real thing.
    config.aws.value['secret_key'] = 'NEW_SECRET'

    assert config.dump_values()['aws'] == {
        'access_key': '123',
        'secret_key': 'secret'
    }
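Since `config.aws.value` returns a copy, an in-place edit of the nested dict is lost, as the test above shows. A hedged sketch of the pattern that does persist, assuming `.value` assignment works for dict-valued items just as it does for the scalar items elsewhere on this page:

# Hedged sketch: to change a dict-valued item, assign a whole dict back to .value.
config = Config({'aws': Item(default={'access_key': '123', 'secret_key': 'secret'})})

aws = dict(config.aws.value)           # work on an explicit copy
aws['secret_key'] = 'NEW_SECRET'
config.aws.value = aws                 # assignment to .value is what persists

assert config.dump_values()['aws']['secret_key'] == 'NEW_SECRET'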
Example 5
def test_config_written_to_and_read_from_yaml_string():
    config_str = (
        'uploads:\n'
        '  enabled: True\n'
        '  threads: 5\n'
        '  db:\n'
        '    user: root\n\n'
    )

    config = Config()
    config.yaml.loads(config_str, as_defaults=True)

    # Make sure the order is preserved
    assert list(config.iter_paths(recursive=True, key='str_path')) == [
        'uploads', 'uploads.enabled', 'uploads.threads', 'uploads.db', 'uploads.db.user'
    ]

    # And the values
    assert config.dump_values() == {
        'uploads': {
            'enabled': True,
            'threads': 5,
            'db': {
                'user': 'root',
            }
        }
    }

    config_str2 = config.yaml.dumps(with_defaults=True)

    # TODO Fix this in Python 2
    if six.PY3:
        assert config_str2 == (
            'uploads:\n'
            '  enabled: true\n'
            '  threads: 5\n'
            '  db:\n'
            '    user: root\n'
        )

    config2 = Config()
    config2.yaml.loads(config_str2, as_defaults=True)
    assert list(config2.iter_paths(recursive=True, key='str_path')) == [
        'uploads', 'uploads.enabled', 'uploads.threads', 'uploads.db', 'uploads.db.user'
    ]
    assert config2.dump_values() == config.dump_values()
Example 6
def test_writes_to_and_reads_from_default_section_transparently(tmpdir):
    config_ini = tmpdir.join('config.ini').strpath

    config1 = Config(
        collections.OrderedDict([('greeting', 'Hello'), ('name', 'World')]))
    config1.configparser.dump(config_ini, with_defaults=True)

    with open(config_ini) as f:
        assert f.read() == ('[NO_SECTION]\n'
                            'greeting = Hello\n'
                            'name = World\n\n')

    config2 = Config()
    config2.configparser.load(config_ini, as_defaults=True)

    assert config1.dump_values() == config2.dump_values() == {
        'greeting': 'Hello',
        'name': 'World'
    }
Example 7
def test_assigning_nameless_item_directly_to_config_should_set_its_name():
    config = Config()
    config.dummy = Config()
    config.dummy.x = Item(value=5)
    assert config.dummy.x.name == 'x'

    config.dummy['y'] = Item(default=True)
    assert config.dummy.y.name == 'y'

    assert config.dump_values() == {'dummy': {'x': 5, 'y': True}}
Example 8
def test_config_written_to_and_read_from_yaml_file(yaml_path1):
    config = Config({
        'uploads': {
            'enabled': True,
            'threads': 5,
            'db': {
                'user': '******',
            },
        },
    })
    original_values = config.dump_values()

    config.yaml.dump(yaml_path1, with_defaults=True)

    config.yaml.load(yaml_path1)
    assert config.dump_values() == original_values

    config2 = Config()
    config2.yaml.load(yaml_path1, as_defaults=True)
    assert config2.dump_values() == original_values
Example 9
def test_dict_and_module_based_config_schema(app_config_dict_example, app_config_module_example):
    dict_config = Config(app_config_dict_example)
    module_config = Config(app_config_module_example)
    assert dict_config.dump_values(with_defaults=True) == module_config.dump_values(with_defaults=True)
Example 10
def main(args):
    """Main entry point allowing external calls

    Args:
      args ([str]): command line parameter list
    """



    args = parse_args(args)
    setup_logging(args.loglevel)

    if args.generate_key:
        LOGGER.info("Generating a key pair")
        generate_keypair(args.print_key, args.key_path)
        return

    LOGGER.info("Loading configuration")
    config = Config(schema=get_defaults())
    app['config'] = config

    if args.config_file is not None:
        LOGGER.debug("Loading config file '%s'", args.config_file)
        app['config'].yaml.load(args.config_file)

    if (not config.p2p.key.value) and args.key_path:
        LOGGER.debug("Loading key pair from file")
        with open(args.key_path, 'r') as key_file:
            config.p2p.key.value = key_file.read()

    if not config.p2p.key.value:
        LOGGER.critical("Node key cannot be empty")
        return

    if args.port:
        config.aleph.port.value = args.port
    if args.host:
        config.aleph.host.value = args.host

    if args.sentry_disabled:
        LOGGER.info("Sentry disabled by CLI arguments")
    elif app['config'].sentry.dsn.value:
        sentry_sdk.init(
            dsn=app['config'].sentry.dsn.value,
            traces_sample_rate=app['config'].sentry.traces_sample_rate.value,
            ignore_errors=[KeyboardInterrupt],
        )
        LOGGER.info("Sentry enabled")

    config_values = config.dump_values()

    LOGGER.debug("Initializing database")
    model.init_db(config, ensure_indexes=(not args.debug))
    LOGGER.info("Database initialized.")

    # filestore.init_store(config)
    # LOGGER.info("File store initalized.")
    init_cors()  # FIXME: This is stateful and process-dependent
    set_start_method('spawn')
    manager = None
    if config.storage.engine.value == 'rocksdb':
        # rocksdb doesn't support multiprocess/multithread
        manager = prepare_manager(config_values)

    with Manager() as shared_memory_manager:
        tasks: List[Coroutine] = []
        # This dictionary is shared between all the processes so we can expose some
        # internal stats. Handle with care: it is shared between processes.
        shared_stats = shared_memory_manager.dict()
        if not args.no_jobs:
            LOGGER.debug("Creating jobs")
            tasks += start_jobs(config, shared_stats=shared_stats,
                                manager=manager, use_processes=True)

        loop = asyncio.get_event_loop()

        # handler = app.make_handler(loop=loop)
        LOGGER.debug("Initializing p2p")
        f = p2p.init_p2p(config)
        p2p_tasks = loop.run_until_complete(f)
        tasks += p2p_tasks
        LOGGER.debug("Initialized p2p")

        LOGGER.debug("Initializing listeners")
        tasks += listener_tasks(config)
        tasks += connector_tasks(config, outgoing=(not args.no_commit))
        LOGGER.debug("Initialized listeners")

        # Needs to be passed here, otherwise it gets lost in the fork
        from aleph.services.p2p import manager as p2p_manager
        extra_web_config = {
            'public_adresses': p2p_manager.public_adresses
        }

        p1 = Process(target=run_server_coroutine, args=(
            config_values,
            config.p2p.host.value,
            config.p2p.http_port.value,
            manager and (manager._address, manager._authkey) or None,
            3,
            shared_stats,
            args.sentry_disabled is False and app['config'].sentry.dsn.value,
            extra_web_config,

        ))
        p2 = Process(target=run_server_coroutine, args=(
            config_values,
            config.aleph.host.value,
            config.aleph.port.value,
            manager and (manager._address, manager._authkey) or None,
            4,
            shared_stats,
            args.sentry_disabled is False and app['config'].sentry.dsn.value,
            extra_web_config
        ))
        p1.start()
        p2.start()
        LOGGER.debug("Started processes")

        # fp2p = loop.create_server(handler,
        #                           config.p2p.host.value,
        #                           config.p2p.http_port.value)
        # srvp2p = loop.run_until_complete(fp2p)
        # LOGGER.info('Serving on %s', srvp2p.sockets[0].getsockname())

        # f = loop.create_server(handler,
        #                        config.aleph.host.value,
        #                        config.aleph.port.value)
        # srv = loop.run_until_complete(f)
        # LOGGER.info('Serving on %s', srv.sockets[0].getsockname())
        LOGGER.debug("Running event loop")
        loop.run_until_complete(asyncio.gather(*tasks))
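The configuration handling in `main()` above follows a layered pattern: schema defaults first, then an optional YAML file, then CLI flags overriding individual items. A condensed, hedged sketch of that precedence chain; `build_config` and its parameters are illustrative helpers, not part of the real CLI:

# Illustrative helper only: defaults -> YAML file -> explicit CLI overrides,
# mirroring the step-by-step assignments in main() above.
def build_config(schema, config_file=None, host=None, port=None):
    config = Config(schema=schema)

    if config_file is not None:
        config.yaml.load(config_file)      # file values override schema defaults

    if host:
        config.aleph.host.value = host     # explicit CLI flags win over the file
    if port:
        config.aleph.port.value = port

    return config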
Example 11
def main(args):
    """Main entry point allowing external calls

    Args:
      args ([str]): command line parameter list
    """

    # uvloop.install()
    args = parse_args(args)
    if args.generate_key:
        setup_logging(logging.INFO)
        generate_keypair(print_info=True)
        return

    setup_logging(args.loglevel)
    LOGGER.info("Starting up.")

    config = Config(schema=get_defaults())
    app['config'] = config

    config.aleph.port.value = args.port
    config.aleph.host.value = args.host

    if args.config_file is not None:
        app['config'].yaml.load(args.config_file)

    config_values = config.dump_values()

    model.init_db(config, ensure_indexes=(not args.debug))
    LOGGER.info("Database initialized.")

    # filestore.init_store(config)
    # LOGGER.info("File store initalized.")

    init_cors()
    set_start_method('spawn')
    manager = None
    if config.storage.engine.value == 'rocksdb':
        # rocksdb doesn't support multiprocess/multithread
        manager = prepare_manager(config_values)

    if not args.no_jobs:
        start_jobs(config, manager=manager, use_processes=False)

    loop = asyncio.get_event_loop()
    # handler = app.make_handler(loop=loop)
    f = p2p.init_p2p(config)
    host = loop.run_until_complete(f)

    setup_listeners(config)
    start_connector(config, outgoing=(not args.no_commit))

    p1 = Process(target=run_server,
                 args=(config_values, config.p2p.host.value,
                       config.p2p.http_port.value,
                       manager and (manager._address, manager._authkey)
                       or None, 3))
    p2 = Process(target=run_server,
                 args=(config_values, config.aleph.host.value,
                       config.aleph.port.value,
                       manager and (manager._address, manager._authkey)
                       or None, 4))
    p1.start()
    p2.start()

    # fp2p = loop.create_server(handler,
    #                           config.p2p.host.value,
    #                           config.p2p.http_port.value)
    # srvp2p = loop.run_until_complete(fp2p)
    # LOGGER.info('Serving on %s', srvp2p.sockets[0].getsockname())

    # f = loop.create_server(handler,
    #                        config.aleph.host.value,
    #                        config.aleph.port.value)
    # srv = loop.run_until_complete(f)
    # LOGGER.info('Serving on %s', srv.sockets[0].getsockname())
    loop.run_forever()
Example 12
def test_json_read_and_write(defaults_json_path, user_json_path):
    c1 = Config()
    c1.json.load(defaults_json_path, as_defaults=True)

    c2 = Config()
    c2.json.load([defaults_json_path], as_defaults=True)

    c3 = Config()
    with open(defaults_json_path) as f:
        c3.json.load(f, as_defaults=True)

    assert c1.dump_values(with_defaults=False) == {}
    assert c1.dump_values(with_defaults=True) == {
        'uploads': {
            'threads': 1,
            'enabled': False,
            'tmp_dir': '/tmp',
        }
    }

    assert c1.dump_values() == c2.dump_values() == c3.dump_values()

    c1.json.load(user_json_path)
    c2.json.load([user_json_path])
    with open(user_json_path) as f:
        c3.json.load(f)

    assert c1.dump_values(with_defaults=False) == {
        'uploads': {
            'threads': 5,
            'enabled': True,
        }
    }
    assert c1.dump_values() == c2.dump_values() == c3.dump_values()

    updates = {
        'uploads': {
            'threads': 10,
            'enabled': False,
        }
    }

    c1.load_values(updates)
    c2.load_values(updates)
    c3.load_values(updates)

    assert c1.dump_values() == c2.dump_values() == c3.dump_values()

    c1.json.dump(user_json_path)
    c2.json.load(user_json_path)
    assert c1.dump_values() == c2.dump_values() == c3.dump_values()

    with open(user_json_path, 'w') as f:
        c2.json.dump(f)
    c1.json.load(user_json_path)

    assert c1.dump_values() == c2.dump_values() == c3.dump_values()
Example 13
class Graphistry(object):
    config = None
    config_json = None

    def __init__(self):
        self.config_file = os.path.join(config_location(), 'config.json')

    def get_config(self, schema=None):
        if not schema:
            schema = CONFIG_SCHEMA
        self.config = Config(schema=schema)

        return self.config

    def create_bcrypt_container(self):
        make_bcrypt = join(cwd, 'bootstrap/make-bcrypt-contianer.sh')
        local('sudo bash {mb}'.format(mb=make_bcrypt))

    def load_investigations(self, airgapped=False):
        """
        This will load the pivotdb test data.
        :return:
        """
        self.load_config(airgapped)
        pivot_db = '.pivot-db'
        invest_dir = join(pivot_db, 'investigations')
        pivots_dir = join(pivot_db, 'pivots')

        print(pivot_db)
        if not os.path.exists(pivot_db):
            local('mkdir -p ' + pivot_db)

        if not os.path.exists(invest_dir):
            local('mkdir -p ' + invest_dir)

        if not os.path.exists(pivots_dir):
            local('mkdir -p ' + pivots_dir)

        with hide('output', 'running', 'warnings'), settings(warn_only=True):
            local('sudo chmod -R 777 ' + pivot_db)

        for inv in self.config.investigations.value:
            try:
                print("writing investigation {0}".format(inv['name']))
                investigation = join(invest_dir, '{id}.json'.format(id=inv['json']['id']))
                with open(investigation, 'w') as outfile:
                    json.dump(inv['json'], outfile, ensure_ascii=False, indent=4, sort_keys=True)

                for piv in inv['pivots']:
                    print("writing pivot {0}".format(piv['name']))
                    pivot = join(pivots_dir, '{id}.json'.format(id=piv['json']['id']))
                    with open(pivot, 'w') as outfile:
                        json.dump(piv['json'], outfile, ensure_ascii=False, indent=4, sort_keys=True)
            except Exception as e:
                print(e)

        with hide('output', 'running', 'warnings'), settings(warn_only=True):
            local('sudo chmod -R 777 ' + pivot_db)

    def login(self):
        toolbar_quip = revisionist_commit_history_html()
        username = prompt('username: ', bottom_toolbar=toolbar_quip)
        password = prompt('password: ', bottom_toolbar=toolbar_quip)
        res = requests.get('{0}/api/v1/config/?format=json'.format(SHIPYARD_HOST),
                           auth=HTTPBasicAuth(username, password))

        if res.status_code == 401:
            print('Invalid Username/Password')
            sys.exit()
        elif res.status_code == 403:
            print('You do not have permissions to do this action. Ask your administrator to upgrade your account.')
            sys.exit()

        conf = res.json()['results'][0]
        schema = {
            'user': conf['user'],
            'default_deployment': conf['default_deployment'],
            'investigations': conf['default_deployment']['investigations'],
            'registry_credentials': conf['default_deployment']['registry_credentials'],
            'vizapp_container': conf['default_deployment']['vizapp_container'],
            'pivotapp_container': conf['default_deployment']['pivotapp_container'],
            'is_airgapped': False,
            'api_canary': '',
            'api_secret': '',
            'graphistry_key': '',
            'graphistry_host': '',
            'es_host': '',
            'es_port': '9200',
            'es_protocol': 'http',
            'es_auth': '',
            'splunk_host': '',
            'splunk_web_port': '443',
            'splunk_api_port': '8089',
            'splunk_protocol': 'https',    
            'splunk_user': '',
            'splunk_password': '',
            'ip_internal_accept_list': '',
            'http_user': '',
            'http_password_hash': '',
            's3_access': '',
            's3_secret': '',
        }
        self.get_config(schema)

    def load_config(self, airgapped=False):
        print("Loading Config")
        if exists(self.config_file):
            self.config = Config(
                schema=CONFIG_SCHEMA,
                load_sources=[self.config_file],
                auto_load=True,
            )
            self.config.json.load(self.config_file)
        elif airgapped:
            self.get_config(CONFIG_SCHEMA)
        else:
            self.login()
        return self.config

    def template_config(self, airgapped=False):
        toolbar_quip = revisionist_commit_history_html()
        self.load_config(airgapped)

        self.config.is_airgapped.value = airgapped

        
        # Graphistry API Key Generation
        click.secho("[graphistry] Configure API key generation. [Hash algorithm is 'aes-256-cbc'.]", fg="yellow")
        
        api_canary = prompt('Hash Canary string (enter to autogenerate): ', bottom_toolbar=toolbar_quip)
        api_secret = prompt('Your Secret string (enter to autogenerate): ', bottom_toolbar=toolbar_quip)

        if api_canary == '':
            self.config.api_canary.value = id_generator(10)

        if api_secret == '':
            self.config.api_secret.value = id_generator(10)

        #self.config.graphistry_key.value = prompt('You supplied Graphisty key: ',
        #                                           bottom_toolbar=toolbar_quip)


        # Elasticsearch
        click.secho("[graphistry] Configure connectors", fg="yellow")
        self.config.es_host.value = prompt('Your Elasticsearch Host, e.g., elk.company.com (enter to skip): ',
                                           bottom_toolbar=toolbar_quip)
        if self.config.es_host.value != '':
            self.config.es_port.value = prompt('Your Elasticsearch Port [default: 9200]: ',
                                               bottom_toolbar=toolbar_quip)
            if self.config.es_port.value == '':
                self.config.es_port.value = CONFIG_SCHEMA['es_port']

            self.config.es_protocol.value = prompt('Your Elasticsearch Protocol [default: http]: ',
                                               bottom_toolbar=toolbar_quip)
            if self.config.es_protocol.value == '':
                self.config.es_protocol.value = CONFIG_SCHEMA['es_protocol']

            self.config.es_auth.value = prompt('Your Elasticsearch auth, e.g., user:password [default: none]: ',
                                               bottom_toolbar=toolbar_quip)
            if self.config.es_auth.value == '':
                self.config.es_auth.value = CONFIG_SCHEMA['es_auth']


        # Splunk
        self.config.splunk_host.value = prompt('Your Splunk Host, e.g., www.splunk.com (enter to skip): ',
                                               bottom_toolbar=toolbar_quip)
        if self.config.splunk_host.value != '':
            
            #### DEFAULTS
            self.config.splunk_api_port.value = prompt('Your Splunk API Port [default: 8089]: ',
                bottom_toolbar=toolbar_quip)
            if self.config.splunk_api_port.value == '':
                self.config.splunk_api_port.value = CONFIG_SCHEMA['splunk_api_port']

            self.config.splunk_web_port.value = prompt('Your Splunk Web UI Port [default: 443]: ',
                bottom_toolbar=toolbar_quip)
            if self.config.splunk_web_port.value == '':
                self.config.splunk_web_port.value = CONFIG_SCHEMA['splunk_web_port']

            self.config.splunk_protocol.value = prompt('Your Splunk Protocol, e.g., https [default: https]: ',
                bottom_toolbar=toolbar_quip)
            if self.config.splunk_protocol.value == '':
                self.config.splunk_protocol.value = CONFIG_SCHEMA['splunk_protocol']


            self.config.splunk_user.value = prompt('Your Splunk Username: ', bottom_toolbar=toolbar_quip)
            self.config.splunk_password.value = prompt('Your Splunk Password: ', bottom_toolbar=toolbar_quip)

        self.config.graphistry_host.value = prompt('Your FQDN for this deployment, including protocol '
                                                   '[e.g., http://graphistry.yourcompany.com]: ',
                                                   bottom_toolbar=toolbar_quip)

        # Ip Whitelist
        self.config.ip_internal_accept_list.value = prompt('Your Internal IP Accept Whitelist (beyond typical RFC 1918)'
                                                           ', ex:["127.0.0.1", "10.*"]',
                                                           bottom_toolbar=toolbar_quip)

        # Http Ingress
        self.config.http_user.value = prompt('HTTP Ingress Username: ', bottom_toolbar=toolbar_quip)
        password = prompt('HTTP Ingress Password: ', bottom_toolbar=toolbar_quip)
        with hide('output', 'running', 'warnings'), settings(warn_only=True):
            self.config.http_password_hash.value = local('docker run -it bcrypt bcrypt-cli "{0}" 10'.format(password),
                                                         capture=True)

        # AWS
        self.config.s3_access.value = prompt('AWS Access Key ID (enter to skip): ', bottom_toolbar=toolbar_quip)
        if self.config.s3_access.value != '':
            self.config.s3_secret.value = prompt('AWS Access Key Secret: ', bottom_toolbar=toolbar_quip)

        self.save_config()
        self.write_configs()

    def write_configs(self):
        jenv = Environment()
        jenv.filters['jsonify'] = json.dumps
        templates = ['pivot-config.json', 'httpd-config.json', 'viz-app-config.json']
        for tmpl in templates:
            _file = open(cwd+'/templates/'+tmpl, "r")
            create_config_files(tmpl, jenv.from_string(_file.read()).render(self.config.dump_values()))
            _file.close()

    def registry_auth(self):
        # Just snag whatever credentials are in the config and make sure the key is saved.
        # docker-py doesn't seem to want to accept the JSON string as a password so this works.
        key_filename = os.path.join(expanduser('~/.config/graphistry'), '.registrykey.json')
        registry_credentials = json.dumps(dict(self.config.default_deployment.value['registry_credentials']))
        _file = open(key_filename, "w")
        _file.write(registry_credentials)
        _file.close()

        local('docker login -u _json_key -p "$(cat {0})" https://us.gcr.io | tee'.format(key_filename))

    def save_config(self):
        print("Saving Config")
        location = config_location()

        ensure_dir_exists(location)

        if self.config:
            self.config.json.dump(self.config_file, with_defaults=True)
            print("Wrote config:", self.config_file)
Example 14
def test_nested_config():
    """
    This demonstrates how an application config can be created from multiple
    sections (which in turn can be created from others).
    """

    # Declaration of a config section may be a plain dictionary
    db_config = {
        'host': 'localhost',
        'user': 'admin',
        'password': '******',
    }

    # Or, it may be an already functional instance of Config
    server_config = Config({
        'port': 8080,
    })

    #
    # All these sections can be combined into one config:
    #
    config = Config({
        'db': db_config,
        'server': server_config,
        'greeting': 'Hello',  # and you can have plain config items next to sections
    })

    # You can read values
    assert config.greeting.value == 'Hello'

    # Your original schemas are safe -- db_config dictionary won't be changed
    config.db.user.value = 'root'
    assert config.db.user.value == 'root'
    assert db_config['user'] == 'admin'

    # You can also change values by reading them from a dictionary.
    # Unknown names will be ignored unless you pass as_defaults=True
    # but in that case you will overwrite any previously existing items.
    config.load_values({
        'greeting': 'Good morning!',
        'comments': {
            'enabled': False
        }
    })
    assert config.greeting.value == 'Good morning!'
    assert 'comments' not in config

    # You can check if config value is the default value
    assert not config.db.user.is_default
    assert config.server.port.is_default

    # Or if it has any value at all
    assert config.server.port.has_value

    # Iterate over all items (recursively)
    all = dict(config.iter_items(recursive=True))
    assert all[('db', 'host')] is config.db.host
    assert all[('server', 'port')] is config.server.port

    # Export all values
    config_dict = config.dump_values()
    assert config_dict['db'] == {
        'host': 'localhost',
        'user': 'root',
        'password': '******'
    }

    # Each section is a Config instance too, so you can export those separately too:
    assert config.server.dump_values() == config_dict['server']

    # You can reset individual items to their default values
    assert config.db.user.value == 'root'
    config.db.user.reset()
    assert config.db.user.value == 'admin'

    # Or sections
    config.db.user.value = 'root_again'
    assert config.db.user.value == 'root_again'
    config.db.reset()
    assert config.db.user.value == 'admin'

    # Or you can reset all configuration and you can make sure all values match defaults
    assert not config.is_default
    config.reset()
    assert config.is_default
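For contrast with the `'comments' not in config` assertion above: as the comment in the test suggests, passing `as_defaults=True` to `load_values` creates items for otherwise-unknown names instead of ignoring them. A small hedged sketch:

# Hedged sketch: unknown names are only created when as_defaults=True is passed.
config = Config({'greeting': 'Hello'})

config.load_values({'comments': {'enabled': False}})
assert 'comments' not in config                        # ignored by default

config.load_values({'comments': {'enabled': False}}, as_defaults=True)
assert 'comments' in config
assert config.comments.enabled.value is False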
Example 15
def test_simple_config():
    # Initialisation of a config manager
    config = Config({
        'greeting': 'Hello, world!',
        'threads': 1,
        'throttling_enabled': False,
    })

    # Attribute-based and key-based access to config items
    assert config.greeting is config['greeting']

    # Every config item is an instance of Item
    assert isinstance(config.greeting, Item)

    # Value and other attribute access on Item
    assert config.greeting.value == 'Hello, world!'
    assert config.threads.value == 1
    assert config.threads.type == Types.int
    assert config.throttling_enabled.value is False
    assert config.throttling_enabled.type == Types.bool

    # If you are working with items which don't have default values, you can use .get() method
    # which accepts fallback value:
    assert config.greeting.get() == 'Hello, world!'
    assert config.greeting.get('Hey!') == 'Hello, world!'

    # Can check if a config item is managed by the manager
    assert 'greeting' in config
    assert 'warning' not in config

    # Can change values
    config.greeting.value = 'Good evening!'
    assert config.greeting.value == 'Good evening!'

    # Can inspect default value
    assert config.greeting.default == 'Hello, world!'

    # Can export all values to a dictionary
    assert config.dump_values() == {
        'greeting': 'Good evening!',
        'threads': 1,
        'throttling_enabled': False,
    }

    # Can iterate over all items
    items = dict(config.iter_items(recursive=True))
    assert len(items) == 3
    assert items[('greeting', )] is config.greeting
    assert items[('threads', )] is config.threads
    assert items[('throttling_enabled', )] is config.throttling_enabled

    # Requesting unknown config raises NotFound
    with pytest.raises(NotFound):
        assert not config.other_things

    # Cannot change item value incorrectly
    with pytest.raises(TypeError):
        config.greeting = 'Bye!'

    with pytest.raises(TypeError):
        config['greeting'] = 'Bye!'
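The `.get()` fallback described in the comment above is most useful on items that have no default at all. A minimal sketch, assuming `.get(fallback)` returns the fallback while the item has no usable value (as that comment describes):

# Hedged sketch: the fallback is used only until the item actually has a value.
config = Config({'greeting': 'Hello, world!'})
config.timeout = Item()                    # no default, no value yet

assert config.timeout.get(30) == 30        # fallback returned
config.timeout.value = 10
assert config.timeout.get(30) == 10        # real value wins once set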