Example #1
def yaml_safe_load_with_line_numbers(
    filestream: IO[str], filename: str
) -> Tuple[object, Dict]:
    loader = yaml.SafeLoader(filestream)
    line_numbers: Dict[Any, LineTracker] = {}

    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        mapping = SafeConstructor.construct_mapping(loader, node, deep=deep)
        mapping["__line__"] = LineTracker(filename, node.__line__)
        return mapping

    def construct_scalar(node):
        scalar = SafeConstructor.construct_scalar(loader, node)
        key = id(scalar)
        if key not in line_numbers:
            line_numbers[key] = LineTracker(filename, node.__line__)
        else:
            line_numbers[key] = SHARED_OBJECT
        return scalar

    loader.compose_node = compose_node  # type: ignore
    loader.construct_mapping = construct_mapping  # type: ignore
    loader.construct_scalar = construct_scalar  # type: ignore
    return loader.get_single_data(), line_numbers
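A minimal usage sketch, assuming the imports of the surrounding module are in scope and that `LineTracker` is a simple filename/line record (the class below is a hypothetical stand-in, not the original definition):

import io
from typing import NamedTuple

class LineTracker(NamedTuple):  # hypothetical stand-in for the original class
    filename: str
    line: int

document = io.StringIO("name: demo\nitems:\n  - first\n")
data, lines = yaml_safe_load_with_line_numbers(document, "demo.yml")
print(data["__line__"])  # LineTracker(filename='demo.yml', line=1)
print(data["name"])      # 'demo'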
Example #2
    def read(self) -> None:
        self.config_path = self._get_from_environ("CONFIG_PATH")
        self.environment = self._get_from_environ("ENVIRONMENT")

        with open(self.config_path, 'r') as stream_file:
            loaded = yaml.SafeLoader(stream_file).get_data()
        self.data.update(loaded[self.environment.lower()])
Example #3
def main(yaml_file, sqlite_file):
    features.yaml.check()
    import yaml
    with open(yaml_file) as infile:
        loader = yaml.SafeLoader(infile)
        loader.check_node()
        loader.get_event()
        node = loader.compose_node(None, None)
        schema_names = dict(
            (json.dumps(list(map(loader.construct_sequence, v.value))), k)
            for k, v in loader.anchors.items())
        config = loader.construct_document(node)
    db = sqlite3.connect(sqlite_file)
    sites = config.pop('sites', {})
    config.pop('schemata', None)
    default_schema = sites['default']['schema']
    schemata = {}
    config_rows = [(None, k, json.dumps(v)) for k, v in config.items()]
    for site, site_config in sites.items():
        schema = json.dumps(site_config.pop('schema', default_schema))
        schemata.setdefault(schema, []).append(site)
        config_rows.extend(
            (site, k, json.dumps(v)) for k, v in site_config.items())
    for e, schema in enumerate(sorted(schemata)):
        schema_names.setdefault(schema, 'schema_%d' % e)
    curs = db.cursor()
    curs.executemany('INSERT INTO schemata (name, value) VALUES (?, ?)',
                     ((name, schema) for schema, name in schema_names.items()))
    curs.executemany(
        'INSERT INTO sites (site_name, schema_id) SELECT ?, schema_id FROM schemata WHERE value = ?',
        ((site, value) for value, site_list in schemata.items() for site in site_list))
    curs.executemany(
        'INSERT INTO config_values (site_name, name, value) VALUES (?, ?, ?)',
        config_rows)
    db.commit()
Example #4
    def read(self) -> TDict:
        """
        :return: dict
        """

        with open(self.config_path, 'r') as file:
            data = yaml.SafeLoader(file).get_data()
        return data[self.environment]
Example #5
def parse_job():
    filename = find_file(NETTILE_PATTERN)
    with open(filename, 'r') as f:
        loader = yaml.SafeLoader(f)
        key = loader.get_data()   # first YAML document in the stream
        meta = loader.get_data()  # second YAML document
        job = meta[0]['jobid']
        job = job[:job.index('.')]
        return job
Example #6
def _load_yaml_with_clear_tag(stream):
    """Like yaml.safe_load(), but everything with a !clear tag before it
    will be wrapped in ClearedValue()."""
    loader = yaml.SafeLoader(stream)
    loader.add_constructor('!clear', _cleared_value_constructor)
    try:
        return loader.get_single_data()
    finally:
        if hasattr(loader, 'dispose'):  # it doesn't in PyYAML 3.09
            loader.dispose()
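Neither `ClearedValue` nor `_cleared_value_constructor` appears in this fragment; a plausible reconstruction (the names come from the docstring and the call above, the bodies are assumptions):

class ClearedValue:
    """Wrapper marking a value that carried a !clear tag."""
    def __init__(self, value):
        self.value = value

def _cleared_value_constructor(loader, node):
    # Construct the underlying value as usual, then wrap it.
    if isinstance(node, yaml.MappingNode):
        value = loader.construct_mapping(node)
    elif isinstance(node, yaml.SequenceNode):
        value = loader.construct_sequence(node)
    else:
        value = loader.construct_scalar(node)
    return ClearedValue(value)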
Example #7
    def _parse_config(self, path, config=None):
        with open(path, "r") as f:
            loader = yaml.SafeLoader(f)
            data = loader.get_data()
            height = loader.line + 1  # number of lines consumed by the document
            loader.dispose()
        if isinstance(config, ChainMap):
            data = config.new_child(data)
        elif isinstance(config, Mapping):
            data = ChainMap(data, config)
        return data, height
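The ChainMap layering lets a child config shadow its parent without copying either dict; a minimal standalone illustration:

from collections import ChainMap

defaults = {"host": "localhost", "port": 8080}
overrides = {"port": 9090}
merged = ChainMap(overrides, defaults)  # lookups try overrides first
print(merged["port"])  # 9090
print(merged["host"])  # localhost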
Example #8
    def __init__(self, file, exclude_lines_regex=None):
        """
        :type file: file object

        :type exclude_lines_regex: regex object
        :param exclude_lines_regex: optional regex for ignored lines.
        """
        self.content = file.read()
        self.exclude_lines_regex = exclude_lines_regex

        self.loader = yaml.SafeLoader(self.content)
        self.loader.compose_node = self._compose_node_shim
Example #9
def get_all_objects(text, beginning=r'{'):
    import yaml
    # TODO?: somehow optimise the slicing?
    for from_ in indexall_re(text, beginning):
        loader = yaml.SafeLoader(StringIO(text))
        loader.forward(from_)
        # loader.update(from_)
        try:
            part_res = loader.get_data()
        except Exception:
            continue
        assert isinstance(part_res, dict)
        yield part_res
Example #10
def compare_session(quteproc, expected):
    """Compare the current sessions against the given template.

    partial_compare is used, which means only the keys/values listed will be
    compared.
    """
    # Translate ... to ellipsis in YAML.
    loader = yaml.SafeLoader(expected)
    loader.add_constructor('!ellipsis', lambda loader, node: ...)
    loader.add_implicit_resolver('!ellipsis', re.compile(r'\.\.\.'), None)

    data = quteproc.get_session()
    expected = loader.get_data()
    assert utils.partial_compare(data, expected)
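The constructor/resolver pair turns a literal `...` in the YAML template into Python's `Ellipsis`, so partial_compare can treat those positions as wildcards. A standalone sketch of the same trick (note that PyYAML registers constructors and resolvers on the loader class, so every later SafeLoader in the process is affected too):

import re
import yaml

loader = yaml.SafeLoader("url: ...")
loader.add_constructor('!ellipsis', lambda loader, node: ...)
loader.add_implicit_resolver('!ellipsis', re.compile(r'\.\.\.'), None)
print(loader.get_data())  # {'url': Ellipsis}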
Example #11
def check_inline_mapping(line):
    loader = yaml.SafeLoader(line.content)
    try:
        while loader.peek_token():
            if isinstance(loader.get_token(), yaml.BlockMappingStartToken):
                while loader.peek_token():
                    if isinstance(loader.get_token(), yaml.ValueToken):
                        t = loader.get_token()
                        if isinstance(t, yaml.ScalarToken):
                            return (' '
                                    not in line.content[t.start_mark.column:])
    except yaml.scanner.ScannerError:
        pass

    return False
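A quick probe of what the token walk reports, using a hypothetical stand-in for the `line` object (only a `.content` attribute is needed):

from types import SimpleNamespace

print(check_inline_mapping(SimpleNamespace(content="key: value")))      # True: scalar value without a space
print(check_inline_mapping(SimpleNamespace(content="key: two words")))  # False: the value contains a space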
Example #12
def load_meta(filename):
    """Reads the run file with the meta data. This file is composed
       of an arbitrary number of YAML documents. The first document
       is the global meta information. All following documents describe
       other files or objects.
    """
    import yaml
    meta = None
    with open(filename, 'r') as input_file:
        loader = yaml.SafeLoader(input_file)  # No python-specific yaml
        if loader.check_data():
            meta = loader.get_data()

        filelist = []
        while loader.check_data():  # There's another document
            filelist.append(loader.get_data())

    return meta, filelist
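The same document-by-document pattern, standalone, with a hypothetical three-document run file:

import io
import yaml

run_file = io.StringIO("run: 42\n---\nfile: a.dat\n---\nfile: b.dat\n")
loader = yaml.SafeLoader(run_file)
meta = loader.get_data() if loader.check_data() else None
filelist = []
while loader.check_data():
    filelist.append(loader.get_data())
print(meta)      # {'run': 42}
print(filelist)  # [{'file': 'a.dat'}, {'file': 'b.dat'}]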
Example #13
    def compare_session(self, expected):
        """Compare the current sessions against the given template.

        partial_compare is used, which means only the keys/values listed will
        be compared.
        """
        __tracebackhide__ = lambda e: e.errisinstance(pytest.fail.Exception)
        # Translate ... to ellipsis in YAML.
        loader = yaml.SafeLoader(expected)
        loader.add_constructor('!ellipsis', lambda loader, node: ...)
        loader.add_implicit_resolver('!ellipsis', re.compile(r'\.\.\.'), None)

        data = self.get_session()
        expected = loader.get_data()
        outcome = testutils.partial_compare(data, expected)
        if not outcome:
            msg = "Session comparison failed: {}".format(outcome.error)
            msg += '\nsee stdout for details'
            pytest.fail(msg)
Example #14
def yaml_load(stream):
    """Load YAML file using safe loader."""
    # Surprisingly, the CSafeLoader does not seem to be used by default.
    # Check whether the CSafeLoader is available and provide a log message
    # if it is not available.
    global _HAS_YAML_LIBRARY

    if _HAS_YAML_LIBRARY is None:
        _HAS_YAML_LIBRARY = hasattr(yaml, 'CSafeLoader')
        if not _HAS_YAML_LIBRARY:
            logger.warning('libyaml was not found! Please install libyaml to'
                           ' speed up loading the model files.')

    if _HAS_YAML_LIBRARY:
        loader = yaml.CSafeLoader(stream)
    else:
        loader = yaml.SafeLoader(stream)
    loader.add_constructor('tag:yaml.org,2002:float', float_constructor)
    return loader.get_data()
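`float_constructor` is not shown here; a plausible reconstruction, assuming the intent is a faster float path than PyYAML's default (the fallback keeps YAML spellings such as '.inf' and '.nan' working):

def float_constructor(loader, node):
    value = loader.construct_scalar(node)
    try:
        return float(value)  # fast path for ordinary decimal/exponent notation
    except ValueError:
        # Fall back to PyYAML's handling of '.inf', '.nan', '1:30', etc.
        return yaml.constructor.SafeConstructor.construct_yaml_float(loader, node)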
Example #15
def load_build_file(build_file_content, valid_services=VALID_SERVICES):
    """ Load and parse the build description contained in the build file """
    stream = StringIO(build_file_content)
    stream.name = 'dotcloud.yml'  # yaml load will use this property
    # to generate proper error marks.

    yaml_loader = yaml.SafeLoader(stream)

    # Check yaml syntax and load ast
    ast = yaml_loader.get_single_node()

    if ast is None:
        raise ValueError('"dotcloud.yml" is empty!')

    # Validate ast against dotcloud.yml schema.
    validate_ast_schema(ast, valid_services)

    # Now construct python object...
    desc = yaml_loader.construct_document(ast)

    # Force service name to be of type str
    desc = dict((str(k), v) for k, v in desc.items())

    # for each services description
    for service_name, service_desc in desc.items():
        # Check for conflicting options
        if service_desc.get('process') and service_desc.get('processes'):
            raise ValueError(
                'You can\'t have both "process" and "processes" at '
                'the same time in service "{0}"'.format(service_name))

        # Inject defaults values if necessary
        for def_name, def_node in _schema.subnode['*'].subnode.items():
            if def_node.default is None:
                continue
            if def_name not in service_desc:
                service_desc[def_name] = def_node.default

    return desc
Example #16
def read_header(filename):
    """Reads header information on a data table file.

       The header information can optionally have one (1) document with
       meta information. If this document exists, it must come before
       the dtype information for the table.
    """
    import yaml
    with open(filename, 'r') as input_file:
        loader = yaml.SafeLoader(input_file)  # No python-specific yaml

        # Where we're storing the meta data
        meta = loader.get_data()

        # List of tuples
        if loader.check_token(yaml.DocumentStartToken):
            dtype = loader.get_data()
        else:
            dtype = meta
            meta = None

    dtype = convert_dtype(dtype)
    return meta, dtype
Example #17
    def __init__(self, file):
        self.loader = yaml.SafeLoader(file.read())

        self.loader.compose_node = self._compose_node_shim
Example #18
    def __init__(self, file: NamedIO):
        self.content = file.read()

        self.loader = yaml.SafeLoader(self.content)
        self.loader.compose_node = self._compose_node_shim  # type: ignore
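`_compose_node_shim` does not appear in these fragments; a plausible sketch in the spirit of the line-tracking wrapper from Example #1 (the body is an assumption):

    def _compose_node_shim(self, parent, index):
        line = self.loader.line
        node = yaml.composer.Composer.compose_node(self.loader, parent, index)
        node.__line__ = line + 1  # 1-based line where this node begins
        return node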
Example #19
except ImportError:
    sys.exit(
        "Please install pyyaml. (In some package managers, this is called python-yaml or python3-yaml.)"
    )

# Why is this script necessary? Well, the different jobs run in the CI build have a lot of shared steps.
# To avoid duplication, the CI YAML file relies on a lot of anchors (look up "YAML anchors" for details)
# to reuse pieces of information.
# Unfortunately, GitHub does not support YAML anchors in workflow files:
# https://github.community/t/support-for-yaml-anchors/16128/33
# The only way to get it to work is to have an intermediary script like this.

repo_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

with open(os.path.join(repo_dir, "ci/spec.yml")) as f:
    loader = yaml.SafeLoader(f)

    loader.bool_values = dict(yaml.SafeLoader.bool_values)
    # GitHub Actions has an "on" key that gets translated to 'true' without this.
    loader.bool_values.update({"on": "on"})

    ci_spec = loader.get_single_data()

ci_spec.pop(".anchors", None)

with open(os.path.join(repo_dir, ".github/workflows/ci.yml"), "w") as f:
    f.write(
        "# WARNING: Do not edit this file manually! Edit ci/spec.yml and run ci/regen-workflow.py.\n\n"
    )

    json.dump(ci_spec, f)
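The `bool_values` override exists because YAML 1.1 resolves a bare `on` key to boolean True, which would break the workflow's `on:` trigger section; a standalone illustration:

import yaml

print(yaml.safe_load("on: [push]"))  # {True: ['push']} -- the GitHub Actions pitfall

loader = yaml.SafeLoader("on: [push]")
loader.bool_values = dict(yaml.SafeLoader.bool_values)
loader.bool_values.update({"on": "on"})
print(loader.get_single_data())  # {'on': ['push']}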
Example #20
import yaml

# Open the file and pass the Loader *class* (not an instance) to yaml.load().
with open('conf.yaml') as conf_file:
    config = yaml.load(conf_file, yaml.SafeLoader)


def run(request):
    args = request.args
    payload = request.content
Example #21
    def parse(self, content, device, job_id, logger, dispatcher_config,
              env_dut=None):
        self.loader = yaml.SafeLoader(content)
        self.loader.compose_node = self.compose_node
        self.loader.construct_mapping = self.construct_mapping
        data = self.loader.get_single_data()
        job = CCMJob(job_id, data, logger)
        test_counts = {}
        job.device = device
        job.parameters['env_dut'] = env_dut
        # Load the dispatcher config
        job.parameters['dispatcher'] = {}
        if dispatcher_config is not None:
            config = yaml.safe_load(dispatcher_config)
            if isinstance(config, dict):
                job.parameters['dispatcher'] = config

        def deploy_local_source(src_path):
            """
            Deploy rootfs/kernel/dtb/packages so they can be accessed locally.
            src_path = '/var/www/lava/source/qemu/kvm-debian-wheezy.img.gz'
            """
            debug = True
            def print_debug(msg=''):
                if debug:
                    print(msg, file=open('/tmp/deploy_local_source.txt', 'a'))
                else:
                    print(msg)
            import os
            import shutil
            import random
            dst_path = ''.join(src_path.split('source/'))
            is_repo = False
            latest_path = None
            if os.path.isdir(src_path):
                for root, dirs, files in os.walk(src_path):
                    if 'repodata' in dirs:
                        is_repo = True
                        break

            if 'latest' in src_path:
                path_list = src_path.split('latest')
                src_link_path = path_list[0]+'latest'
                src_real_path = os.path.realpath(src_link_path)
                dst_link_path = ''.join(src_link_path.split('source'))
                dst_real_path = ''.join(src_real_path.split('source'))
                index = src_path.split('/').index('latest')
                link_dir = '/'+src_path.split('/')[index+1]
            else:
                src_real_path = os.path.dirname(src_path)
                dst_real_path = os.path.dirname(dst_path)
                link_dir = '/'+os.path.basename(src_path)

            os.makedirs(dst_real_path, exist_ok=True)

            if 'latest' in src_path:
                link_cmd = "ln -rs %s %s" % (dst_real_path, dst_link_path)
                print_debug(link_cmd)
                if os.path.exists(dst_link_path):
                    print_debug('dst_real_path:'+dst_real_path + '\ndst_link_path:'+dst_link_path)
                    if os.path.realpath(dst_real_path) != os.path.realpath(dst_link_path):
                        os.remove(dst_link_path)
                        os.system(link_cmd)
                else:
                    os.system(link_cmd)
            if is_repo and not os.path.exists(dst_real_path+link_dir):
                link_cmd = "ln -rs %s %s" % (src_real_path+link_dir, dst_real_path+link_dir)
                os.system(link_cmd)
            else:
                dst_dir = os.path.dirname(dst_path)
                os.makedirs(dst_dir, exist_ok=True)
                if not os.path.exists(dst_path):
                    end = str(random.random()).replace('.', '')
                    shutil.copy(src_path, dst_path+end)
                    if not os.path.exists(dst_path):
                        shutil.move(dst_path+end, dst_path)
                    else:
                        os.remove(dst_path+end)
                print_debug('src_path :'+src_path + '\ndst_path:'+dst_path)

        # replace source http server as dispatcher server.
        import re
        def redefine_def_url(param):
            if isinstance(param, dict):
                for key in param:
                    param[key] = redefine_def_url(param[key])
            elif isinstance(param, list):
                # Rebind by index: assigning to a bare loop variable would not update the list.
                for idx, item in enumerate(param):
                    param[idx] = redefine_def_url(item)
            elif isinstance(param, str):
                tmp = dispatcher_ip.split(':')
                if len(tmp) == 2:
                    tmp_ip = tmp[0]+':18080'
                else:
                    tmp_ip = tmp[0]+':8080'
                if 'http://pek-lava.wrs.com' in param:
                    src_path = re.sub('http:.*8080', '/var/www/lava/source', param)
                    deploy_local_source(src_path)
                    param = re.sub('http:.*8080', 'http://'+tmp_ip, param)
            return param

        dispatcher_ip = job.parameters['dispatcher']['dispatcher_ip']
        redefine_def_url(job.parameters)

        level_tuple = Protocol.select_all(job.parameters)
        # sort the list of protocol objects by the protocol class level.
        job.protocols = [item[0](job.parameters, job_id) for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
        pipeline = Pipeline(job=job)
        self._timeouts(data, job)

        # deploy and boot classes can populate the pipeline differently depending
        # on the test action type they are linked with (via namespacing).
        # This code builds an information dict for each namespace which is then
        # passed as a parameter to each Action class to use.
        test_info = {}
        test_actions = ([action for action in data['actions'] if 'test' in action])
        for test_action in test_actions:
            test_parameters = test_action['test']
            test_type = LavaTest.select(device, test_parameters)
            namespace = test_parameters.get('namespace', 'common')
            connection_namespace = test_parameters.get('connection-namespace', namespace)
            if namespace in test_info:
                test_info[namespace].append({'class': test_type, 'parameters': test_parameters})
            else:
                test_info.update({namespace: [{'class': test_type, 'parameters': test_parameters}]})
            if namespace != connection_namespace:
                test_info.update({connection_namespace: [{'class': test_type, 'parameters': test_parameters}]})

        # FIXME: also read permissible overrides from device config and set from job data
        # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
        for action_data in data['actions']:
            action_data.pop('yaml_line', None)
            for name in action_data:
                # Set a default namespace if needed
                namespace = action_data[name].setdefault('namespace', 'common')
                test_counts.setdefault(namespace, 1)

                if name == 'deploy' or name == 'boot' or name == 'test':
                    parse_action(action_data, name, device, pipeline,
                                 test_info, test_counts[namespace])
                    if name == 'test':
                        test_counts[namespace] += 1
                elif name == 'repeat':
                    count = action_data[name]['count']  # first list entry must be the count dict
                    repeats = action_data[name]['actions']
                    for c_iter in range(count):
                        for repeating in repeats:  # block of YAML to repeat
                            for repeat_action in repeating:  # name of the action for this block
                                if repeat_action == 'yaml_line':
                                    continue
                                repeating[repeat_action]['repeat-count'] = c_iter
                                namespace = repeating[repeat_action].setdefault('namespace', 'common')
                                test_counts.setdefault(namespace, 1)
                                parse_action(repeating, repeat_action, device,
                                             pipeline, test_info, test_counts[namespace])
                                if repeat_action == 'test':
                                    test_counts[namespace] += 1

                elif name == 'command':
                    action = CommandAction()
                    action.parameters = action_data[name]
                    pipeline.add_action(action)

                else:
                    raise JobError("Unknown action name '%s'" % name)

        # there's always going to need to be a finalize_process action
        finalize = FinalizeAction()
        pipeline.add_action(finalize)
        finalize.populate(None)
        job.pipeline = pipeline
        if 'compatibility' in data:
            try:
                job_c = int(job.compatibility)
                data_c = int(data['compatibility'])
            except ValueError as exc:
                raise JobError('invalid compatibility value: %s' % exc)
            if job_c < data_c:
                raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
        return job
Example #22
    def parse(self, content, device, job_id, logger, dispatcher_config,
              env_dut=None):
        self.loader = yaml.SafeLoader(content)
        self.loader.compose_node = self.compose_node
        self.loader.construct_mapping = self.construct_mapping
        data = self.loader.get_single_data()
        job = Job(job_id, data, logger)
        test_counts = {}
        job.device = device
        job.parameters['env_dut'] = env_dut
        # Load the dispatcher config
        job.parameters['dispatcher'] = {}
        if dispatcher_config is not None:
            config = yaml.safe_load(dispatcher_config)
            if isinstance(config, dict):
                job.parameters['dispatcher'] = config

        level_tuple = Protocol.select_all(job.parameters)
        # sort the list of protocol objects by the protocol class level.
        job.protocols = [item[0](job.parameters, job_id) for item in sorted(level_tuple, key=lambda level_tuple: level_tuple[1])]
        pipeline = Pipeline(job=job)
        self._timeouts(data, job)

        # deploy and boot classes can populate the pipeline differently depending
        # on the test action type they are linked with (via namespacing).
        # This code builds an information dict for each namespace which is then
        # passed as a parameter to each Action class to use.
        test_info = {}
        test_actions = ([action for action in data['actions'] if 'test' in action])
        for test_action in test_actions:
            test_parameters = test_action['test']
            test_type = LavaTest.select(device, test_parameters)
            namespace = test_parameters.get('namespace', 'common')
            connection_namespace = test_parameters.get('connection-namespace', namespace)
            if namespace in test_info:
                test_info[namespace].append({'class': test_type, 'parameters': test_parameters})
            else:
                test_info.update({namespace: [{'class': test_type, 'parameters': test_parameters}]})
            if namespace != connection_namespace:
                test_info.update({connection_namespace: [{'class': test_type, 'parameters': test_parameters}]})

        # FIXME: also read permissible overrides from device config and set from job data
        # FIXME: ensure that a timeout for deployment 0 does not get set as the timeout for deployment 1 if 1 is default
        for action_data in data['actions']:
            action_data.pop('yaml_line', None)
            for name in action_data:
                # Set a default namespace if needed
                namespace = action_data[name].setdefault('namespace', 'common')
                test_counts.setdefault(namespace, 1)

                if name == 'deploy' or name == 'boot' or name == 'test':
                    parse_action(action_data, name, device, pipeline,
                                 test_info, test_counts[namespace])
                    if name == 'test':
                        test_counts[namespace] += 1
                elif name == 'repeat':
                    count = action_data[name]['count']  # first list entry must be the count dict
                    repeats = action_data[name]['actions']
                    for c_iter in range(count):
                        for repeating in repeats:  # block of YAML to repeat
                            for repeat_action in repeating:  # name of the action for this block
                                if repeat_action == 'yaml_line':
                                    continue
                                repeating[repeat_action]['repeat-count'] = c_iter
                                namespace = repeating[repeat_action].setdefault('namespace', 'common')
                                test_counts.setdefault(namespace, 1)
                                parse_action(repeating, repeat_action, device,
                                             pipeline, test_info, test_counts[namespace])
                                if repeat_action == 'test':
                                    test_counts[namespace] += 1

                elif name == 'command':
                    action = CommandAction()
                    action.parameters = action_data[name]
                    pipeline.add_action(action)

                else:
                    raise JobError("Unknown action name '%s'" % name)

        # there's always going to need to be a finalize_process action
        finalize = FinalizeAction()
        pipeline.add_action(finalize)
        finalize.populate(None)
        job.pipeline = pipeline
        if 'compatibility' in data:
            try:
                job_c = int(job.compatibility)
                data_c = int(data['compatibility'])
            except ValueError as exc:
                raise JobError('invalid compatibility value: %s' % exc)
            if job_c < data_c:
                raise JobError('Dispatcher unable to meet job compatibility requirement. %d > %d' % (job_c, data_c))
        return job
Example #23
def team_settings_yaml_to_status_label_lookup(
        settings_yaml: str) -> Dict[str, str]:
    """Parse crazy YAML into a simple lookup table.

    Raise any kind of error if the YAML does not match expectations.
    """
    def read_hash_with_indifferent_access(loader, node):
        """!ruby/hash:ActiveSupport::HashWithIndifferentAccess"""
        return loader.construct_mapping(node)

    def read_action_controller_parameters(loader, node):
        """!ruby/hash-with-ivars:ActionController::Parameters"""
        outer = loader.construct_mapping(node)
        return outer["elements"]

    def read_ruby_set(loader, node):
        """!ruby/object:Set

        # pyyaml doesn't support complex key syntax, which "!ruby/object:Set"
        # uses. (Presumably, Ruby folks decided that encoding a set as a list
        # would be too obvious.) So we nix it all. We don't need sets to read
        # statuses. And luckily, pyyaml's parser is able to skip the tokens
        # when they aren't used.
        """
        return ["<set ignored>"]

    import yaml

    loader = yaml.SafeLoader(settings_yaml)
    loader.add_constructor(
        "!ruby/hash:ActiveSupport::HashWithIndifferentAccess",
        read_hash_with_indifferent_access,
    )
    loader.add_constructor(
        "!ruby/hash-with-ivars:ActionController::Parameters",
        read_action_controller_parameters,
    )
    loader.add_constructor("!ruby/object:Set", read_ruby_set)

    try:
        settings = loader.get_single_data()
    finally:
        loader.dispose()

    def get_first(d: Dict[str, Any],
                  keys: List[str],
                  default: Any = None) -> Any:
        """Like d.get(key, default) ... for the first matching key of keys.

        Ruby-encoded YAML sometimes keys by _symbol_ and other times by
        _string_. This is shorthand to try both.

        Usage:

            statuses = get_first(d, [":statuses", "statuses"])
        """
        for key in keys:
            if key in d:
                return d[key]
        return default

    statuses_outer = get_first(
        settings,
        [":media_verification_statuses", "media_verification_statuses"])
    if statuses_outer is None:
        return {}
    statuses = get_first(statuses_outer, [":statuses", "statuses"])
    if statuses is None:
        raise RuntimeError(
            "Missing Team.settings.media_verification_statuses.statuses")

    ret = {}
    for status in statuses:
        status_id = get_first(status, [":id", "id"])
        status_label = get_first(status, [":label", "label"])
        if status_id is None or status_label is None:
            raise RuntimeError("Status %r missing id or label" % status)
        ret[status_id] = status_label
    return ret