Example #1
    def load_setting(self, project_repo, base, ext):
        path = project_repo + base + ext
        with open(path) as f:
            data = None
            if ext in ['.yml', '.yaml']:
                data = yaml.safe_load(f)  # safe_load: yaml.load without an explicit Loader is unsafe and deprecated
            elif ext in ['.json5']:
                data = json5.load(f)
            elif ext in ['.json']:
                data = json.load(f)
            self._loaded[base] = EasyDict(data)
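A standalone sketch of the same extension-based dispatch, with the unsupported-extension case made explicit (the method above silently stores EasyDict(None) for unknown extensions); the function name is hypothetical:

import json
import os

import json5
import yaml

def parse_by_ext(path):
    # Choose a parser from the file extension; raise instead of silently
    # returning None for unsupported extensions.
    ext = os.path.splitext(path)[1]
    with open(path) as f:
        if ext in ('.yml', '.yaml'):
            return yaml.safe_load(f)
        if ext == '.json5':
            return json5.load(f)
        if ext == '.json':
            return json.load(f)
    raise ValueError('unsupported extension: ' + ext)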
Example #2
def _do_parse(inp, fmt, encoding, force_types):
    """Actually parse input.

    Args:
        inp: bytes yielding file-like object
        fmt: format to use for parsing
        encoding: encoding of `inp`
        force_types:
            if `True`, integers, floats, booleans and none/null
                are recognized and returned as proper types instead of strings;
        if `False`, everything is converted to strings;
            if `None`, backend return value is used
    Returns:
        parsed `inp` (dict or list) containing unicode values
    Raises:
        various sorts of errors raised by used libraries while parsing
    """
    res = {}
    _check_lib_installed(fmt, 'parse')

    if fmt == 'ini':
        cfg = configobj.ConfigObj(inp, encoding=encoding)
        res = cfg.dict()
    elif fmt == 'json':
        if six.PY3:
            # python 3 json only reads from unicode objects
            inp = io.TextIOWrapper(inp, encoding=encoding)
        res = json.load(inp)  # json.load's 'encoding' kwarg was removed in Python 3.9; the TextIOWrapper above already decodes
    elif fmt == 'json5':
        if six.PY3:
            inp = io.TextIOWrapper(inp, encoding=encoding)
        res = json5.load(inp, encoding=encoding)
    elif fmt == 'toml':
        if not _is_utf8(encoding):
            raise AnyMarkupError('toml is always utf-8 encoded according to specification')
        if six.PY3:
            # python 3 toml prefers unicode objects
            inp = io.TextIOWrapper(inp, encoding=encoding)
        res = toml.load(inp)
    elif fmt == 'xml':
        res = xmltodict.parse(inp, encoding=encoding)
    elif fmt == 'yaml':
        # yaml guesses the encoding on its own; there seems to be no way to pass
        #  it explicitly
        res = yaml.safe_load(inp)
    else:
        raise AnyMarkupError('unknown format: {0}'.format(fmt))  # bare raise here would be a RuntimeError

    # make sure it's all unicode and all int/float values were parsed correctly
    #   the unicode part is here because of yaml on PY2 and also as workaround for
    #   https://github.com/DiffSK/configobj/issues/18#issuecomment-76391689
    return _ensure_proper_types(res, encoding, force_types)
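Since _do_parse takes a bytes file-like object, a quick way to exercise it is to wrap a bytes literal in io.BytesIO; a minimal sketch, assuming the function and its helpers above are importable:

import io

raw = b'{"port": "8080", "debug": "true"}'
# force_types=False: every scalar comes back as a string
as_strings = _do_parse(io.BytesIO(raw), 'json', 'utf-8', force_types=False)
# force_types=True: "8080" -> 8080, "true" -> True (per the docstring above)
as_types = _do_parse(io.BytesIO(raw), 'json', 'utf-8', force_types=True)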
Example #3
    def load(self, path):
        """Populate the Config class with everything that sits inside
        the given JSON file path

        path (string): the path of the JSON config file.

        """
        try:
            with io.open(path, 'rb') as fobj:
                data = json.load(fobj)

                # Put everything.
                for key, value in data.items():  # iteritems() is Python 2 only
                    setattr(self.args, key, value)
        except IOError:
            logger.exception("Error opening config file.")
        except ValueError:
            logger.exception("Config file is invalid JSON.")
        else:
            logger.info("Using config file %s.", path)
Example #4
def load_config(path):  # renamed from 'input' to avoid shadowing the builtin
    with open(path) as f:
        content = json5.load(f)
        return cfggen.build_config(content)
Example #5
def readjson(filename):
    # use a context manager so the file handle is closed promptly
    with open(filename) as fp:
        return json5.load(fp)
Example #6
    def __init__(self):
        with open('resources/versions.jrncontrol', 'r') as f:
            self.versions = json5.load(f)
Example #7
if args.config_file is None:
    error_message()
    print(
        "\tNo config file was entered. Please add a config file using the -c option."
    )
    json_files = glob.glob('*.json')
    if len(json_files) == 0:
        print('\tNo JSON files are in your present working directory.')
    else:
        print('\tJSON files in your present working directory are:')
        for file in json_files:
            print('\t\t' + file)
    print()
    sys.exit(1)

config_file = json5.load(args.config_file)
output_prefix = args.output_prefix


def get_file_info(file_name, files_information_dict, long_or_short, group_name,
                  mstim):
    search_result = re.search(r"(Str\d)_(.+)_(Cage\w)", file_name)
    if not search_result:
        print("No information was obtained for file: " + file_name)
        return files_information_dict
    stripe = search_result.group(1)
    treatment = search_result.group(2)
    cage = search_result.group(3)
    animal = treatment + '_' + cage + '_' + stripe
    if animal not in files_information_dict:
        files_information_dict[animal] = {}
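For reference, here is what the filename regex above captures on a made-up file name:

import re

m = re.search(r"(Str\d)_(.+)_(Cage\w)", "Str1_saline_CageA.csv")
# m.group(1) == 'Str1'    -> stripe
# m.group(2) == 'saline'  -> treatment
# m.group(3) == 'CageA'   -> cage
# so the animal key becomes 'saline_CageA_Str1'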
Example #8
def dump_json_bus_candidates(
    output,
    component_json5,
    i_bus_mappings,
    debug=False,
):

    def expand_if_vector(interface, port):
        if interface.is_vector(port):
            return [p[0] for p in interface.get_vector(port)]
        else:
            return port
    def json_format(p):
        return (p[0], None if p[1] is None else int(p[1]), int(p[2]))
    def ref_from_name(name):
        return {'$ref': '#/definitions/busDefinitions/{}'.format(name)}
    def get_cost_obj(interface, bm):
        port_names = [p[0] for p in interface.ports]
        prefix = common_prefix(port_names)
        o = [
            ('num_ports', interface.size),
            ('prefix', prefix),
            ('num-direction-mismatch', int(bm.cost.dc)),
            ('num-width-mismatch', int(bm.cost.wc)),
        ]
        return o

    def get_cnt_base():
        with open(component_json5) as fin:
            block_obj = json5.load(fin)
        try:
            return block_obj['definitions']['pg_cnt']
        except KeyError:  # counter not yet present in the block object
            return 0

    portgroup_objs = []
    busint_objs = []
    busint_obj_map = {}
    busint_refs = []
    busint_alt_refs = []
    pg_cnt_base = get_cnt_base()

    for i, (interface, bus_mappings) in enumerate(sorted(
        i_bus_mappings,
        key=lambda x: x[0].size,
        reverse=True,
    )):
        pg_busints = []
        for j, bus_mapping in enumerate(bus_mappings):
            bm = bus_mapping
            busint_name = 'busint-portgroup_{}-mapping_{}-prefix_{}-{}-{}'.format(
                pg_cnt_base+i,
                j,
                interface.prefix.strip('_'),
                bm.bus_def.driver_type,
                bm.bus_def.abstract_type.name,
            )
            # for all ports in mapping, just include port names and exclude width+direction
            sbm_map  = {k:v for k,v in bm.sideband_mapping.items() if v is not None}
            sbm_umap = [k for k,v in bm.sideband_mapping.items() if v is None]

            mapped_sideband_ports = list(sorted(
                sbm_map.items(),
                key=lambda x: bm.match_cost_func(x[0], x[1]),
            ))
            mapped_ports = list(sorted(
                bm.m.items(),
                key=lambda x: bm.match_cost_func(x[0], x[1]),
            ))

            # format portmap object
            portmap_o = {}
            portmap_o.update({bp[0]:pp[0] for pp, bp in mapped_ports})
            # expand vectors in output
            portmap_o = {
                bpn : expand_if_vector(interface, ppn) for bpn, ppn in portmap_o.items()
            }
            # add user group ports
            for uport, ports in sorted(bm.user_group_mapping.items()):
                pp = [expand_if_vector(interface, p[0]) for p in ports]
                portmap_o.update({uport[0]:pp})
            # any ports that aren't mapped to a normal busdef {req,opt} port or
            # a busdef-specified user group are unmapped
            if len(bm.unmapped_ports) > 0:
                portmap_o['__UMAP__'] = [
                    expand_if_vector(interface, p[0]) for p in bm.unmapped_ports
                ]
            # FIXME remove old
            # provide best guess mapping for sideband signals
            #portmap_o.update({bp[0]:pp[0] for pp, bp in mapped_sideband_ports})
            #if len(sbm_umap) > 0:
            #    portmap_o['__UMAP__'] = [p[0] for p in sbm_umap]

            # format debug portmap object that tags req, opt, sideband signals
            req_mapped_names = [NoIndent((bp[0], pp[0])) for pp, bp in mapped_ports if bp in bm.bus_def.req_ports]
            opt_mapped_names = [NoIndent((bp[0], pp[0])) for pp, bp in mapped_ports if bp in bm.bus_def.opt_ports]
            sideband_names =   [NoIndent((bp[0], pp[0])) for pp, bp in mapped_sideband_ports]
            sideband_names.extend([NoIndent((None, pp[0])) for pp in sbm_umap])
            debug_portmap_o = [
                ('req_mapped', req_mapped_names),
                ('opt_mapped', opt_mapped_names),
                ('user_sideband', sideband_names),
                ('unmapped', []),
            ]
            o = {
                'name': interface.prefix.strip('_'),
                'interfaceMode': bm.bus_def.driver_type,
                'busType': bm.bus_def.bus_type,
                'abstractionTypes': [{
                    'viewRef': 'RTLview',
                    'portMaps': portmap_o if not debug else debug_portmap_o,
                }],
            }
            pg_busints.append((busint_name, o))

        assert len(pg_busints) > 0
        busint_objs.extend([o for name, o in pg_busints])
        busint_refs.append(ref_from_name(pg_busints[0][0]))
        busint_alt_refs.extend(
            [ref_from_name(name) for name, o in pg_busints[1:]]
        )
        busint_obj_map.update({name:o for name, o in pg_busints})

        pgo = (
            'portgroup_{}'.format(i),
            # show cost of best bus mapping
            [NoIndent(e) for e in get_cost_obj(interface, bus_mappings[0])],
            #[NoIndent(json_format(p)) for p in sorted(interface.ports)],
        )
        portgroup_objs.append(pgo)

    # update input block object with mapped bus interfaces and alternates
    with open(component_json5) as fin:
        block_obj = json5.load(fin)
    dkey = 'definitions'
    if dkey not in block_obj:
        block_obj[dkey] = {}

    # bump counter for the case portinf is run again
    block_obj[dkey]['pg_cnt'] = pg_cnt_base + len(i_bus_mappings)

    bdkey = 'busDefinitions'
    if bdkey not in block_obj[dkey]:
        block_obj[dkey][bdkey] = {}
    block_obj[dkey][bdkey].update(busint_obj_map)

    bmkey = 'busMappedPortGroups'
    if bmkey not in block_obj[dkey]:
        block_obj[dkey][bmkey] = []
    block_obj[dkey][bmkey].extend(portgroup_objs)
    assert 'component' in block_obj, \
        'component key not defined in input block object'
    comp_obj = block_obj['component']

    bkey = 'busInterfaces'
    refs = [] if bkey not in comp_obj else comp_obj[bkey]
    refs.extend(busint_refs)
    comp_obj[bkey] = [NoIndent(o) for o in refs]

    abkey = 'busInterfaceAlts'
    refs = [] if abkey not in comp_obj else comp_obj[abkey]
    refs.extend(busint_alt_refs)
    comp_obj[abkey] = [NoIndent(o) for o in refs]

    if debug:
        block_obj = [
            ('portGroups', portgroup_objs),
            ('busInterfaces', busint_refs),
            ('busDefinitions', busint_objs),
        ]
    s = json.dumps(block_obj, indent=4, cls=PrettyPrintEncoder)

    if hasattr(output, 'write'):
        _ = output.write(s)
    else:
        with open(output, 'w') as fout:
            fout.write(s)
    return
Example #9
    jsons_list = glob.glob("*.json")
    if len(jsons_list) == 1:
        json_file = jsons_list[0]
    try:
        if len(jsons_list) == 0:
            raise ValueError('No JSON file present to use as config file')
        if len(jsons_list) > 1:
            raise ValueError(
                'More than one JSON file present and no file specified as config file. Please specify your config file with --config_file.'
            )
    except ValueError as error:
        print(repr(error))
        raise SystemExit()

with open(json_file) as file:
    databases_list = json5.load(file)

directory = args.LOVD3_directory
output_directory = args.output_directory
if not output_directory:
    # If no output directory is specified, save the output in the same directory as the input
    output_directory = directory

if not disease_names:
    # If the disease names are not specified at the command line, they can be inferred from the subdirectories of the first database directory
    disease_names = []
    first_database = databases_list[0]["name"]
    subdirectories = glob.glob(
        directory + "/" + first_database +
        '/*/')  # list all subdirectories in the first database directory
    for subdir in subdirectories:
Example #10
            )
            config[key] = template[key]
            edited = True
        elif isinstance(template[key], dict):
            if not isinstance(config[key], dict):
                logger.warning(
                    f"Expected a JSON object at {path}.{key}, but {type(config[key])} was found in the config file. Copying from the template."
                )
                config[key] = template[key]
                edited = True
            edited = __check_settings(template[key], config[key],
                                      f"{path}.{key}") or edited
    return edited


if os.path.exists("./settings.json"):
    with open("./settings.json", "r", encoding="utf8") as f:
        settings = json5.load(f)
else:
    settings = dict()

with open("./settings.template.json", "r", encoding="utf8") as f:
    __template = json5.load(f)

__edited = __check_settings(__template, settings)
if __edited:
    with open("./settings.json", "w", encoding="utf8") as f:
        json5.dump(settings, f, ensure_ascii=False, quote_keys=True)

logger.info("Config file was loaded successfully.")
Example #11
            ': ' if book == '' else ' (enter for {})'.format(book)))
        outputInput = input('Input dictionary path' + (
            ': ' if output == '' else ' (enter for {})'.format(output)))
        book = book if bookInput == '' else bookInput
        output = output if outputInput == '' else outputInput

        # Save last path
        last['date'] = date
        last['bookPath'] = book
        last['outputPath'] = output
        override(lastCfgFile, last)

    # Open file
    with open(book) as bookFile, openRW(output) as outFile:
        # Read existing json
        book = json5.load(bookFile)
        out = json5.loads(readJsonStr(outFile))

        # Save function
        def save():
            override(outFile, out)

        # Make sure that all meta info keys exist
        print('Checking dictionary info...')
        for key in [
                'name', 'description', 'author', 'license', 'wordLanguage',
                'definitionLanguage'
        ]:
            if key not in out:
                out[key] = input('> Dictionary {}: '.format(key))
        print('Dictionary info checked.')
Example #12
)
parser.add_argument(
    '--test',
    default=False,
    action='store_true',
    help='check for feasibility',
)
args = parser.parse_args()
os.makedirs(args.output, exist_ok=True)

timer = time.time()
start = timer

# load config
with open(args.config) as f:
    config = json5.load(f)

# build up the data generator
with open(config['generator']['data']) as f:
    data_config = json5.load(f)
data_list = data_config['list']
if args.test:
    data_list = data_list[:1]
loader_config = data_config['loader']
loader_name = loader_config.pop('name')
data_loader = DataLoader(loader_name, **loader_config)
data_loader.set_data_list(data_list)
data_gen = DataGenerator(data_loader, config['generator']['struct'])

# build up the reverter
reverter = Reverter(data_gen)
Example #13
import json5
filename = 'game_information'
new_dict = {'high_score':300}
with open(filename, 'w') as f:
    json5.dump(new_dict, f)


with open(filename) as f:
    data = json5.load(f)
print(data['high_score'])
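Note that json5.dump writes JSON5 rather than strict JSON (for instance, the json5 package leaves identifier-like keys unquoted by default), so a file produced this way may not be readable with the standard json module; passing quote_keys=True, as Example #10 does, keeps the output JSON-compatible.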
Example #14
def do_init(args):
    """Initialize a model directory for training"""
    import json5
    import numpy as np
    import scipy.io.wavfile
    from gruut_ipa import IPA

    dataset_items: typing.Dict[str, DatasetItem] = {}

    model_dir = Path(args.model)
    language = args.language
    dataset_dir = Path(args.dataset)
    model_name = args.name or model_dir.name

    _LOGGER.debug("Loading gruut language %s", language)
    gruut_lang = gruut.Language.load(language)
    assert gruut_lang, f"Unsupported language: {language}"

    # Create base output directory
    model_dir.mkdir(parents=True, exist_ok=True)

    # Load metadata
    metadata_path = dataset_dir / "metadata.csv"
    _LOGGER.debug("Loading metadata file from %s", metadata_path)
    _LOGGER.debug("Expecting WAV files in %s", dataset_dir)

    with open(metadata_path, "r") as metadata_file:
        for line in metadata_file:
            line = line.strip()
            if line:
                item_id, item_text = line.split("|", maxsplit=1)
                wav_path = dataset_dir / f"{item_id}.wav"
                if not wav_path.is_file():
                    _LOGGER.warning("Missing %s", wav_path)
                    continue

                dataset_items[item_id] = DatasetItem(id=item_id,
                                                     text=item_text,
                                                     wav_path=wav_path)

    assert dataset_items, "No items in dataset"
    _LOGGER.debug("Loaded transcripts for %s item(s)", len(dataset_items))

    # -------------
    # Phoneme Cache
    # -------------

    pad = "_"

    # Acute/grave accents (' and `)
    accents = []
    if gruut_lang.keep_accents:
        accents = [IPA.ACCENT_ACUTE.value, IPA.ACCENT_GRAVE.value]

    # Primary/secondary stress (ˈ and ˌ)
    # NOTE: Acute accent (0x0027) != primary stress (0x02C8)
    stresses = []
    if gruut_lang.keep_stress:
        stresses = [IPA.STRESS_PRIMARY.value, IPA.STRESS_SECONDARY.value]

    # Tones
    tones = gruut_lang.tones

    # Word break
    word_break = [IPA.BREAK_WORD.value]
    if args.no_word_breaks:
        word_break = []

    # Always include pad and break symbols.
    # In the future, intonation/tones should also be added.
    phonemes_list = ([pad, IPA.BREAK_MINOR.value, IPA.BREAK_MAJOR.value] +
                     word_break + accents + stresses + tones +
                     sorted([p.text for p in gruut_lang.phonemes]))

    # Write phonemes to a text file
    phonemes_text_path = model_dir / "phonemes.txt"
    with open(phonemes_text_path, "w") as phonemes_text_file:
        for phoneme_idx, phoneme in enumerate(phonemes_list):
            print(phoneme_idx, phoneme, file=phonemes_text_file)

    # Index where actual model phonemes start
    phoneme_offset = 1

    # Map to indexes
    phonemes = {p: i for i, p in enumerate(phonemes_list)}

    _LOGGER.debug("Phonemes: %s", phonemes)

    phoneme_cache_dir = model_dir / "phoneme_cache"
    phoneme_cache_dir.mkdir(parents=True, exist_ok=True)

    if not args.skip_phonemes:
        _compute_phonemes(
            dataset_items,
            gruut_lang,
            phonemes,
            model_dir,
            phoneme_cache_dir,
            word_breaks=not args.no_word_breaks,
        )

    # Write phonemized sentences
    if phoneme_cache_dir.is_dir():
        dataset_phonemes_path = model_dir / "dataset_phonemes.csv"

        with open(dataset_phonemes_path, "w") as dataset_phonemes_file:
            phonemes_writer = csv.writer(dataset_phonemes_file, delimiter="|")
            phonemes_writer.writerow(("id", "text", "phonemes"))

            for phoneme_path in phoneme_cache_dir.glob("*.npy"):
                item_id = re.sub("_phoneme$", "", phoneme_path.stem)
                sequence = np.load(phoneme_path, allow_pickle=True)
                actual_phonemes = [phonemes_list[index] for index in sequence]

                item = dataset_items.get(item_id)
                if item:
                    actual_phonemes_str = " ".join(actual_phonemes)
                    phonemes_writer.writerow(
                        (item_id, item.text, actual_phonemes_str))
                else:
                    _LOGGER.warning(
                        "Item %s is in phoneme cache but not in dataset",
                        item_id)

    # ----------
    # TTS Config
    # ----------

    # Get sample rate from first WAV file
    first_item = next(iter(dataset_items.values()))
    sample_rate, _ = scipy.io.wavfile.read(first_item.wav_path)
    _LOGGER.debug("Assuming sample rate is %s Hz", sample_rate)

    # Path to MozillaTTS submodule
    tts_dir = _DIR.parent / "TTS"

    # Load TTS model base config
    tts_configs_dir = tts_dir / "TTS" / "tts" / "configs"

    model_type = args.model_type.strip().lower()
    if model_type == "tacotron2":
        tts_config_in_path = tts_configs_dir / "config.json"
    elif model_type == "glowtts":
        tts_config_in_path = tts_configs_dir / "glow_tts_gated_conv.json"
    else:
        raise ValueError(f"Unexpected model type: {model_type}")

    _LOGGER.debug("Loading TTS config template from %s", tts_config_in_path)
    with open(tts_config_in_path, "r") as tts_config_file:
        tts_config = json5.load(tts_config_file)

    # Patch configuration and write to output directory
    tts_config["run_name"] = model_name

    tts_config["audio"]["sample_rate"] = sample_rate
    tts_config["audio"]["do_trim_silence"] = True
    tts_config["audio"]["signal_norm"] = True

    tts_config["output_path"] = str(model_dir / "model")
    tts_config["phoneme_cache_path"] = str(phoneme_cache_dir)
    tts_config["phoneme_language"] = language
    tts_config["phoneme_backend"] = "gruut"

    # Disable mixed precision
    tts_config["mixed_precision"] = False

    # Disable global style tokens
    tts_config["use_gst"] = False

    if "gst" not in tts_config:
        tts_config["gst"] = {}

    tts_config["gst"]["gst_use_speaker_embedding"] = False

    # Disable speaker embedding
    tts_config["use_external_speaker_embedding_file"] = False
    tts_config["external_speaker_embedding_file"] = None
    tts_config["use_speaker_embedding"] = False

    # Use custom phonemes
    tts_config["use_phonemes"] = True
    tts_config["enable_eos_bos_chars"] = False
    tts_config["characters"] = {
        "pad": pad,
        "eos": "~",
        "bos": "^",
        "phonemes": phonemes_list[phoneme_offset:],
        "characters": "",
        "punctuations": "",
        "eos_bos_phonemes": False,
        "sort_phonemes": False,
        "word_breaks": not args.no_word_breaks,
    }

    tts_config["datasets"] = [{
        "name": "ipa_tts",
        "path": str(dataset_dir),
        "meta_file_train": "metadata.csv",
        "meta_file_val": None,
    }]

    # Gruut will do the cleaning
    tts_config["text_cleaner"] = "no_cleaners"

    # Delay testing a little later and do it less frequently
    tts_config["test_delay_epochs"] = 100
    tts_config["test_n_epochs"] = 10

    # Test sentences
    test_sentences = _TEST_SENTENCES.get(language)
    if test_sentences:
        test_sentences_path = model_dir / "test_sentences.txt"
        with open(test_sentences_path, "w") as test_sentences_file:
            for sentence in test_sentences:
                print(sentence, file=test_sentences_file)

        tts_config["test_sentences_file"] = str(test_sentences_path)

    # -------------------
    # Compute Audio Stats
    # -------------------

    tts_stats_path = str(model_dir / "scale_stats.npy")

    if not args.skip_audio_stats:
        _compute_audio_stats(dataset_items, tts_config, tts_stats_path)

    tts_config["audio"]["stats_path"] = str(tts_stats_path)

    # Write TTS config
    tts_config_out_path = model_dir / "config.json"
    with open(tts_config_out_path, "w") as tts_config_file:
        json.dump(tts_config, tts_config_file, indent=4, ensure_ascii=False)

    _LOGGER.debug("Wrote TTS config to %s", tts_config_out_path)

    # --------------
    # Vocoder config
    # --------------

    vocoder_dir = model_dir / "vocoder"
    vocoder_dir.mkdir(parents=True, exist_ok=True)

    if args.vocoder_model_type == "multiband-melgan":
        vocoder_config_in_path = (tts_dir / "TTS" / "vocoder" / "configs" /
                                  "multiband_melgan_config.json")
    elif args.vocoder_model_type == "fullband-melgan":
        vocoder_config_in_path = (tts_dir / "TTS" / "vocoder" / "configs" /
                                  "universal_fullband_melgan.json")
    else:
        raise ValueError(
            f"Unknown vocoder model type: {args.vocoder_model_type}")

    _LOGGER.debug("Loading vocoder config template from %s",
                  vocoder_config_in_path)
    with open(vocoder_config_in_path, "r") as vocoder_config_file:
        vocoder_config = json5.load(vocoder_config_file)

    # Patch vocoder config
    vocoder_config["data_path"] = str(dataset_dir)
    vocoder_config["run_name"] = model_name
    vocoder_config["output_path"] = str(vocoder_dir / "model")

    # Use same audio configuration as voice
    vocoder_config["audio"] = tts_config["audio"]

    if args.vocoder_batch_size:
        vocoder_config["batch_size"] = args.vocoder_batch_size

    vocoder_config_out_path = vocoder_dir / "config.json"
    with open(vocoder_config_out_path, "w") as vocoder_out_file:
        json.dump(vocoder_config,
                  vocoder_out_file,
                  indent=4,
                  ensure_ascii=False)

    _LOGGER.debug("Wrote vocoder config to %s", vocoder_config_out_path)
Example #15
    'Awards': ["OR", "NV", "NY"]
}

player2 = {
    'PlayerName': "Hillary Clinton",
    'Score': 346,
    'Awards': ["WI", "TX", "MI"]
}

myPlayers = []
myPlayers.append(player1)
myPlayers.append(player2)

# ------ SAVE BY JSON ------

json5.dump(myPlayers, myfile)
myfile.close()

# ------ LOAD BY JSON ------

myfile = open(filename, mode='r')
json_data = json5.load(myfile)

for user in json_data:
    print("Player Name is: " + str(user['PlayerName']))
    print("Player Score is: " + str(user['Score']))
    print("Player Awards is: " + str(user['Awards'][0]))
    print("Player Awards is: " + str(user['Awards'][1]))
    print("Player Awards is: " + str(user['Awards'][2]))
    print("--------------------------------------------\n\n")
Example #16
def draft07_strict_meta_schema():
    with open(
            Path(__file__).parents[2] / 'support' /
            'draft-07-strict-schema.json5') as f:
        return json5.load(f)
Example #17
def _post_process(args: Object, mode: str):
    # if not args.output_dir.startswith('models'):
    #     args.output_dir = os.path.join('models', args.output_dir)
    os.makedirs(args.output_dir, exist_ok=True)
    if not args.name:
        args.name = str(datetime.now())
    args.summary_dir = os.path.join(args.output_dir, args.name)
    if mode.lower() == "train":  #  清空模型目录
        if os.path.exists(args.summary_dir):
            shutil.rmtree(args.summary_dir)
        os.makedirs(args.summary_dir)
    data_config_file = os.path.join(args.output_dir, 'data_config.json5')
    if os.path.exists(data_config_file):
        with open(data_config_file) as f:
            config = json5.load(f)
            for k, v in config.items():
                args_v = "none"
                if hasattr(args, k):
                    args_v = getattr(args, k)
                if args_v != v:
                    print(
                        curLine(),
                        "wrong for config: args.%s=%s, not equal to %s" %
                        (k, args_v, v))
                    print(
                        'ERROR: Data configurations are different. Please use another output_dir or '
                        'remove the older one manually.')
                    exit()
    else:
        with open(data_config_file, 'w') as f:
            keys = [
                'data_dir', 'min_df', 'max_vocab', 'max_len', 'min_len',
                'lower_case', 'pretrained_embeddings', 'embedding_mode'
            ]
            json5.dump({k: getattr(args, k) for k in keys}, f)
    args.metric = args.metric.lower()
    args.watch_metrics = [m.lower() for m in args.watch_metrics]
    if args.metric not in args.watch_metrics:
        args.watch_metrics.append(args.metric)
    assert args.pretrained_embeddings, 'pretrained embeddings must be provided.'

    def samples2steps(n):
        return int(math.ceil(n / args.batch_size))

    if not hasattr(args, 'log_per_updates'):
        args.log_per_updates = samples2steps(args.log_per_samples)
    if not hasattr(args, 'eval_per_updates'):
        args.eval_per_updates = samples2steps(args.eval_per_samples)
    if not hasattr(args, 'eval_per_updates_warmup'):
        args.eval_per_updates_warmup = samples2steps(
            args.eval_per_samples_warmup)
    if not hasattr(args, 'eval_warmup_steps'):
        args.eval_warmup_steps = samples2steps(args.eval_warmup_samples)
    if not hasattr(args, 'min_steps'):
        args.min_steps = samples2steps(args.min_samples)
    if not hasattr(args, 'early_stopping'):
        args.early_stopping = samples2steps(args.tolerance_samples)
    if not hasattr(args, 'lr_warmup_steps'):
        args.lr_warmup_steps = samples2steps(args.lr_warmup_samples)
    if not hasattr(args, 'lr_decay_steps'):
        args.lr_decay_steps = samples2steps(args.lr_decay_samples)
Example #18
    loss_function = initialize_config(config["loss_function"])

    trainer = JointTrainer(config=config,
                           resume=resume,
                           model=model,
                           optim=optimizer,
                           loss_function=loss_function,
                           train_dl=train_dataloader,
                           validation_dl=validation_dataloader)

    trainer.train()


if __name__ == '__main__':
    # parser = argparse.ArgumentParser(description="SimpleCNN")
    # parser.add_argument("-C", "--configuration", required=True, type=str, default='config/simple_cnn.json5',
    #                     help="Configuration (*.json).")
    # parser.add_argument("-R", "--resume", action="store_true", default=False,
    #                     help="Resume experiment from latest checkpoint.")
    # args = parser.parse_args()

    config_path = "config/20200323_joint_simple_cnn.json5"

    with open(config_path) as f:
        configuration = json5.load(f)
    configuration["experiment_name"], _ = os.path.splitext(
        os.path.basename(config_path))
    configuration["config_path"] = config_path

    main(configuration, resume=False)
Example #19
    def build_tree(self, path: str, options: Optional[BuildOptions] = None) -> TreeNode:
        with open(path) as f:
            return build_tree(json5.load(f), options)
Example #20
def load_metadata():
  flags_path = os.path.join(ROOT_PATH, 'chrome', 'browser',
                            'flag-metadata.json')
  # open via a context manager so the handle is closed deterministically
  with open(flags_path) as f:
    return json5.load(f)
Example #21
    def load_config(self, config_file: str) -> None:
        """Method to load a network configuration file.

        Network should be specified in json format.
        Will populate nodes, qchannels, cchannels, and graph fields.
        Will also generate and install forwarding tables for quantum router nodes.

        Args:
            config_file (str): path to json file specifying network.

        Side Effects:
            Will modify graph, graph_no_middle, qchannels, and cchannels attributes.
        """

        with open(config_file) as f:
            topo_config = json5.load(f)

        # create nodes
        for node_params in topo_config["nodes"]:
            name = node_params.pop("name")
            node_type = node_params.pop("type")

            if node_type == "QKDNode":
                node = QKDNode(name, self.timeline, **node_params)
            elif node_type == "QuantumRouter":
                node = QuantumRouter(name, self.timeline, **node_params)
            else:
                node = Node(name, self.timeline)

            self.add_node(node)

        # create discrete cconnections (two way classical channel)
        if "cconnections" in topo_config:
            for cchannel_params in topo_config["cconnections"]:
                node1 = cchannel_params.pop("node1")
                node2 = cchannel_params.pop("node2")
                self.add_classical_connection(node1, node2, **cchannel_params)

        # create discrete cchannels
        if "cchannels" in topo_config:
            for cchannel_params in topo_config["cchannels"]:
                node1 = cchannel_params.pop("node1")
                node2 = cchannel_params.pop("node2")
                self.add_classical_channel(node1, node2, **cchannel_params)

        # create cchannels from a RT table
        if "cchannels_table" in topo_config:
            table_type = topo_config["cchannels_table"].get("type", "RT")
            assert table_type == "RT", "non-RT tables not yet supported"
            labels = topo_config["cchannels_table"]["labels"]
            table = topo_config["cchannels_table"]["table"]
            assert len(labels) == len(
                table)  # check that number of rows is correct

            for i in range(len(table)):
                assert len(table[i]) == len(
                    labels)  # check that number of columns is correct
                for j in range(len(table[i])):
                    if table[i][j] == 0:  # skip if have 0 entries
                        continue
                    delay = table[i][j] / 2  # divide RT time by 2
                    cchannel_params = {"delay": delay, "distance": 1e3}
                    self.add_classical_channel(labels[i], labels[j],
                                               **cchannel_params)

        # create qconnections (two way quantum channel)
        if "qconnections" in topo_config:
            for qchannel_params in topo_config["qconnections"]:
                node1 = qchannel_params.pop("node1")
                node2 = qchannel_params.pop("node2")
                self.add_quantum_connection(node1, node2, **qchannel_params)

        # create qchannels
        if "qchannels" in topo_config:
            for qchannel_params in topo_config["qchannels"]:
                node1 = qchannel_params.pop("node1")
                node2 = qchannel_params.pop("node2")
                self.add_quantum_channel(node1, node2, **qchannel_params)

        # generate forwarding tables
        for node in self.get_nodes_by_type("QuantumRouter"):
            table = self.generate_forwarding_table(node.name)
            for dst, next_node in table.items():
                node.network_manager.protocol_stack[0].add_forwarding_rule(
                    dst, next_node)
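For context, a minimal topology of the shape load_config consumes, built as a JSON5 string; the keys follow those read above, while the concrete nodes and values are invented:

import json5

topo_config = json5.loads('''
{
    nodes: [
        {name: "r1", type: "QuantumRouter"},
        {name: "r2", type: "QuantumRouter"},
    ],
    // round-trip table: each delay used is table[i][j] / 2
    cchannels_table: {
        type: "RT",
        labels: ["r1", "r2"],
        table: [[0, 2e6],
                [2e6, 0]],
    },
    qconnections: [
        {node1: "r1", node2: "r2"},
    ],
}
''')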
Example #22
def check(logger, checker, user_settings, test):
    logger.debug('check type')

    checker.check((
        'path',
        'debug_level',
        'img_path',
        'video_path',
        'language',
        'vgg',
        'svm_model_path',
        'vgg_model_path',
        'xgboost_model_path',
        'model_t',
        'generation_t',
    ), str)
    checker.check((
        'videos',
        'frame_range',
        'init_box_scale',
    ), list)
    checker.check('data', dict)  # ('data') was just a parenthesized string, not a tuple
    checker.check((
        'file_output',
        'time_debug',
        'linux',
        'sift',
        'OF',
        'detectShadows',
        'Retina',
        'debug_per_frame',
        'cuda',
    ), bool)
    checker.check((
        'delay',
        'height',
        'interval',
        'fps',
        'limit_size',
        'app_fps',
        'max_iter',
        'num_epochs',
        'step_size',
        'batch_size',
        'num_workers',
        'nthread',
        'num_round',
    ), int)
    checker.check((
        'compression_ratio',
        'varThreshold',
        'learning_rate',
        'momentum',
        'gamma',
        'init_box_scale.0.0',
        'init_box_scale.0.1',
        'init_box_scale.1.0',
        'init_box_scale.1.1',
    ), float)

    logger.debug('check legal')

    class_info_exists = checker.check('path', checker.has_file,
                                      'class_info_file')
    checker.check((
        'videos',
        'resource_path',
        'img_path',
        'video_path',
        'svm_model_path',
        'vgg_model_path',
        'xgboost_model_path',
        'language',
    ), checker.len_not, 0)
    checker.check('frame_range', checker.range)
    checker.check('debug_level', checker.within,
                  ('debug', 'info', 'warn', 'error', 'critical'))
    checker.check('vgg', checker.within, (
        '11',
        '11bn',
        '13',
        '13bn',
        '16',
        '16bn',
        '19',
        '19bn',
    ))
    checker.check('model_t', checker.within, ('xgboost', 'vgg', 'svm', 'none'))
    checker.check('generation_t', checker.within, ('video', 'image'))
    checker.check('max_iter', checker.plus_or_minus1)
    checker.check('num_workers', checker.plus_or_zero)
    checker.check('data', checker.len_is, 2)
    checker.check('data.train', checker.is_dir)
    checker.check('data.test', checker.is_dir)
    checker.check((
        'delay',
        'height',
        'interval',
        'fps',
        'limit_size',
        'compression_ratio',
        'app_fps',
        'varThreshold',
        'num_epochs',
        'learning_rate',
        'momentum',
        'step_size',
        'gamma',
        'batch_size',
        'nthread',
        'num_round',
    ), checker.plus)

    logger.debug('load class_info and check it')

    if class_info_exists:
        class_info_file = f"{user_settings['path']}/{user_settings['class_info_file']}"
        with open(class_info_file, encoding='utf-8') as f:
            user_settings['class_info'] = json5.load(f)
        checker.check('class_info', checker.within, Abnormal.Abnormal.names())

    if checker.dirty:
        logger.error("参数检查失败。。。请调整后再运行")
        from sys import exit
        exit(1)

    logger.debug('convert debug_level to a logging enum value')

    debug_level = user_settings['debug_level']
    user_settings['debug_level'] = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warn': logging.WARN,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
    }[debug_level]

    logger.debug("去掉路径最后的'/'")

    path = user_settings['path']
    if path[-1] == '/':
        path = path[:-1]
        user_settings['path'] = path

    logger.debug('convert files in the settings to absolute paths and match videos')

    videos = user_settings['videos']

    __videos = []
    class_info = user_settings['class_info'].keys()
    for video in videos:
        for f in glob.glob(f'{path}/{video}'):
            f = f.replace('\\', '/')
            splits = f.split('.')
            if len(splits) <= 1:
                logger.debug('file has no extension')
                continue
            if splits[-1] == 'json':
                logger.debug('settings file')
                continue
            name = splits[-2].split('/')[-1]
            if len(name) == 0:
                logger.debug('unnamed special file')
                continue
            if name not in class_info:
                logger.debug(f'class_info does not contain file "{name}"')
                continue
            __videos.append((name, f))

    user_settings['videos'] = __videos

    logger.debug('in test mode, return here')
    if test:
        return user_settings

    logger.debug('clear the output directories')

    if user_settings['file_output']:
        img_path = user_settings['img_path']
        if os.path.exists(img_path):
            shutil.rmtree(img_path)
        os.mkdir(img_path)
        video_path = user_settings['video_path']

        logger.debug('img and video share one path, so skip the duplicate work')

        if img_path != video_path:
            if os.path.exists(video_path):
                shutil.rmtree(video_path)
            os.mkdir(video_path)
    return user_settings
Example #23
        await super().start(self.config['tokens'].pop('discord'))

    async def _init_db(self):
        credentials = self.config['database']
        pool = await asyncpg.create_pool(**credentials)

        with open(os.path.join(BASE_DIR, 'schema.sql')) as f:
            await pool.execute(f.read())

        self.pool = pool
        self.db_ready.set()

    def _load_extensions(self):
        for extension in self.config['startup_extensions']:
            self.load_extension(extension)
            logger.info('Successfully loaded %s', extension)

    async def logout(self):
        with contextlib.suppress(AttributeError):
            await self.pool.close()
        await super().logout()


if __name__ == '__main__':
    with open(os.path.join(BASE_DIR, 'config.json5')) as f:
        config = json5.load(f)

    bot = HighlightBot(config=config)
    bot.run()
Example #24
                try:
                    o = func()
                except Exception as e:
                    message = (f'sync reload failed. Error: {e} '
                               f'(Time taken: {time.time() - reload_start}s)')
                else:
                    message = (f'sync reload done. Output: {o} '
                               f'(Time taken: {time.time() - reload_start}s)')
            return message
    return None


plugins: Dict[str, Plugin] = {}

with open('supibot_auth.json', 'r') as f:
    supibot_auth = json.load(f)

util_bot.init_supibot_api(supibot_auth)

# make sure that the plugin manager loads before everything else.
load_file('plugins/plugin_manager.py')

load_file('plugins/auto_load.py')

util_bot.init_sqlalchemy(base_address)
twitchirc.logging.log = util_bot.make_log_function('TwitchIRC')
twitchirc.log = util_bot.make_log_function('TwitchIRC')

bot.handlers['chat_msg'].append(chat_msg_handler)
bot.add_command('perm')(util_bot.command_perm)
bot.add_command('join')(util_bot.command_join)
Example #25
                        required=True,
                        type=str,
                        help="Configuration (*.json).")
    parser.add_argument("-R",
                        "--resume",
                        action='store_true',
                        help="Resume experiment from latest checkpoint.")
    parser.add_argument("-L",
                        "--override",
                        action='store_true',
                        help="Only use first 100 examples")
    parser.add_argument("-D",
                        "--debug",
                        action='store_true',
                        help="Only use first 100 examples")
    args = parser.parse_args()

    with open(os.path.join(root, "configs", args.configuration)) as file:
        configuration = json5.load(file)

    configuration["experiment_name"], _ = os.path.splitext(
        os.path.basename(args.configuration))
    if args.debug:
        configuration["experiment_name"] += '_debug'

    configuration["root"] = root

    main(configuration,
         resume=args.resume,
         lr_override=args.override,
         debug=args.debug)
Example #26
    )

    loss_function = initialize_config(config["loss_function"])

    trainer_class = initialize_config(config["trainer"], pass_args=False)

    trainer = trainer_class(
        config=config,
        resume=resume,
        model=model,
        loss_function=loss_function,
        optimizer=optimizer,
        train_dataloader=train_dataloader,
        validation_dataloader=valid_dataloader
    )

    trainer.train()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Wave-U-Net for Speech Enhancement")
    parser.add_argument("-C", "--configuration", required=True, type=str, help="Configuration (*.json).")
    parser.add_argument("-R", "--resume", action="store_true", help="Resume experiment from latest checkpoint.")
    args = parser.parse_args()

    with open(args.configuration) as f:
        configuration = json5.load(f)
    configuration["experiment_name"], _ = os.path.splitext(os.path.basename(args.configuration))
    configuration["config_path"] = args.configuration

    main(configuration, resume=args.resume)
Example #27
from .base import *
import json5
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

with open(os.path.join(BASE_DIR, "config.json"), encoding='utf8') as file:
    CONFIG_DATA = json5.load(file)
    CONFIG_DATA["PRODUCTION"] = True
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': CONFIG_DATA['DBNAME'],
        'USER': CONFIG_DATA['DBACCOUNT'],
        'PASSWORD': CONFIG_DATA['DBPASSWORD'],
        'HOST': CONFIG_DATA['DBHOST'],
        'PORT': CONFIG_DATA['DBPORT'],
        'OPTIONS': {
            'charset': 'utf8mb4',
            'use_unicode': True,
        }
    }
}
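Reading config.json with json5 instead of json presumably lets the file carry comments and trailing commas; a small illustration of what then becomes legal in the config (the keys are the ones this settings module reads, the values are invented):

import json5

CONFIG_DATA = json5.loads('''
{
    // local database credentials
    "DBNAME": "mydb",
    "DBACCOUNT": "app",
    "DBPASSWORD": "secret",
    "DBHOST": "127.0.0.1",
    "DBPORT": "3306",   // trailing comma here is fine in JSON5
}
''')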
Example #28
    def bus_defs_from_spec(cls, spec_path):
        """
        parse spec def and create the described slave+master bus interfaces 
        """
        with open(spec_path) as fin:
            spec = json5.load(fin)
        adkey = 'abstractionDefinition'
        bus_type = dotdict(spec[adkey]['busType'])
        abstract_type = dotdict({
            'vendor': spec[adkey]['vendor'],
            'library': spec[adkey]['library'],
            'name': spec[adkey]['name'],
            'version': spec[adkey]['version'],
        })

        master_req_ports = []
        master_opt_ports = []
        master_user_port_groups = []
        slave_req_ports = []
        slave_opt_ports = []
        slave_user_port_groups = []

        user_groups = []
        for portname, portdef in spec[adkey]['ports'].items():
            (
                is_user,
                user_group,
                req_port_map,
                opt_port_map,
            ) = cls.parse_port(portname, portdef)
            if is_user:
                user_groups.append(user_group)
            if 'onMaster' in req_port_map:
                master_req_ports.append(req_port_map['onMaster'])
            if 'onMaster' in opt_port_map:
                if is_user:
                    master_user_port_groups.append(
                        (user_group, opt_port_map['onMaster']))
                else:
                    master_opt_ports.append(opt_port_map['onMaster'])
            if 'onSlave' in req_port_map:
                slave_req_ports.append(req_port_map['onSlave'])
            if 'onSlave' in opt_port_map:
                if is_user:
                    slave_user_port_groups.append(
                        (user_group, opt_port_map['onSlave']))
                else:
                    slave_opt_ports.append(opt_port_map['onSlave'])

        if len(user_groups) > 1:
            # cannot have an anonymous ('') user group if there is more
            # than one specified
            assert not any([g == '' for g in user_groups])
            prefix_match = False
            for p1, p2 in combinations(user_groups, 2):
                prefix_match |= (p1.startswith(p2) or p2.startswith(p1))
            assert not prefix_match, \
                "ambiguous user_groups specified:{}".format(user_groups)

        bus_defs = []
        if master_req_ports != []:
            bus_defs.append(
                BusDef(
                    bus_type,
                    abstract_type,
                    'master',
                    master_req_ports,
                    master_opt_ports,
                    master_user_port_groups,
                ))
        if slave_req_ports != []:
            bus_defs.append(
                BusDef(
                    bus_type,
                    abstract_type,
                    'slave',
                    slave_req_ports,
                    slave_opt_ports,
                    slave_user_port_groups,
                ))
        return bus_defs
Example #29
import json5

s = '''
{a:"b\\n\
"}
'''
with open('TTL.json5', 'r', encoding='utf-8') as f:
    print(json5.load(f))
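The s literal above is defined but never used; feeding it to json5.loads shows what it holds. Python's backslash-newline continuation is consumed before json5 ever sees the text, and the doubled backslash reaches the parser as the JSON escape for a newline:

print(json5.loads(s))  # {'a': 'b\n'} -- the value contains a real newline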
Example #30
def process(file_name, input, clients, output, distance_stops_to_stops, time_stops_to_stops,
            distance_stops_to_customers, time_stops_to_customer):
    k = 1
    mode = {'0': 'driving', '1': 'walking'}

    for itera in range(2):
        # Rename columns names in order to make them latitud and longitud
        if itera == 0:
            dataframe = pandas.read_csv(file_name)
            sub = dataframe[['latitud', 'longitud']]
            dataframe2 = pandas.read_csv(file_name)
            sub2 = dataframe2[['latitud', 'longitud']]
        else:
            dataframe = pandas.read_csv(file_name)
            dataframe = dataframe[dataframe.index > 0]
            dataframe = dataframe.reset_index()
            sub = dataframe[['latitud', 'longitud']]
            dataframe2 = pandas.read_csv(clients)
            sub2 = dataframe2[['latitud', 'longitud']]
        dffinald = []
        dffinalt = []
        l = 0
        p = 0
        for i in range(len(sub.latitud)):
            dffinald.append([])
            dffinalt.append([])
            # Append a row
            for j in range(len(sub2.latitud)):

                orig_coord = (sub.latitud[i], sub.longitud[i])
                dest_coord = (sub2.latitud[j], sub2.longitud[j])
                try:
                    #                url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode='+str(mode[str(itera)])+'&sensor=false&key={2}'.format(str(orig_coord),str(dest_coord),str(apis[str(k)]))
                    #                url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode={2}&sensor=false&key={3}'.format(str(orig_coord),str(dest_coord),+str(mode[str(itera)]),str(apis[str(k)]))
                    url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode={2}&sensor=false&key={3}'.format(
                        str(orig_coord), str(dest_coord), str(mode[str(itera)]), str(apis[str(k)]))
                    result = json5.load(urllib.urlopen(url))  # Python 2 API; use urllib.request.urlopen on Python 3
                    #print result
                    tript = result['rows'][0]['elements'][0]['duration']['value']
                    tripd = result['rows'][0]['elements'][0]['distance']['value']
                    dffinald[i].append(tripd)
                    dffinalt[i].append(tript)
                    #print (i, j, 'ok', k)

                except AttributeError:
                    k = k + 1
                    #print (i, j, 'AE', k)
                    url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode={2}&sensor=false&key={3}'.format(
                        str(orig_coord), str(dest_coord), str(mode[str(itera)]), str(apis[str(k)]))
                    result = json5.load(urllib.urlopen(url))  # fixed: urllib._urlopener is not a callable fetch function
                    tript = result['rows'][0]['elements'][0]['duration']['value']
                    tripd = result['rows'][0]['elements'][0]['distance']['value']
                    dffinald[i].append(tripd)
                    dffinalt[i].append(tript)
                    #print result
                except IOError:
                    #print (i, j, 'IO', k)
                    url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode={2}&sensor=false&key={3}'.format(
                        str(orig_coord), str(dest_coord), str(mode[str(itera)]), str(apis[str(k)]))
                    result = json5.load(urllib.urlopen(url))
                    tript = result['rows'][0]['elements'][0]['duration']['value']
                    tripd = result['rows'][0]['elements'][0]['distance']['value']
                    dffinald[i].append(tripd)
                    dffinalt[i].append(tript)
                except IndexError:
                    #print (i, j, 'IE', k)
                    url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0}&destinations={1}&mode={2}&sensor=false&key={3}'.format(
                        str(orig_coord), str(dest_coord), str(mode[str(itera)]), str(apis[str(k)]))
                    result = json5.load(urllib.urlopen(url))
                    tript = result['rows'][0]['elements'][0]['duration']['value']
                    tripd = result['rows'][0]['elements'][0]['distance']['value']
                    dffinald[i].append(tripd)
                    dffinalt[i].append(tript)

                l = l + 1
                p = p + 1
                #print (i, j, l, p)
                if (l == 2400):
                    l = 0
                    k = k + 1
        if itera == 0:
            pandas.DataFrame(dffinald).to_csv(distance_stops_to_stops, index=False, header=None)
            pandas.DataFrame(dffinalt).to_csv(time_stops_to_stops, index=False, header=None)
        else:
            pandas.DataFrame(dffinald).to_csv(distance_stops_to_customers, index=False, header=None)
            pandas.DataFrame(dffinalt).to_csv(time_stops_to_customer, index=False, header=None)
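The loop above rotates to the next API key by incrementing k after an AttributeError and after every 2,400 requests; a hypothetical refactor of that rotation using itertools.cycle, which wraps around instead of running past the end of the mapping (assumes the same 'apis' dict used above):

import itertools

key_cycle = itertools.cycle(apis.values())

def make_key_getter(requests_per_key=2400):
    state = {'key': next(key_cycle), 'used': 0}

    def get_key():
        # Hand out the current key, advancing after requests_per_key uses.
        if state['used'] >= requests_per_key:
            state['key'] = next(key_cycle)
            state['used'] = 0
        state['used'] += 1
        return state['key']

    return get_key

get_key = make_key_getter()  # each request then calls get_key() for its key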
Example #31
import json
import os
import json5
import shutil
PATH_THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
PATH_CONFIG = os.path.join(PATH_THIS_FOLDER, 'config.json')
if not os.path.exists(PATH_CONFIG):
    shutil.copyfile(os.path.join(PATH_THIS_FOLDER, 'config.json.example'),
                    PATH_CONFIG)
with open(PATH_CONFIG, 'r', encoding='utf-8') as f:
    configJs = json5.load(f)
Example #32
def load_metadata(filename):
    with open(filename) as f:
        return json5.load(f)
Example #33
        try:
            entries = tg.tierDict[spkr].entryList
        except KeyError:  # speaker tier missing
            # print('no entry', spkr)
            continue
        for entry in entries:  # create label
            st = int(entry.start * sr / stride)
            ed = int(entry.end * sr / stride)
            if ed > num_frames:
                ed = num_frames
            label[idx, st:ed] = 1  # use slicing for faster writings
    return label


with open(human_label_root / "test.scp") as file:
    data = json5.load(file)
error_total, base_total = defaultdict(float), defaultdict(float)
for wavfile, annofile in data.items():
    # human label
    label_human = get_label(wavfile, annofile)
    label_human = torch.Tensor(label_human).unsqueeze(0)

    # machine label
    annofile_tmp = annofile.replace(str(human_label_root),
                                    str(lena_label_root))
    annofile_tmp = "_".join(annofile_tmp.split("_")[:-1])
    lenafile = annofile_tmp + ".textgrid"
    assert os.path.exists(lenafile), (annofile, lenafile)
    label_lena = get_label(wavfile, lenafile)
    label_lena = torch.Tensor(label_lena).unsqueeze(0)