Example 1
    def change_size(self, config):
        """Record the change in window size to the settings.toml file."""
        if self.dropdown_box.currentIndex() != -1:
            config.setdefault('view_options', {})['window_size'] = self.dropdown_box.currentIndex()

        with open(self.user_config_file, 'w') as conffile:
            toml.dump(config, conffile)
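A minimal standalone sketch of the same read-update-write pattern used above, with a hypothetical settings.toml path (not part of the original example):

import toml

SETTINGS_FILE = "settings.toml"  # hypothetical path

# Load the existing settings, falling back to an empty dict on first run.
try:
    with open(SETTINGS_FILE) as conffile:
        config = toml.load(conffile)
except FileNotFoundError:
    config = {}

# setdefault creates the [view_options] table if it does not exist yet.
config.setdefault('view_options', {})['window_size'] = 2

# Write the whole document back out.
with open(SETTINGS_FILE, 'w') as conffile:
    toml.dump(config, conffile)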
Example 2
 def teardown(self) -> None:
     self.settings['mastodon']['last_look'] = self.last_look.datetime
     with open(self.toml_file, "w") as f:
         toml.dump(self.settings, f)
         logging.info("saved mastodon config at: %s", self.toml_file)
     if not self.multiplexing:
         raise NotImplementedError("Use multiplexing.")
Example 3
def save_config(attrs, path):
    '''Saves a configuration dictionary to a file.

    Args:
      attrs: the configuration dictionary to save.
      path: the path to save the configuration dictionary to.
    '''
    # toml.dump expects a writable file object, so open the path first.
    with open(path, 'w') as f:
        toml.dump(attrs, f)
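A hypothetical call to the helper above (the dictionary and path are illustrative only):

if __name__ == "__main__":
    save_config({"server": {"host": "127.0.0.1", "port": 8080}}, "config.toml")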
Example 4
 def teardown(self):
     self.settings['reddit']['last_look'] = self.last_look.datetime
     with open(self.toml_file, "w") as f:
         toml.dump(self.settings, f)
     if not self.multiplexing:
         self.pyborg.save_all()
         print("I know {} words ({} lines) now.".format(self.pyborg.settings.num_words, len(self.pyborg.lines)))
Example 5
 def write_user_config(self, config):
     '''
     Update local user preferences
     '''
     user_config = deep_merge(self.load_file(self.user_config_path), config)
     with open(self.user_config_path, 'w') as f:
         toml.dump(user_config, f)
         log.info('Updated local config', path=self.user_config_path)
Example 6
    def playlist_save_setting(self, config):
        """Change the save playlist on close behavior of the music player."""
        if self.playlist_save_checkbox.isChecked():
            config.setdefault('playlist', {})['save_on_close'] = True

        elif not self.playlist_save_checkbox.isChecked():
            config.setdefault('playlist', {})['save_on_close'] = False

        with open(self.user_config_file, 'w') as conffile:
            toml.dump(config, conffile)
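Since isChecked() already returns a bool, the two branches above could be collapsed into a single assignment; a sketch of that shorter form, assuming the same class context as the example:

    def playlist_save_setting(self, config):
        """Change the save playlist on close behavior of the music player."""
        config.setdefault('playlist', {})['save_on_close'] = self.playlist_save_checkbox.isChecked()

        with open(self.user_config_file, 'w') as conffile:
            toml.dump(config, conffile)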
Example 7
    def dock_positon_settings(self, config):
        """Write to the settings.toml the radio button chosen by the user."""
        if self.dock_left_side.isChecked():
            config.setdefault('dock', {})['position'] = 'left'

        elif self.dock_right_side.isChecked():
            config.setdefault('dock', {})['position'] = 'right'

        with open(self.user_config_file, 'w') as conffile:
            toml.dump(config, conffile)
Example 8
def _add_dependencies(cargo_toml_path, additional_dependencies):
    with open(cargo_toml_path, 'r+') as f:
        cargo_toml = toml.load(f)
        cargo_toml.setdefault('dependencies', {})
        for dep in additional_dependencies:
            name, _, spec = dep.partition(':')
            cargo_toml['dependencies'][name] = spec or '*'
        f.seek(0)
        toml.dump(cargo_toml, f)
        f.truncate()
Example 9
def run(conf_file, verbose, debug, keep):
    settings = toml.load(conf_file)
    if debug:
        logging.basicConfig(level=logging.DEBUG)
    if verbose:
        logging.basicConfig(level=logging.INFO)
    cli = docker.from_env()
    cach = Cachet(settings['cachet']['url'], settings['cachet']['api_key'])
    settings = main(cli, cach, settings)
    if keep:
        with open(conf_file, 'w') as f:
            toml.dump(settings, f)
Example 10
    def select_media_library(self, config):
        """Open a file dialog to allow the user to select the media library path.

        The selected path is written to settings.toml.
        """
        library = QFileDialog.getExistingDirectory(self, 'Select Media Library Directory')
        if library:
            self.media_library_line.setText(library)

            config['media_library']['media_library_path'] = library

            with open(self.user_config_file, 'w') as conffile:
                toml.dump(config, conffile)
Example 11
    def load_settings(self):
        toml_path = os.path.join(click.get_app_dir("Pyborg"), "pyborg.toml")

        if os.path.exists(click.get_app_dir("Pyborg")) and not os.path.exists(toml_path):
            settings = {'pyborg-core': {"max_words": False}}
            with open(toml_path, "w") as f:
                toml.dump(settings, f)

        d = toml.load(toml_path)['pyborg-core']
        if d['max_words']:
            cfg = FakeCfg2(max_words=d['max_words'])
        else:
            cfg = FakeCfg2(max_words=50000)
        return cfg
Example 12
    def cover_art_playback_setting(self, config):
        """Change the cover art playback behavior of the music player.

        The default setting allows for the current media
        to be paused and played with mouse button clicks on the cover art.
        """
        if self.cover_art_playback.isChecked():
            config.setdefault('playback', {})['cover_art'] = True

        elif not self.cover_art_playback.isChecked():
            config.setdefault('playback', {})['cover_art'] = False

        with open(self.user_config_file, 'w') as conffile:
            toml.dump(config, conffile)
Example 13
    def playlist_view_settings(self, config):
        """Change the behavior of the Playlist dock widget.

        The default setting hides the dock on application start. With this option
        checked, the playlist dock will show on start.
        """
        if self.playlist_view_button.isChecked():
            config.setdefault('playlist', {})['show_on_start'] = True

        elif not self.playlist_view_button.isChecked():
            config.setdefault('playlist', {})['show_on_start'] = False

        with open(self.user_config_file, 'w') as conffile:
            toml.dump(config, conffile)
Example 14
def delete(args):
    config = {TITLE_STR: "PyAutoHome Configuration", DEV_STR: {}}

    try:
        with open(TOML_CONF, "r") as conf_file:
            config = toml.loads(conf_file.read())
    except IOError:
        pass

    if args.name in config[DEV_STR]:
        print("Deleting device %s..." % args.name)
        del config[DEV_STR][args.name]
        with open(TOML_CONF, "w") as conf_file:
            toml.dump(config, conf_file)
    else:
        print("Device %s not found..." % args.name)
Example 15
 def save_if_dirty(self):
     if self.dirty:
         self.lock = False
         with open(self.path, 'w') as f:
             # toml.dump returns the serialised text; keep __db as the dict itself.
             toml.dump(self.__db, f)
             self.dirty = False
         self.lock = True
Example 16
def set_database_path(dbfolder):
    """Use to write the database path into the config.

    Parameters
    ----------
    dbfolder : str or pathlib.Path
        Path to where planetpy will store data it downloads.
    """
    # First check if there's a config file, so that we don't overwrite
    # anything:
    try:
        config = toml.load(str(configpath))
    except IOError:  # config file doesn't exist
        config = {}  # create new config dictionary

    # check if there's a `data_archive` sub-dict
    try:
        archive_config = config["data_archive"]
    except KeyError:
        config["data_archive"] = {"path": dbfolder}
    else:
        archive_config["path"] = dbfolder

    with open(configpath, "w") as f:
        toml.dump(config, f)
    print(f"Saved database path {dbfolder} into {configpath}.")
Example 17
 def save(self):
     fn = self.config_file
     try:
         os.makedirs(self.config_home)
     except OSError:
         pass
     with open(fn, 'w') as f:
         return toml.dump(self.config, f)
Example 18
def write_config(seeds=None):
    existing_config = read_config()
    config = copy.deepcopy(existing_config)

    svc_config = json.loads(
        subprocess.check_output(['config-get', '--format=json']))
    if svc_config['log-level'] in ('debug', 'info', 'warn', 'error'):
        config['logging']['level'] = svc_config['log-level']

    if seeds is not None:
        config['cluster']['seed-servers'] = seeds

    if config == existing_config:
        return False

    with open(CONFIG_PATH, 'w') as fh:
        toml.dump(config, fh)
    subprocess.check_output(['service', 'influxdb', 'restart'])
    return True
Example 19
def add(args):
    print("Adding device %s ..." % args.name)

    config = {TITLE_STR: "PyAutoHome Configuration", DEV_STR: {}}

    try:
        with open(TOML_CONF, "r") as conf_file:
            config = toml.loads(conf_file.read())
    except IOError:
        pass

    config[DEV_STR][args.name] = {
        DEV_TYPE_STR: args.dev_type,
        COMM_TYPE_STR: args.comm_type,
        ADDR_STR: args.addr,
        CMD_STR: args.commands,
    }

    with open(TOML_CONF, "w") as conf_file:
        toml.dump(config, conf_file)
Example 20
 def save_config(self):
     """ Save configuration changes in config file """
     with open(AppUtil.get_config_file_path(), "w") as f:
         toml.dump(self.config, f)
Example 21
def write_validator_config(sawtooth_home, **kwargs):
    with open(os.path.join(sawtooth_home, 'etc',
                           'validator.toml'), mode='w') as out:
        toml.dump(kwargs, out)
Example 22
def write_toml(data, output):

    with open(output, 'wt') as f:
        toml.dump(data, f)
Example 23
def main(args):

    random.seed(args.seed)
    np.random.seed(args.seed)
    os.makedirs(args.output_directory, exist_ok=True)

    read_idx = 0
    chunk_idx = 0
    chunk_count = 0

    min_bases = 0
    max_bases = 0
    off_the_end_ref = 0
    off_the_end_sig = 0
    min_run_count = 0
    read_too_short = 0
    homopolymer_boundary = 0

    total_reads = num_reads(args.chunkify_file)

    chunks = np.zeros((args.chunks, args.max_seq_len * args.max_samples_per_base), dtype=np.float32)
    chunk_lengths = np.zeros(args.chunks, dtype=np.uint16)

    targets = np.zeros((args.chunks, args.max_seq_len), dtype=np.uint8)
    target_lengths = np.zeros(args.chunks, dtype=np.uint16)

    with open(os.path.join(args.output_directory, 'config.toml'), 'w') as conf:
        toml.dump(dict(chunks=vars(args)), conf)

    for read_id, samples, reference, pointers in get_reads(args.chunkify_file):

        read_idx += 1

        squiggle_duration = len(samples)
        sequence_length = len(reference) - args.offset - 1

        if sequence_length < args.max_seq_len + args.offset:
            read_too_short += 1
            continue

        # first chunk
        seq_starts = args.offset
        seq_ends = seq_starts + np.random.randint(args.min_seq_len, args.max_seq_len)

        repick = int((args.max_seq_len - args.min_seq_len) / 2)
        while boundary(reference[seq_starts:seq_ends]) and repick:
            seq_ends = seq_starts + np.random.randint(args.min_seq_len, args.max_seq_len)
            seq_ends = min(seq_ends, sequence_length)
            repick -= 1

        chunk_idxs = [(seq_starts, seq_ends)]

        # variable size chunks with overlap
        while seq_ends < sequence_length - args.min_seq_len:

            # overlap chunks with +/- 3% of max seq len
            overlap = np.int32(args.max_seq_len * 0.03)

            seq_starts = seq_ends + np.random.randint(-overlap, overlap)
            seq_ends = seq_starts + np.random.randint(args.min_seq_len, args.max_seq_len)
            seq_ends = min(seq_ends, sequence_length)

            repick = int((args.max_seq_len - args.min_seq_len) / 2)
            while boundary(reference[seq_starts:seq_ends]) and repick:
                seq_ends = seq_starts + np.random.randint(args.min_seq_len, args.max_seq_len)
                seq_ends = min(seq_ends, sequence_length)
                repick -= 1

            chunk_idxs.append((seq_starts, seq_ends))

        for start, end in chunk_idxs:

            chunk_idx += 1

            if end > sequence_length:
                print(read_id, end, sequence_length)
                off_the_end_ref += 1
                continue

            squiggle_start = pointers[start]
            squiggle_end = pointers[end + 1] # fence post mapping
            squiggle_length = squiggle_end - squiggle_start

            reference_length = end - start

            samples_per_base = squiggle_length / reference_length

            if samples_per_base < args.min_samples_per_base:
                min_bases += 1
                continue

            if samples_per_base > args.max_samples_per_base:
                max_bases += 1
                continue

            if squiggle_end > squiggle_duration:
                off_the_end_sig += 1
                continue

            longest_run = max(len(list(run)) for label, run in groupby(reference[start:end]))

            if longest_run < args.min_run:
                min_run_count += 1
                continue

            if boundary(reference[start:end]):
                homopolymer_boundary += 1
                # continue - include the chunk anyway

            chunks[chunk_count, :squiggle_length] = samples[squiggle_start:squiggle_end]
            chunk_lengths[chunk_count] = squiggle_length

            # index alphabet from 1 (ctc blank labels - 0)
            targets[chunk_count, :reference_length] = reference[start:end] + 1
            target_lengths[chunk_count] = reference_length

            chunk_count += 1

            if chunk_count == args.chunks:
                break

        if chunk_count == args.chunks:
            break

    skipped = chunk_idx - chunk_count
    percent = (skipped / chunk_idx * 100) if skipped else 0

    print("Processed %s reads out of %s [%.2f%%]" % (read_idx, total_reads, read_idx / total_reads * 100))
    print("Skipped %s chunks out of %s due to bad chunks [%.2f%%].\n" % (skipped, chunk_idx, percent))
    print("Reason for skipping:")
    print("  - off the end (signal)          ", off_the_end_sig)
    print("  - off the end (sequence)        ", off_the_end_ref)
    print("  - read too short (sequence)     ", read_too_short)
    print("  - homopolymer chunk boundary    ", homopolymer_boundary)
    print("  - longest run too short         ", min_run_count)
    print("  - minimum number of bases       ", min_bases)
    print("  - maximum number of bases       ", max_bases)

    if chunk_count < args.chunks:
        chunks = np.delete(chunks, np.s_[chunk_count:], axis=0)
        chunk_lengths = chunk_lengths[:chunk_count]
        targets = np.delete(targets, np.s_[chunk_count:], axis=0)
        target_lengths = target_lengths[:chunk_count]

    if args.chunks > args.validation_chunks:
        split = args.validation_chunks
        vdir = os.path.join(args.output_directory, "validation")
        os.makedirs(vdir, exist_ok=True)
        np.save(os.path.join(vdir, "chunks.npy"), chunks[:split])
        np.save(os.path.join(vdir, "chunk_lengths.npy"), chunk_lengths[:split])
        np.save(os.path.join(vdir, "references.npy"), targets[:split])
        np.save(os.path.join(vdir, "reference_lengths.npy"), target_lengths[:split])
    else:
        split = 0

    np.save(os.path.join(args.output_directory, "chunks.npy"), chunks[split:])
    np.save(os.path.join(args.output_directory, "chunk_lengths.npy"), chunk_lengths[split:])
    np.save(os.path.join(args.output_directory, "references.npy"), targets[split:])
    np.save(os.path.join(args.output_directory, "reference_lengths.npy"), target_lengths[split:])

    print()
    print("Training data written to %s:" % args.output_directory)
    print("  - chunks.npy with shape", chunks[split:].shape)
    print("  - chunk_lengths.npy with shape", chunk_lengths[split:].shape)
    print("  - references.npy with shape", targets[split:].shape)
    print("  - reference_lengths.npy shape", target_lengths[split:].shape)
Example 24
def save_config(cfg: Config, config_path="config.toml"):
    with open(config_path, 'w', encoding='utf-8') as save_file:
        data_to_save = json.loads(json.dumps(to_raw_type(cfg)))
        toml.dump(data_to_save, save_file)
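The json.loads(json.dumps(...)) round trip above is a blunt way to coerce arbitrary nested objects into the plain dicts, lists and scalars that the TOML encoder accepts. A small sketch of the idea with a stand-in dataclass (Config here is illustrative, not the project's class):

import json
import toml
from dataclasses import dataclass, asdict

@dataclass
class Config:
    host: str = "localhost"
    port: int = 8080

cfg = Config()

# asdict() already yields plain types for this flat example; the json round
# trip guarantees it for arbitrarily nested structures.
data_to_save = json.loads(json.dumps(asdict(cfg)))

with open("config.toml", "w", encoding="utf-8") as save_file:
    toml.dump(data_to_save, save_file)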
Example 25
def _dump(data, stream):
    return toml.dump(data, stream, encoder=toml.TomlPreserveCommentEncoder())
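TomlPreserveCommentEncoder only pays off when the data was also parsed with the matching comment-preserving decoder; a round-trip sketch, assuming the toml 0.10.x API (comment preservation is best-effort):

import toml

SOURCE = """
[db]
host = "localhost"  # local development
port = 5432  # default postgres port
"""

# Parse with the comment-preserving decoder so inline comments survive.
data = toml.loads(SOURCE, decoder=toml.TomlPreserveCommentDecoder())

# Re-serialise; a plain dumps() call would drop the comments.
print(toml.dumps(data, encoder=toml.TomlPreserveCommentEncoder()))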
Example 26
def save_csv(
    foldername,
    filename,
    device,
    device_name,
    total_mem,
    mean_time,
    std_time,
    mean_mem_used,
    std_mem_used,
    mean_mem_free,
    std_mem_free,
):
    """
    Save data in CSV

    Parameters
    ----------
    foldername : str
        foldername.
    filename : str
        filename to save data.
    device : str
        device type.
    device_name : str
        GPU model.
    total_mem : float
        total memory in GPU.
    mean_time : dictionary
        mean time vs dimension.
    std_time : dictionary
        std time vs dimension.
    mean_mem_used : dictionary
        mean memory used vs dimension.
    std_mem_used : dictionary
        std memory used vs dimension.
    mean_mem_free : dictionary
        mean free memory vs dimension.
    std_mem_free : dictionary
        std free memory vs dimension.

    Returns
    -------
    None.

    """
    config = toml.load("../config.toml")
    m_time = sorted(mean_time.items())
    s_time = sorted(std_time.items())
    m_used = sorted(mean_mem_used.items())
    s_used = sorted(std_mem_used.items())
    m_free = sorted(mean_mem_free.items())
    s_free = sorted(std_mem_free.items())
    dimension, mean_time = zip(*m_time)
    _, std_time = zip(*s_time)
    _, mean_mem_used = zip(*m_used)
    _, std_mem_used = zip(*s_used)
    _, mean_mem_free = zip(*m_free)
    _, std_mem_free = zip(*s_free)
    data_frame = pd.DataFrame({
        "Dimension": list(dimension),
        "Mean Time": list(mean_time),
        "Std Time": list(std_time),
        "Mean Memory Used": list(mean_mem_used),
        "Std Memory Used": list(std_mem_used),
        "Mean Memory Free": list(mean_mem_free),
        "Std Memory Free": list(std_mem_free),
    })
    date = "_".join(str(time.ctime()).split())
    date = "_".join(date.split(":"))
    filename = filename + "_" + date
    config["last_folder"] = foldername
    config["last_stat_csv"] = filename
    f = open("../config.toml", "w")
    toml.dump(config, f)
    f.close()
    file = open("results/" + foldername + "/" + filename, "a")
    file.write("# Device: {0} \n".format(device))
    file.write("# Device Name: {0} \n".format(device_name))
    file.write("# Total GPU memory: {0} \n".format(total_mem))
    data_frame.to_csv(file)
    file.close()
Example 27
def gen(keysize, n, c, force, outfolder):
    try:
        os.mkdir(outfolder)
    except FileExistsError as e:
        if not force:
            print('Folder {} already exist, please add --force to override!'.
                  format(outfolder))
            sys.exit(-1)

    print('Going to generate {} replicas and {} clients in {} ...'.format(
        n, c, outfolder))

    _owd = os.getcwd()
    os.chdir(outfolder)

    port_index = 25600

    # generate keys for replicas and clients
    for name, count in [('replica', n), ('client', c)]:
        pubkeys = []
        for i in range(count):
            (pubkey, privkey) = rsa.newkeys(keysize)
            pubkeys.append(pubkey)

            privkey_fname = '{}_{}.rsa'.format(name, i)
            pubkey_fname = '{}_{}.rsa.pub'.format(name, i)

            with open(privkey_fname, 'w') as f:
                f.write(privkey.save_pkcs1().decode())

            with open(pubkey_fname, 'w') as f:
                f.write(pubkey.save_pkcs1().decode())

            with open('{}_{}.toml'.format(name, i), 'w') as f:
                d = {
                    'title': '{}_{}'.format(name, i),
                    'node': {
                        'index': i,
                        'type': name,
                        'private_key_file': privkey_fname,
                        'public_key_file': pubkey_fname,
                        'auth_interval': gintervals['auth'],
                    }
                }

                if name == 'replica':
                    # add additional parameters
                    for interval_name in ('status', 'view_change', 'recovery', 'idle'):
                        d['node']['{}_interval'.format(interval_name)] = gintervals[interval_name]

                toml.dump(d, f)

        configs = dict()
        configs['title'] = '{}'.format(name)
        configs['nodes_count'] = count
        configs['nodes'] = []
        for i, k in enumerate(pubkeys):
            configs['nodes'].append({
                'index': i,
                'type': name,
                'public_key_file': '{}_{}.rsa.pub'.format(name, i),
                'ip': '127.0.0.1',
                'port': port_index,
            })
            port_index += 1

        with open('{}_configs.toml'.format(name), 'w') as f:
            toml.dump(configs, f)

    os.chdir(_owd)
    print('''Successfully generated!
Go to '{}' and tune parameters in public config files
according to your needs!'''.format(outfolder))
Example 28
 def dump_dictionary_to_toml_file(o: Mapping[str, Any],
                                  file_location: str) -> None:
     with open(file_location, "w+") as fd:
         toml.dump(o, fd)
Example 29
 def print_full_config(self, file: typing.TextIO) -> None:
     parser = self._loadConfig()
     data: Dict[str, Mapping[str, str]] = {}
     for section in parser.sections():
         data[section] = parser.get_section_str_to_any(section)
     toml.dump(data, file)  # pyre-ignore[T39129461]
Example 30
def save_config(config, output_folder):
    if not path.exists(output_folder):
        makedirs(output_folder)
    with open(path.join(output_folder, "train.toml"), "w") as f:
        toml.dump(config, f)
Example 31
 def _save(self):
     with open(self.ADDRESS_BOOK_PATH, 'w') as output_file:
         toml.dump(self._address_book, output_file)
Example 32
def main(args):

    workdir = os.path.expanduser(args.training_directory)

    if os.path.exists(workdir) and not args.force:
        print("[error] %s exists, use -f to force continue training." %
              workdir)
        exit(1)

    init(args.seed, args.device)
    device = torch.device(args.device)

    print("[loading data]")
    chunks, targets, lengths = load_data(limit=args.chunks,
                                         shuffle=True,
                                         directory=args.directory)

    split = np.floor(chunks.shape[0] * args.validation_split).astype(np.int32)
    train_dataset = ChunkDataSet(chunks[:split], targets[:split],
                                 lengths[:split])
    test_dataset = ChunkDataSet(chunks[split:], targets[split:],
                                lengths[split:])
    train_loader = DataLoader(train_dataset,
                              batch_size=args.batch,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch,
                             num_workers=4,
                             pin_memory=True)

    config = toml.load(args.config)
    argsdict = dict(training=vars(args))

    chunk_config = {}
    chunk_config_file = os.path.join(args.directory, 'config.toml')
    if os.path.isfile(chunk_config_file):
        chunk_config = toml.load(chunk_config_file)

    os.makedirs(workdir, exist_ok=True)
    with open(os.path.join(workdir, 'config.toml'), 'w') as conf_out:
        toml.dump({**config, **argsdict, **chunk_config}, conf_out)

    print("[loading model]")
    model = load_symbol(config, 'Model')(config)
    optimizer = AdamW(model.parameters(), amsgrad=False, lr=args.lr)

    last_epoch = load_state(workdir,
                            args.device,
                            model,
                            optimizer,
                            use_amp=args.amp)

    lr_scheduler = func_scheduler(optimizer,
                                  cosine_decay_schedule(1.0, 0.1),
                                  args.epochs * len(train_loader),
                                  warmup_steps=500,
                                  start_step=last_epoch * len(train_loader))

    if args.multi_gpu:
        from torch.nn import DataParallel
        model = DataParallel(model)
        model.decode = model.module.decode
        model.alphabet = model.module.alphabet

    if hasattr(model, 'seqdist'):
        criterion = model.seqdist.ctc_loss
    else:
        criterion = None

    for epoch in range(1 + last_epoch, args.epochs + 1 + last_epoch):

        try:
            with CSVLogger(os.path.join(
                    workdir, 'losses_{}.csv'.format(epoch))) as loss_log:
                train_loss, duration = train(model,
                                             device,
                                             train_loader,
                                             optimizer,
                                             criterion=criterion,
                                             use_amp=args.amp,
                                             lr_scheduler=lr_scheduler,
                                             loss_log=loss_log)

            model_state = (model.module.state_dict() if args.multi_gpu
                           else model.state_dict())
            torch.save(model_state,
                       os.path.join(workdir, "weights_%s.tar" % epoch))

            val_loss, val_mean, val_median = test(model,
                                                  device,
                                                  test_loader,
                                                  criterion=criterion)
        except KeyboardInterrupt:
            break

        print(
            "[epoch {}] directory={} loss={:.4f} mean_acc={:.3f}% median_acc={:.3f}%"
            .format(epoch, workdir, val_loss, val_mean, val_median))

        with CSVLogger(os.path.join(workdir, 'training.csv')) as training_log:
            training_log.append(
                OrderedDict([('time', datetime.today()),
                             ('duration', int(duration)), ('epoch', epoch),
                             ('train_loss', train_loss),
                             ('validation_loss', val_loss),
                             ('validation_mean', val_mean),
                             ('validation_median', val_median)]))
Example 33
 def set_eden_config(self, config) -> None:
     config_d = pathlib.Path(self.etc_eden_dir) / "config.d"
     config_d.mkdir()
     with open(config_d / "systemd.toml", "w") as config_file:
         # pyre-fixme[6]: Expected `_Writable` for 2nd param but got `IO[Any]`.
         toml.dump(config, config_file)
Example 34
 def commit(self):
     """
     Commit the current configuration to a file.
     """
     toml.dump(self.config.data, self.config_file.open("w"))
Example 35
def write_running_config(config):
    with open(path + 'engine/running-config.cfg', 'w') as f:
        toml.dump(config, f)
Example 36
 def _write_config(self):
     with open("local_music_source/config.toml", "w") as f:
         toml.dump(config, f)
Example 37
def saveconfig(config, filename):
    # Open for writing; the default read mode would make toml.dump fail.
    with open(filename, 'w') as f:
        toml.dump(config, f)
Example 38
def save_pipfile(pipfile: Dict) -> None:
    with PIPFILE.open("w") as dst:
        toml.dump(pipfile, dst)
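A possible call to the helper above, assuming PIPFILE is a pathlib.Path pointing at the Pipfile; the pins are made up:

save_pipfile({
    "source": [{"name": "pypi", "url": "https://pypi.org/simple", "verify_ssl": True}],
    "packages": {"requests": "*"},
    "dev-packages": {"pytest": "*"},
})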
Example 39
import yfinance as yf
from pathlib import Path
import toml

# Globals
history = "6mo"  # valid periods = 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
frequency = "5m"  # valid intervals =  1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo

with open(Path("src/stock_choices.toml")) as stock_choices_file:
    stock_choices = toml.load(stock_choices_file)

selection_set = stock_choices["requests"]["lse"]["personal"]

for key_stock, name_stock in selection_set.items():

    stock = yf.Ticker(f"{key_stock}.L")
    historical = stock.history(period=history, interval=frequency)
    # toml.dump needs a writable file object rather than a path, and TOML
    # cannot store a DataFrame directly, so serialise it as CSV text.
    with open(f"stocks/{key_stock}.toml", "w") as stock_file:
        toml.dump(
            {"ticker": key_stock, "name": name_stock, "historical": historical.to_csv()},
            stock_file,
        )

Example 40
 def dump_data(data):
     with open("available_materials.toml", "w") as f:
         toml.dump(data, f)
Example 41
    def append_node(self, node):
        # For append mode: use the first element to store the new node
        if isinstance(node, NetworkAddressList):
            node = node[0]

        node_id = len(self.nodes)
        self.nodes.add_after_check(node['host'], node['port'])
        node_dir = os.path.join(self.output_root, '{}'.format(node_id))

        shutil.copytree(self.configs_dir, node_dir, False)

        forever_config = os.path.join(node_dir, 'forever.toml')
        with open(forever_config, 'rt') as stream:
            forever_data = toml.load(stream)
            for process_data in forever_data['process']:
                if process_data['args'].count('-s') > 0:
                    process_data['args'].remove('-s')
                if self.stdout:
                    process_data['args'].append('-s')
        with open(forever_config, 'wt') as stream:
            toml.dump(forever_data, stream)

        jsonrpc_config = os.path.join(node_dir, 'jsonrpc.toml')
        with open(jsonrpc_config, 'rt') as stream:
            jsonrpc_data = toml.load(stream)
            jsonrpc_data['http_config']['listen_port'] \
                = str(int(
                    jsonrpc_data['http_config']['listen_port']) + node_id)
            jsonrpc_data['ws_config']['listen_port'] \
                = str(int(
                    jsonrpc_data['ws_config']['listen_port']) + node_id)
            if self.enable_version:
                jsonrpc_data['enable_version'] = True
            else:
                jsonrpc_data['enable_version'] = False
        with open(jsonrpc_config, 'wt') as stream:
            toml.dump(jsonrpc_data, stream)

        with open(os.path.join(node_dir, '.env'), 'wt') as stream:
            stream.write(
                'AMQP_URL=amqp://guest:guest@localhost/{}/{}\n'.format(
                    self.node_prefix, node_id))
            stream.write('DATA_PATH=./data\n')

        privkey = node.get('privkey')
        privkey_config = os.path.join(node_dir, 'privkey')
        with open(privkey_config, 'wt') as stream:
            if privkey:
                stream.write(privkey)
            stream.write('\n')

        address = node.get('address')
        if address:
            address_config = os.path.join(node_dir, 'address')
            with open(address_config, 'wt') as stream:
                stream.write(address)
                stream.write('\n')

        network_full_config = os.path.join(self.configs_dir, 'network.toml')
        with open(network_full_config, 'rt') as stream:
            network_data = toml.load(stream)
            network_data['peers'].append(self.create_peer_data(node_id, node))
            config = network_data['peers']
        with open(network_full_config, 'wt') as stream:
            toml.dump(network_data, stream)

        for old_id in range(0, node_id):
            old_dir = os.path.join(self.output_root, '{}'.format(old_id))
            network_config = os.path.join(old_dir, 'network.toml')
            with open(network_config, 'rt') as stream:
                network_data = toml.load(stream)
                network_data['peers'].append(
                    self.create_peer_data(node_id, node))
            with open(network_config, 'wt') as stream:
                current_ip = config[old_id]['ip']
                stream.write(f'# Current node ip is {current_ip}\n')
                toml.dump(network_data, stream)

        network_config = os.path.join(node_dir, 'network.toml')
        with open(network_config, 'rt') as stream:
            network_data = toml.load(stream)
            network_data['port'] = node['port']
            if self.enable_tls:
                network_data['enable_tls'] = True
        with open(network_config, 'wt') as stream:
            current_ip = config[node_id]['ip']
            stream.write(f'# Current node ip is {current_ip}\n')
            toml.dump(network_data, stream)

        with open(self.nodes_list, 'at') as stream:
            stream.write('{}:{}\n'.format(node['host'], node['port']))
Example 42
def main() -> None:
    now = datetime.datetime.now()
    default_day = now.day
    default_year = now.year

    argp = argparse.ArgumentParser(description=DESCRIPTION)
    argp.add_argument(
        "-d",
        "--day",
        type=int,
        choices=range(1, 25 + 1),
        default=default_day,
        required=False,
    )
    argp.add_argument(
        "-y",
        "--year",
        type=int,
        choices=range(2015, default_year + 1),
        default=default_year,
        required=False,
    )
    argv = argp.parse_args()
    day: int = argv.day
    year: int = argv.year

    crate = f"day{day:02}"
    crate_path = pathlib.Path(crate)

    if crate_path.exists():
        print(f"{crate} already exists.")
        return

    with open("Cargo.toml") as manifest_f:
        manifest = toml.load(manifest_f)

    manifest["workspace"]["members"].append(crate)

    with open("Cargo.toml", "w") as manifest_f:
        toml.dump(manifest, manifest_f)

    subprocess.run(["cargo", "new", "--bin", crate], check=True)

    src = crate_path / "src"

    with (src / "main.rs").open("w") as main:
        main.write(MAIN.format(crate=crate))

    with (src / "lib.rs").open("w") as lib:
        lib.write(LIB.format(crate=crate))

    with open("session.txt") as session_f:
        session = session_f.read().strip()

    with (src / "input.txt").open("w", newline="\n") as input:
        resp = requests.get(
            f"https://adventofcode.com/{year}/day/{day}/input",
            cookies={"session": session},
        )
        resp.raise_for_status()
        input.write(resp.text)

    webbrowser.open_new(f"https://adventofcode.com/{year}/day/{day}")
Example 43
def create_network(
    obj,
    target_path,
    network_name,
    genesis_in,
    number_of_nodes,
    cluster,
    discovery_strategy,
):
    if network_name is None:
        network_name = os.path.basename(target_path)

    # Create the network output directories.
    show_val("Output path", target_path)
    os.mkdir(target_path)
    chain_path = os.path.join(target_path, "chain")
    os.mkdir(chain_path)

    # Prepare paths and copy over all contracts.
    show_val("WASM contracts", obj["wasm_dir"])
    contract_paths = {}
    for contract in CONTRACTS:
        key = "{}_installer_path".format(contract)
        basename = "{}.wasm".format(contract)
        source = os.path.join(obj["wasm_dir"], "{}_install.wasm".format(contract))
        target = os.path.join(chain_path, "{}.wasm".format(contract))
        shutil.copy(source, target)

        # We use relative paths when creating a self-contained network.
        contract_paths[contract] = basename

    # Update chainspec values.
    chainspec = create_chainspec(
        obj["chainspec_template"], network_name, genesis_in, contract_paths
    )

    chainspec_path = os.path.join(chain_path, "chainspec.toml")
    with open(chainspec_path, "w") as chainspec_file:
        toml.dump(chainspec, chainspec_file)
    show_val("Chainspec", chainspec_path)

    # Setup each node, collecting all pubkey hashes.
    show_val("Node config template", obj["config_template"])
    show_val("Number of nodes", number_of_nodes)
    show_val("Discovery strategy", discovery_strategy)
    pubkeys = {}
    for n in range(number_of_nodes):
        if discovery_strategy == "root":
            known_nodes = [0]
        else:
            raise ValueError(
                "unknown discovery strategy: {}".format(discovery_strategy)
            )

        node_path = os.path.join(target_path, "node-{}".format(n))
        os.mkdir(node_path)
        pubkey_hex = create_node(
            n,
            obj["casper_client_argv0"],
            network_name,
            obj["config_template"],
            node_path,
            cluster,
            known_nodes,
        )
        pubkeys[n] = pubkey_hex

    accounts_path = os.path.join(chain_path, "accounts.csv")
    show_val("accounts file", accounts_path)
    create_accounts_csv(open(accounts_path, "w"), pubkeys)
Example 44
 def teardown(self) -> None:
     self.settings['twitter']['last_look'] = self.last_look.datetime
     with open(self.toml_file, "w") as f:
         toml.dump(self.settings, f)
     if not self.multiplexing:
         self.pyborg.save_all()
Example 45
import toml

d = {
    "/": {
        "200": {
            "description": "ok response"
        },
        "default": {
            "description": "default response"
        },
    },
}

with open("schema.toml", "w") as wf:
    toml.dump(d, wf)
with open("schema.toml", "r") as rf:
    from_toml = toml.load(rf)
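One line that might be worth appending to a round trip like this: the document only contains strings and tables, so the loaded structure should compare equal to the original dict.

# Sanity check: dump followed by load reproduces the original structure.
assert from_toml == d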
Example 46
 def save(self):
     with open(self.path, "w") as fh:
         toml.dump(self.data, fh)
Example 47
def create_temp_model(request, mpi_tmpdir, case_gen, persist=False):
    """
    Set up a test case from HDF5 datafiles.

    Returns the case_gen dict, updated with the working directory and the
    name of the case's toml file.
    """
    from mpi4py import MPI

    tv = request.node.get_closest_marker("tv") is not None

    tmpdir = mpi_tmpdir
    data_dir = pytest.data_dir

    case_dir = pytest.case_dir / case_gen['case_dir']

    # Find the toml file for case
    toml_file = case_dir / case_gen['toml_file']

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if rank == 0:

        # Test case expects to find data in 'input'
        destdir = tmpdir / "input"
        destdir.mkdir()

        # Load toml config (for modification)
        config = toml.load(toml_file)

        # Override settings with Taylor verification specific stuff
        if tv and "tv_settings" in case_gen:
            fu.dict_update(config, case_gen['tv_settings'])

        # Turn off inversion verbose to keep test output clean
        config['inversion']['verbose'] = False

        # If doing Taylor verification, only take 1 sample:
        config['time']['num_sens'] = 1

        # and write out the toml to tmpdir
        with open(tmpdir / toml_file.name, 'w') as toml_out:
            toml.dump(config, toml_out)

        # ##### File Copies #######

        # Get the directory of the input files
        indata_dir = pytest.data_dir
        if ('data_dir' in case_gen):
            indata_dir = case_gen['data_dir'] / config['io']['input_dir']

        # Collect data files to be copied...
        copy_set = set()
        for f in config['io']:
            if "data_file" in f:
                copy_set.add(indata_dir / config['io'][f])

        # ...including velocity observations
        copy_set.add(indata_dir / config['obs']['vel_file'])

        # And copy them
        for f in copy_set:
            shutil.copy(f, destdir)

        # Copy or generate the mesh
        mesh_filename = config["mesh"]["mesh_filename"]
        mesh_file = (data_dir / case_gen['data_dir'] /
                     config['io']['input_dir'] / mesh_filename)

        try:
            mesh_ff_filename = config["mesh"]["bc_filename"]
            mesh_ff_file = (data_dir / case_gen['data_dir'] / "input" /
                            mesh_ff_filename)
        except KeyError:
            mesh_ff_file = None

        # Generate mesh if it doesn't exist
        # TODO - not totally happy w/ logic here:
        # ismipc tests generate their own meshes, ice_stream doesn't
        if not (destdir / mesh_filename).exists():

            if (mesh_file.exists()):
                shutil.copy(mesh_file, destdir)
                if mesh_file.suffix == ".xdmf":
                    shutil.copy(mesh_file.with_suffix(".h5"), destdir)

                if mesh_ff_file:
                    shutil.copy(mesh_ff_file, destdir)
                    shutil.copy(mesh_ff_file.with_suffix(".h5"), destdir)

            else:
                gen_rect_mesh.gen_rect_mesh(case_gen['mesh_nx'],
                                            case_gen['mesh_ny'], 0,
                                            case_gen['mesh_L'], 0,
                                            case_gen['mesh_L'],
                                            str(destdir / mesh_filename))

    case_gen["work_dir"] = Path(tmpdir)
    case_gen["toml_filename"] = toml_file.name

    if persist:
        pytest.active_cases.append(case_gen)

    comm.barrier()
    return case_gen
Example 48
            prec1_2, prec5_2 = accuracy(output[-1].detach().cpu(), target2.detach().cpu(), topk=(1, 5))
            prec1, prec5 = ((prec1_1 + prec1_2) / 2.0, (prec5_1 + prec5_2) / 2.0)
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % config["print_freq"] == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))

        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))

        acc_filename = 'entropy_selected_dropout_acc.toml'
        with open(acc_filename, 'r') as f:
            accs = toml.load(f)

        accs[f'{config_name}_{args.mode}_{str(d_rate)}'] = top1.avg

        with open(acc_filename, 'w') as f:
            toml.dump(accs, f)
Example 49
 def display(self):
     """
     Display the current configuration.
     """
     toml.dump(self.config.data, sys.stdout)
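toml.dump only needs an object with a write() method, which is why sys.stdout works above; the same trick captures the rendered text in memory. A small sketch with illustrative data:

import io
import sys
import toml

config = {"ui": {"theme": "dark", "font_size": 12}}

toml.dump(config, sys.stdout)   # write straight to the terminal

buf = io.StringIO()
toml.dump(config, buf)          # or render into an in-memory buffer
rendered = buf.getvalue()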
Example 50
def query_builder() -> str:
    typer.secho(
        """
    FORMATTING AND TIPS

    GENERAL FORMATTING
        [Tip] You do not have to answer a question unless it is marked [REQUIRED].
        If you want to skip a question, just press space and then enter.

    URL
        [Formatting: sc-domain:example.com or https://example.com]

    DATES
        [Formatting] Dates are in YYYY-MM-DD format.
        [Example] 23 march 2020 | 2020-03-10 | 2 weeks and 4 months ago 

    FILTERS
        [Formatting] If you want to add multiple filters split them by ',' 
        [Example] country equals FRA, device notContains tablet
        [Suggested Format] dimensions, operator, expression
    
    GRANULARITY
        Granularity specifies the frequency of the data; higher frequency means a longer response time.
        [Example] If you specify 'monday', seoman returns results only from Mondays between the start date and the end date.
        [Example] If you specify 'fivedaily', it splits your date range into 5-day windows and runs a separate query for each:
        if your start date is 2020-03-10 and the end date is 2020-04-10, it first queries 03-10 to 03-15, then 03-15 to 03-20, and merges the results.

    DIMENSIONS
        [Valid Parameters] page, query, date, device, country | for simplicity you can type 'all' to include all of them.
    
    EXPORT TYPE
        [Valid Parameters] excel, csv, json, tsv.

    ROW LIMIT
        [Valid Parameters] Must be a number from 1 to 25000.

    START ROW 
        [Valid Parameters] Must be a non-negative number.

    """,
        fg=typer.colors.BRIGHT_GREEN,
        bold=True,
    )

    questions = [
        inquirer.Text("url", message="[Required] The site's URL"),
        inquirer.Text(
            "start_date",
            message="[Required] Start date of the requested date range",
        ),
        inquirer.Text(
            "end_date",
            message="[Required] End date of the requested date range",
        ),
    ]

    answers = inquirer.prompt(questions)
    url = answers.get("url", "")
    start_date = answers.get("start_date", "")
    end_date = answers.get("end_date", "")

    questions = [
        inquirer.List(
            "dimensions",
            message=
            "Which dimensions of Search Analytics you would like to group by?",
            choices=[
                "all [date, query, page, device, country]",
                "keywords & pages [date, query, page]",
                "by devices [date, device]",
                "by countries [date, countries]",
                "custom [Choose from: date - query - page - device - country]",
            ],
        ),
    ]

    answers = inquirer.prompt(questions)

    if (answers.get("dimensions") ==
            "custom [Choose from: date - query - page - device - country]"):
        questions = [
            inquirer.Checkbox(
                "dimensions",
                message=
                "Which dimensions of Search Analytics you would like to group by?",
                choices=["date", "query", "page", "country", "device"],
            ),
        ]
        answers = inquirer.prompt(questions)
        dimensions = answers.get("dimensions", [])

    else:
        dimensions = answers.get("dimensions", "")

    questions = [
        inquirer.Text(
            "filters",
            message=
            "Zero or more groups of filters to apply to the dimension grouping values",
        ),
        inquirer.Text(
            "start_row",
            message="First row of the response [Known as start-row]",
        ),
        inquirer.Text(
            "row_limit",
            message="The maximum number of rows to return [0-25000]",
        ),
        inquirer.List(
            "search_type",
            message="The search type to filter for",
            choices=["web", "image", "video"],
            default="web",
        ),
        inquirer.List(
            "export",
            message="The export type for the results",
            choices=["xlsx", "csv", "json", "tsv"],
        ),
    ]

    answers = inquirer.prompt(questions)
    filters = answers.get("filters", "")
    start_row = answers.get("start_row", "")
    row_limit = answers.get("row_limit", "")
    search_type = answers.get("search_type", "")
    export = answers.get("export", "")

    query: Dict[str, Dict[str, Any]] = {"query": {}}
    all_dimensions = ["page", "query", "date", "device", "country"]

    if len(url) > 5:
        query["query"].update({"url": url})

    if start_date.strip() != "":
        query["query"].update(
            {"start-date": process_date(dt=start_date, which_date="start")})

    if end_date.strip() != "":
        query["query"].update(
            {"end-date": process_date(dt=end_date, which_date="end")})

    if isinstance(dimensions, str):
        if dimensions == "all [date, query, page, device, country]":
            query["query"].update(
                {"dimensions": ["date", "query", "page", "device", "country"]})

        elif dimensions == "keywords & pages [date, query, page]":
            query["query"].update({"dimensions": ["date", "query", "page"]}, )

        elif dimensions == "by devices [date, device]":
            query["query"].update({"dimensions": ["date", "device"]}, )

        elif dimensions == "by countries [date, countries]":
            query["query"].update({"dimensions": ["date", "country"]}, )
    else:
        query["query"].update({"dimensions": [dim for dim in dimensions]})

    if filters.strip() != "":
        query["query"].update(
            {"filters": [filt for filt in filters.split(",")]})

    if start_row.strip() != "" and start_row.isnumeric():
        query["query"].update({"start-row": start_row})

    if row_limit.strip() != "" and row_limit.isnumeric():
        if int(row_limit) >= 25000:
            row_limit = "25000"
        query["query"].update({"row-limit": row_limit.strip()})

    if search_type.strip() != "":
        query["query"].update({"search-type": search_type.strip().lower()})

    if export.strip() != "":
        query["query"].update({"export-type": export})

    typer.secho("\nYour query is ready\n",
                fg=typer.colors.BRIGHT_GREEN,
                bold=True)

    filename = typer.prompt("Give a name to your query") + ".toml"
    folder_path = Path.home() / ".queries"
    file_path = Path.home() / ".queries" / Path(filename)

    if not Path(folder_path).exists():
        Path(folder_path).mkdir(exist_ok=False)

    if not Path(file_path).exists():
        with open(file_path, "w") as file:
            toml.dump(query, file)

        return filename

    else:
        new_name = typer.prompt(
            "File name already exists, enter a new name.") + ".toml"
        file_path = Path.home() / ".queries" / Path(new_name)

        if not Path(file_path).exists():
            with open(file_path, "w") as file:
                toml.dump(query, file)

    return new_name
Example 51
def write_toml(filename, toml_object):
    with open(filename, 'wt') as f:
        toml.dump(toml_object, f)
Example 52
 def export(self):
     dest_file = path.join(self.buildmap.temp_dir, "tegola.toml")
     with open(dest_file, "w") as fp:
         toml.dump(self.generate_tegola_config(), fp)
Example 53
 def set_eden_config(self, config) -> None:
     config_d = pathlib.Path(self.etc_eden_dir) / "config.d"
     config_d.mkdir()
     with open(config_d / "systemd.toml", "w") as config_file:
         toml.dump(config, config_file)
Example 54
 def shutdown(self):
     with open(self.config_location, 'w') as file:
         toml.dump(self.config, file)
Example 55
 def teardown(self):
     self.settings['tumblr']['last_look'] = self.last_look
     with open(self.toml_file, "w") as f:
         toml.dump(self.settings, f)
     if not self.multiplexing:
         self.pyborg.save_all()
Example 56
def load_config(args):
    default_config_path = os.path.dirname(args.config)
    if not os.path.exists(default_config_path):
        os.makedirs(default_config_path)

    import pwnagotchi
    ref_defaults_file = os.path.join(os.path.dirname(pwnagotchi.__file__),
                                     'defaults.toml')
    ref_defaults_data = None

    # check for a config.yml file on /boot/
    for boot_conf in ['/boot/config.yml', '/boot/config.toml']:
        if os.path.exists(boot_conf):
            # logging not configured here yet
            print("installing %s to %s ..." % (boot_conf, args.user_config))
            # https://stackoverflow.com/questions/42392600/oserror-errno-18-invalid-cross-device-link
            shutil.move(boot_conf, args.user_config)
            break

    # check for an entire pwnagotchi folder on /boot/
    if os.path.isdir('/boot/pwnagotchi'):
        print("installing /boot/pwnagotchi to /etc/pwnagotchi ...")
        shutil.rmtree('/etc/pwnagotchi', ignore_errors=True)
        shutil.move('/boot/pwnagotchi', '/etc/')

    # if no config is found, copy the defaults
    if not os.path.exists(args.config):
        print("copying %s to %s ..." % (ref_defaults_file, args.config))
        shutil.copy(ref_defaults_file, args.config)
    else:
        # check if the user messed with the defaults

        with open(ref_defaults_file) as fp:
            ref_defaults_data = fp.read()

        with open(args.config) as fp:
            defaults_data = fp.read()

        if ref_defaults_data != defaults_data:
            print(
                "!!! file in %s is different than release defaults, overwriting !!!"
                % args.config)
            shutil.copy(ref_defaults_file, args.config)

    # load the defaults
    with open(args.config) as fp:
        config = toml.load(fp)

    # load the user config
    try:
        user_config = None
        # migrate
        yaml_name = args.user_config.replace('.toml', '.yml')
        if not os.path.exists(args.user_config) and os.path.exists(yaml_name):
            # no toml found; convert yaml
            logging.info('Old yaml-config found. Converting to toml...')
            with open(args.user_config,
                      'w') as toml_file, open(yaml_name) as yaml_file:
                import yaml
                user_config = yaml.safe_load(yaml_file)
                # convert int/float keys to str
                user_config = keys_to_str(user_config)
                # convert to toml but use loaded yaml
                toml.dump(user_config, toml_file)
        elif os.path.exists(args.user_config):
            with open(args.user_config) as toml_file:
                user_config = toml.load(toml_file)

        if user_config:
            config = merge_config(user_config, config)
    except Exception as ex:
        logging.error(
            "There was an error processing the configuration file:\n%s ", ex)
        sys.exit(1)

    # dropins
    dropin = config['main']['confd']
    if dropin and os.path.isdir(dropin):
        dropin += '*.toml' if dropin.endswith(
            '/') else '/*.toml'  # only toml here; yaml is no more
        for conf in glob.glob(dropin):
            with open(conf) as toml_file:
                additional_config = toml.load(toml_file)
                config = merge_config(additional_config, config)

    # the very first step is to normalize the display name so we don't need dozens of if/elif around
    if config['ui']['display']['type'] in ('inky', 'inkyphat'):
        config['ui']['display']['type'] = 'inky'

    elif config['ui']['display']['type'] in ('papirus', 'papi'):
        config['ui']['display']['type'] = 'papirus'

    elif config['ui']['display']['type'] in ('oledhat', ):
        config['ui']['display']['type'] = 'oledhat'

    elif config['ui']['display']['type'] in ('ws_1', 'ws1', 'waveshare_1',
                                             'waveshare1'):
        config['ui']['display']['type'] = 'waveshare_1'

    elif config['ui']['display']['type'] in ('ws_2', 'ws2', 'waveshare_2',
                                             'waveshare2'):
        config['ui']['display']['type'] = 'waveshare_2'

    elif config['ui']['display']['type'] in ('ws_27inch', 'ws27inch',
                                             'waveshare_27inch',
                                             'waveshare27inch'):
        config['ui']['display']['type'] = 'waveshare27inch'

    elif config['ui']['display']['type'] in ('ws_29inch', 'ws29inch',
                                             'waveshare_29inch',
                                             'waveshare29inch'):
        config['ui']['display']['type'] = 'waveshare29inch'

    elif config['ui']['display']['type'] in ('lcdhat', ):
        config['ui']['display']['type'] = 'lcdhat'

    elif config['ui']['display']['type'] in ('dfrobot', 'df'):
        config['ui']['display']['type'] = 'dfrobot'

    elif config['ui']['display']['type'] in ('ws_154inch', 'ws154inch',
                                             'waveshare_154inch',
                                             'waveshare154inch'):
        config['ui']['display']['type'] = 'waveshare154inch'

    elif config['ui']['display']['type'] in ('waveshare144lcd', 'ws_144inch',
                                             'ws144inch', 'waveshare_144inch',
                                             'waveshare144inch'):
        config['ui']['display']['type'] = 'waveshare144lcd'

    elif config['ui']['display']['type'] in ('ws_213d', 'ws213d',
                                             'waveshare_213d',
                                             'waveshare213d'):
        config['ui']['display']['type'] = 'waveshare213d'

    elif config['ui']['display']['type'] in ('ws_213bc', 'ws213bc',
                                             'waveshare_213bc',
                                             'waveshare213bc'):
        config['ui']['display']['type'] = 'waveshare213bc'

    elif config['ui']['display']['type'] in ('spotpear24inch',):
        config['ui']['display']['type'] = 'spotpear24inch'

    else:
        print("unsupported display type %s" % config['ui']['display']['type'])
        sys.exit(1)

    return config
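The long if/elif chain that normalizes the display name could also be driven by a lookup table; a sketch of that alternative (alias tuples copied from the branches above, trimmed for brevity):

DISPLAY_ALIASES = {
    'inky': ('inky', 'inkyphat'),
    'papirus': ('papirus', 'papi'),
    'waveshare_1': ('ws_1', 'ws1', 'waveshare_1', 'waveshare1'),
    'waveshare_2': ('ws_2', 'ws2', 'waveshare_2', 'waveshare2'),
    # ... remaining display types follow the same pattern
}

def normalize_display_type(display_type):
    # Return the canonical name, or raise for anything unrecognised.
    for canonical, aliases in DISPLAY_ALIASES.items():
        if display_type in aliases:
            return canonical
    raise ValueError("unsupported display type %s" % display_type)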
Example 57
    def generate_input_toml(self, gradient: str, movie=False):
        """
        Generate the input file that the smoother requires
        
        :param gradient: Path to the gradient file to be smoothed
        :type gradient: str
        :param movie: If a movie should be saved, defaults to False
        :type movie: bool
        """
        # Define a few paths
        seperator = "/"
        if movie:
            movie_file = seperator.join(gradient.split(seperator)[:-1])
            movie_file += "/smooth_movie.h5"
        output_file = seperator.join(gradient.split(seperator)[:-1])
        output_file += "/smooth_gradient.h5"

        grad_folder, _ = os.path.split(gradient)
        smoothing_fields_mesh = os.path.join(grad_folder,
                                             "smoothing_fields.h5")
        # Domain dictionary
        mesh = {"filename": gradient, "format": "hdf5"}
        domain = {
            "dimension": 3,
            "polynomial-order": 4,
            "mesh": mesh,
            "model": mesh,
            "geometry": mesh,
        }

        # Physics dictionary
        diffusion_equation = {
            "start-time-in-seconds": 0.0,
            "end-time-in-seconds": 1.0,
            "time-step-in-seconds": 0.001,
            "time-stepping-scheme": "euler",
            "initial-values": {
                "filename": gradient,
                "format": "hdf5",
                "field": self.comm.project.inversion_params,
            },
            "final-values": {
                "filename": output_file
            },
        }

        physics = {"diffusion-equation": diffusion_equation}

        # Output dict
        if movie:
            volume_data = {
                "fields": ["VS"],
                "sampling-interval-in-time-steps": 10,
                "filename": movie_file,
                "format": "hdf5",
            }
            output = {"volume-data": volume_data}

        input_dict = {"domain": domain, "physics": physics}
        if movie:
            input_dict["output"] = output

        # Write toml file
        toml_dir = seperator.join(gradient.split(seperator)[:-1])
        toml_filename = "input.toml"
        toml_path = os.path.join(toml_dir, toml_filename)
        with open(toml_path, "w+") as fh:
            toml.dump(input_dict, fh)
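For reference, nested dictionaries like input_dict above map directly onto TOML tables; a small sketch of how such a structure serialises (filenames and values are placeholders, not the real project configuration):

import toml

input_dict = {
    "domain": {
        "dimension": 3,
        "polynomial-order": 4,
        "mesh": {"filename": "gradient.h5", "format": "hdf5"},
    },
    "physics": {
        "diffusion-equation": {
            "start-time-in-seconds": 0.0,
            "end-time-in-seconds": 1.0,
        },
    },
}

# Each nested dict becomes a [table] or [nested.table] section in the output.
print(toml.dumps(input_dict))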