def main():
    flags(sys.argv)

    model_params = Inferencer_Params(image_size=256,
                                     model_path=flags.model_dir)
    model_inferencer = Inferencer(model_params)

    pipeline_params = Pipeline_Inferencer_Params(
        data_dir_x=os.path.join(flags.dataset, 'test_X'),
        data_dir_y=None)
    pipeline = Pipeline_Inferencer(inferencer=model_inferencer,
                                   params=pipeline_params,
                                   pre_processing=None)

    # Create the output directory once, before the loop.
    if not os.path.exists(flags.outdir):
        os.makedirs(flags.outdir)

    count = 0
    img_out = pipeline.run()
    while img_out is not None:
        filename = get_filename(count, 'mask_')
        imageio.imwrite(os.path.join(flags.outdir, filename), img_out)
        print(' [*] save file ' + filename)
        count += 1
        img_out = pipeline.run()
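
The loop relies on a get_filename helper that the excerpt does not show. A minimal sketch, assuming it simply zero-pads the counter and uses a PNG extension (both are assumptions, not the repo's actual implementation):

def get_filename(count, prefix):
    # Hypothetical helper: zero-pad the index; the real extension may differ.
    return '{}{:04d}.png'.format(prefix, count)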
Example #2
def main():
    input_directory = sys.argv[1]
    output_directory = sys.argv[2]
    if len(sys.argv) == 4 and len(sys.argv[3]) > 0:
        log_beta_path = sys.argv[3]
    else:
        log_beta_path = None

    from inferencer import Inferencer
    lda_inferencer = Inferencer()
    lda_inferencer.load_params(os.path.join(output_directory, "current-params"))
    lda_inferencer.load_tree(os.path.join(output_directory, "current-tree"))
    lda_inferencer.format_output(input_directory)

    if lda_inferencer._update_hyper_parameter:
        lda_inferencer.dump_parameters(os.path.join(output_directory, "current-params"))

    lda_inferencer.dump_E_log_beta(os.path.join(output_directory, "current-E-log-beta"))

    if log_beta_path is not None:
        lda_inferencer.export_E_log_beta(log_beta_path)

    #if not hybrid_mode:
        #lda_inferencer.dump_gamma(os.path.join(output_directory, "current-gamma"))

    lda_inferencer.export_gamma(os.path.join(output_directory, "gamma"))
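
The raw sys.argv handling above can be expressed more robustly with argparse; a sketch (the argument names are taken from this excerpt, the optional third argument is modeled with nargs='?'):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('input_directory')
parser.add_argument('output_directory')
parser.add_argument('log_beta_path', nargs='?', default=None)
args = parser.parse_args()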
Example #3
def main(config_file, run_type='train', checkpoint=''):
    # pylint: disable=no-member
    config = Config(config_file)

    print(config)

    if run_type == 'train':
        from trainer import Trainer
        trainer = Trainer(dataset_dir=config.dataset_dir,
                          log_dir=config.log_dir,
                          generator_channels=config.generator_channels,
                          discriminator_channels=config.discriminator_channels,
                          nz=config.nz,
                          style_depth=config.style_depth,
                          lrs=config.lrs,
                          betas=config.betas,
                          eps=config.eps,
                          phase_iter=config.phase_iter,
                          batch_size=config.batch_size,
                          n_cpu=config.n_cpu,
                          opt_level=config.opt_level)
        trainer.run(log_iter=config.log_iter, checkpoint=checkpoint)
    elif run_type == 'inference':
        from inferencer import Inferencer
        inferencer = Inferencer(
            generator_channels=config.generator_channels,
            nz=config.nz,
            style_depth=config.style_depth,
        )
        inferencer.inference(n=8)
    else:
        raise NotImplementedError
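
Config is project-specific and not shown. A minimal stand-in, assuming the config file is YAML and the class merely exposes its top-level keys as attributes (both assumptions):

import yaml

class Config:
    # Hypothetical stand-in: expose top-level YAML keys as attributes.
    def __init__(self, path):
        with open(path) as f:
            self._values = yaml.safe_load(f)
        self.__dict__.update(self._values)

    def __str__(self):
        return '\n'.join('{}: {}'.format(k, v) for k, v in self._values.items())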
Example #4
def test(config):
    _config_test(config)

    de2idx, idx2de = load_de_vocab()
    en2idx, idx2en = load_en_vocab()
    
    model = ConvSeq2Seq(config)
    graph_handler = GraphHandler(config)
    inferencer = Inferencer(config, model)
    sess = tf.Session()
    graph_handler.initialize(sess)

    global_step = 0
    refs = []
    hypotheses = []
    with codecs.open(os.path.join(config.eval_dir, config.model_name), "w", "utf-8") as fout:
        for i, batch in tqdm(enumerate(get_batch_for_test())):
            preds = inferencer.run(sess, batch)
            sources = batch['source']
            targets = batch['target']
            for source, target, pred in zip(sources, targets, preds):
                got = " ".join(idx2en[idx] for idx in pred).split("</S>")[0].strip()
                fout.write("- source: " + source +"\n")
                fout.write("- expected: " + target + "\n")
                fout.write("- got: " + got + "\n\n")
                fout.flush()

                ref = target.split()
                hypothesis = got.split()
                if len(ref) > 3 and len(hypothesis) > 3:
                    refs.append([ref])
                    hypotheses.append(hypothesis)

        score = corpus_bleu(refs, hypotheses)
        fout.write("Bleu Score = " + str(100*score))
Example #5
def main():
    A1 = fset.LeftSkewTrapezoid('A1', (0, 0), (1, 1), (4, 0))
    A2 = fset.Triangle('A2', (2, 0), (5, 1), (8, 0))
    A3 = fset.RightSkewTrapezoid('A3', (6, 0), (8, 1), (10, 0))

    rule1 = 'IF x IS A1 THEN y IS B1'
    rule2 = 'IF x IS A2 THEN y IS B2'
    rule3 = 'IF x IS A3 THEN y IS B3'

    fsets = [A1, A2, A3]
    rules = [rule1, rule2, rule3]

    inferencer = Inferencer()
    inferencer.add_fsets(fsets)
    inferencer.add_rules(rules)

    input_test = {'x': 7}

    print(inferencer.evaluate(input_test))
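
The fset and Inferencer classes come from the surrounding fuzzy-logic package and are not shown. For intuition, a sketch of how a triangular set such as A2 might score the test input x = 7, assuming plain linear interpolation between the three vertices (an assumption about the library's internals):

def triangle_membership(x, left, peak, right):
    # Piecewise-linear membership: 0 outside [left, right], 1 at the peak.
    if x <= left[0] or x >= right[0]:
        return 0.0
    if x <= peak[0]:
        return (x - left[0]) / float(peak[0] - left[0])
    return (right[0] - x) / float(right[0] - peak[0])

print(triangle_membership(7, (2, 0), (5, 1), (8, 0)))  # (8 - 7) / (8 - 5) = 0.333...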
Example #6
def main(
    metadata_path,
    source_dir,
    target_dir,
    output_dir,
    root,
    batch_size,
    reload,
    reload_dir,
):
    """Main function"""

    # import Inferencer module

    inferencer = Inferencer(root)
    device = inferencer.device
    sample_rate = inferencer.sample_rate
    print(f"[INFO]: Inferencer is loaded from {root}.")

    with open(metadata_path) as f:
        metadata = json.load(f)
    print(f"[INFO]: Metadata list is loaded from {metadata_path}.")

    output_dir = Path(output_dir) / Path(root).stem / \
        f"{metadata['source_corpus']}2{metadata['target_corpus']}"
    output_dir.mkdir(parents=True, exist_ok=True)

    if reload:
        metadata, conv_mels = reload_from_numpy(device, metadata, reload_dir)
    else:
        metadata, conv_mels = conversion(inferencer, device, root, metadata,
                                         source_dir, target_dir, output_dir)

    waveforms = []
    max_memory_use = conv_mels[0].size(0) * batch_size

    with torch.no_grad():
        pbar = tqdm(total=metadata["n_samples"])
        left = 0
        while left < metadata["n_samples"]:
            # Size each batch by the current longest mel so memory use stays bounded;
            # guard against a zero batch size, which would loop forever.
            batch_size = max(1, max_memory_use // conv_mels[left].size(0) - 1)
            right = left + min(batch_size, metadata["n_samples"] - left)
            waveforms.extend(
                inferencer.spectrogram2waveform(conv_mels[left:right]))
            pbar.update(right - left)
            left = right
        pbar.close()

    for pair, waveform in tqdm(zip(metadata["pairs"], waveforms)):
        waveform = waveform.detach().cpu().numpy()

        prefix = Path(pair["src_utt"]).stem
        postfix = Path(pair["tgt_utts"][0]).stem
        file_path = output_dir / f"{prefix}_to_{postfix}.wav"
        pair["converted"] = f"{prefix}_to_{postfix}.wav"

        if Path(root).stem == "BLOW":
            wavfile.write(file_path, sample_rate, waveform)
        else:
            sf.write(file_path, waveform, sample_rate)

    metadata_output_path = output_dir / "metadata.json"
    with metadata_output_path.open("w") as f:
        json.dump(metadata, f, indent=2)
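
The vocoder loop trades batch size against mel length to keep peak memory roughly constant; it implicitly assumes conv_mels is sorted by descending length. A standalone sketch of the idea (names are illustrative, not from the repo):

def length_aware_batches(lengths, max_memory_use):
    # Yield (left, right) slices whose size shrinks as items get longer,
    # so batch_size * length stays near the memory budget.
    left = 0
    while left < len(lengths):
        batch_size = max(1, max_memory_use // lengths[left] - 1)
        right = min(left + batch_size, len(lengths))
        yield left, right
        left = right

for left, right in length_aware_batches([500, 400, 250, 100], 2000):
    print(left, right)  # (0, 3), then (3, 4)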
Example #7
def main(TYPE="train", W_PATH=None):
    print("TYPE", TYPE)
    print("W_PATH", W_PATH)
    engine = Trainer(W_PATH) if TYPE == "train" else Inferencer(W_PATH)
    engine.run()
Example #8
    TRAIN_PATH, VAL_PATH, TEST_PATH, DH_PATH, LOAD_FROM_DUMP, 3)
""" model setup """
INPUT_DIM, OUTPUT_DIM = len(m_dh.de_vocab), len(m_dh.en_vocab)

enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
attn = Attention(ENC_HID_DIM, DEC_HID_DIM, ATTN_DIM)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT,
              attn)

model = Seq2Seq(enc, dec)
""" load model """
state_dict = torch.load('ckpts/best.pt')
model.load_state_dict(state_dict['model_state'])
model.eval()

en_infer = Inferencer(m_dh.en_vocab)

src, trg = next(iter(test_loader))
""" ______________ """
import matplotlib.pyplot as plt
import numpy


def plot_head_map(mma, target_labels, source_labels):
    fig, ax = plt.subplots()
    heatmap = ax.pcolor(mma, cmap=plt.cm.Blues)
    # put the major ticks at the middle of each cell
    ax.set_xticks(numpy.arange(mma.shape[1]) + 0.5,
                  minor=False)  # mma.shape[1] = target seq length
    ax.set_yticks(numpy.arange(mma.shape[0]) + 0.5,
                  minor=False)  # mma.shape[0] = input seq length
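    # (Assumed completion: the excerpt is cut off above.) The usual pattern is to
    # label the ticks with the actual tokens and move the x-axis to the top:
    ax.invert_yaxis()
    ax.xaxis.tick_top()
    ax.set_xticklabels(target_labels, minor=False)
    ax.set_yticklabels(source_labels, minor=False)
    plt.show()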