Example #1
def main(config_file, run_type='train', checkpoint=''):
    # pylint: disable=no-member
    config = Config(config_file)

    print(config)

    if run_type == 'train':
        from trainer import Trainer
        trainer = Trainer(dataset_dir=config.dataset_dir,
                          log_dir=config.log_dir,
                          generator_channels=config.generator_channels,
                          discriminator_channels=config.discriminator_channels,
                          nz=config.nz,
                          style_depth=config.style_depth,
                          lrs=config.lrs,
                          betas=config.betas,
                          eps=config.eps,
                          phase_iter=config.phase_iter,
                          batch_size=config.batch_size,
                          n_cpu=config.n_cpu,
                          opt_level=config.opt_level)
        trainer.run(log_iter=config.log_iter, checkpoint=checkpoint)
    elif run_type == 'inference':
        from inferencer import Inferencer
        inferencer = Inferencer(
            generator_channels=config.generator_channels,
            nz=config.nz,
            style_depth=config.style_depth,
        )
        inferencer.inference(n=8)
    else:
        raise NotImplementedError
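For context, a minimal command-line wrapper for this entry point might look like the sketch below; the argparse wiring is an assumption for illustration and not necessarily how the project actually launches main().

# Hypothetical CLI wrapper for the main() above; the argparse wiring is assumed.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', help='path to the config file consumed by Config')
    parser.add_argument('--run_type', default='train', choices=['train', 'inference'])
    parser.add_argument('--checkpoint', default='', help='optional checkpoint to resume from')
    args = parser.parse_args()
    main(args.config_file, run_type=args.run_type, checkpoint=args.checkpoint)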
Example #2
    def _initialize(self, corpus, vocab, prior_tree, number_of_topics, alpha_alpha):
        Inferencer._initialize(self, vocab, prior_tree, number_of_topics, alpha_alpha);

        # initialize the documents, keyed by document path, valued by a list of tokenized non-stopword tokens (duplicates kept).
        self._corpus = corpus;
        self._parsed_corpus = self.parse_data();
        
        # initialize the size of the collection, i.e., total number of documents.
        self._number_of_documents = len(self._parsed_corpus);

        '''
        # initialize a D-by-K matrix gamma, valued at N_d/K
        #self._gamma = numpy.zeros((self._number_of_documents, self._number_of_topics)) + self._alpha_alpha[numpy.newaxis, :] + 1.0 * self._number_of_types / self._number_of_topics;

        # initialize a V-by-K matrix beta, valued at 1/V, subject to the sum over every row is 1
        #self._eta = numpy.random.gamma(100., 1. / 100., (self._number_of_topics, self._number_of_types));
        #self._E_log_eta = compute_dirichlet_expectation(self._eta);
        '''

        # initialize the size of the vocabulary, i.e. total number of distinct tokens.
        # self._number_of_terms = len(self._type_to_index)
        
        # initialize a D-by-K matrix gamma, valued at N_d/K
        # self._gamma = numpy.zeros((self._number_of_documents, self._number_of_topics)) + self._alpha_alpha + 1.0 * self._number_of_paths / self._number_of_topics;
        # self._gamma = numpy.tile(self._alpha_alpha + 1.0 * self._number_of_terms / self._number_of_topics, (self._number_of_documents, 1));
        self._gamma = self._alpha_alpha + 2.0 * self._number_of_paths / self._number_of_topics * numpy.random.random((self._number_of_documents, self._number_of_topics));
        
        # initialize a _E_log_beta variable, indexed by node, valued by a K-by-C matrix, where C stands for the number of children of that node
        self._var_beta = numpy.random.gamma(100., 1. / 100., (self._number_of_topics, self._number_of_edges));
        # for edge_index in self._index_to_edge:
            # self._var_beta[:, [edge_index]] += numpy.sum(phi_sufficient_statistics[:, self._paths_through_edge[edge_index]], axis=1)[:, numpy.newaxis];
            
Example #3
    def _initialize(self,
                    corpus,
                    vocab,
                    number_of_topics,
                    alpha_alpha,
                    alpha_beta,
                    alpha_eta=0,
                    alpha_sigma_square=1.0
                    ):
        Inferencer._initialize(self, vocab, number_of_topics, alpha_alpha, alpha_beta);

        self._parsed_corpus, self._responses = self.parse_data(corpus);
        
        # define the total number of documents
        self._number_of_documents = len(self._parsed_corpus);
        
        # initialize a D-by-K matrix gamma, valued at N_d/K
        self._gamma = numpy.zeros((self._number_of_documents, self._number_of_topics)) + self._alpha_alpha[numpy.newaxis, :] + 1.0 * self._number_of_types / self._number_of_topics;
        # self._gamma = numpy.random.gamma(100., 1./100, (self._number_of_documents, self._number_of_topics))
        
        # initialize a K-by-V matrix beta with Gamma(100., 1./100.) draws (rows can later be normalized to sum to 1)
        self._beta = numpy.random.gamma(100., 1. / 100., (self._number_of_topics, self._number_of_types));
        # self._beta /= numpy.sum(self._beta, 1)[:, numpy.newaxis]
        # self._E_log_eta = compute_dirichlet_expectation(self._beta);
        
        self._eta = numpy.zeros((1, self._number_of_topics)) + alpha_eta
        self._sigma_square = alpha_sigma_square
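The commented-out compute_dirichlet_expectation call above refers to the standard identity E[log theta_k] = psi(gamma_k) - psi(sum_j gamma_j) for theta ~ Dirichlet(gamma); a minimal sketch of such a helper (the project's own implementation may differ in details):

# Sketch of a Dirichlet-expectation helper like the compute_dirichlet_expectation
# referenced above; the project's own version may differ.
import numpy
from scipy.special import psi  # digamma

def compute_dirichlet_expectation(dirichlet_parameter):
    if dirichlet_parameter.ndim == 1:
        return psi(dirichlet_parameter) - psi(numpy.sum(dirichlet_parameter))
    # row-wise: each row holds the parameters of one Dirichlet
    return psi(dirichlet_parameter) - psi(numpy.sum(dirichlet_parameter, axis=1))[:, numpy.newaxis]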
Example #4
def test(config):
    _config_test(config)

    de2idx, idx2de = load_de_vocab()
    en2idx, idx2en = load_en_vocab()
    
    model = ConvSeq2Seq(config)
    graph_handler = GraphHandler(config)
    inferencer = Inferencer(config, model)
    sess = tf.Session()
    graph_handler.initialize(sess)

    global_step = 0
    refs = []
    hypotheses = []
    with codecs.open(os.path.join(config.eval_dir, config.model_name), "w", "utf-8") as fout:
        for i, batch in tqdm(enumerate(get_batch_for_test())):
            preds = inferencer.run(sess, batch)
            sources = batch['source']
            targets = batch['target']
            for source, target, pred in zip(sources, targets, preds):
                got = " ".join(idx2en[idx] for idx in pred).split("</S>")[0].strip()
                fout.write("- source: " + source +"\n")
                fout.write("- expected: " + target + "\n")
                fout.write("- got: " + got + "\n\n")
                fout.flush()

                ref = target.split()
                hypothesis = got.split()
                if len(ref) > 3 and len(hypothesis) > 3:
                    refs.append([ref])
                    hypotheses.append(hypothesis)

        score = corpus_bleu(refs, hypotheses)
        fout.write("Bleu Score = " + str(100*score))
Example #5
    def __init__(self,
                 hash_oov_words=False,
                 number_of_samples=10,
                 burn_in_sweeps=5):
        Inferencer.__init__(self, hash_oov_words)

        self._number_of_samples = number_of_samples
        self._burn_in_sweeps = burn_in_sweeps
Example #6
    def __init__(self,
                 hash_oov_words=False,
                 maximum_gamma_update_iteration=50,
                 minimum_mean_change_threshold=1e-3):
        Inferencer.__init__(self, hash_oov_words)

        self._maximum_gamma_update_iteration = maximum_gamma_update_iteration
        self._minimum_mean_change_threshold = minimum_mean_change_threshold
Example #7
File: hybrid.py Project: Jessilee/PyLDA
    def __init__(self,
                 hyper_parameter_optimize_interval=1,

                 #hyper_parameter_iteration=100,
                 #hyper_parameter_decay_factor=0.9,
                 #hyper_parameter_maximum_decay=10,
                 #hyper_parameter_converge_threshold=1e-6,
                 ):
        Inferencer.__init__(self, hyper_parameter_optimize_interval);
Example #8
    def __init__(self,
                 hash_oov_words=False,
                 maximum_gamma_update_iteration=50,
                 minimum_mean_change_threshold=1e-3
                 ):
        Inferencer.__init__(self, hash_oov_words);

        self._maximum_gamma_update_iteration = maximum_gamma_update_iteration;
        self._minimum_mean_change_threshold = minimum_mean_change_threshold;
Example #9
    def __init__(self,
                 hyper_parameter_optimize_interval=1,
                 symmetric_alpha_alpha=True,
                 symmetric_alpha_beta=True,
                 ):
        Inferencer.__init__(self, hyper_parameter_optimize_interval);

        self._symmetric_alpha_alpha = symmetric_alpha_alpha
        self._symmetric_alpha_beta = symmetric_alpha_beta
Example #10
    def __init__(self,
                 hash_oov_words=False,
                 number_of_samples=10,
                 burn_in_sweeps=5
                 ):
        Inferencer.__init__(self, hash_oov_words);

        self._number_of_samples = number_of_samples;
        self._burn_in_sweeps = burn_in_sweeps;
Example #11
    def __init__(
        self,
        hyper_parameter_optimize_interval=1,

        #hyper_parameter_iteration=100,
        #hyper_parameter_decay_factor=0.9,
        #hyper_parameter_maximum_decay=10,
        #hyper_parameter_converge_threshold=1e-6,
    ):
        Inferencer.__init__(self, hyper_parameter_optimize_interval)
Example #12
    def __init__(
        self,
        hyper_parameter_optimize_interval=10,
        symmetric_alpha_alpha=True,
        symmetric_alpha_beta=True,
    ):
        Inferencer.__init__(self, hyper_parameter_optimize_interval)

        self._symmetric_alpha_alpha = symmetric_alpha_alpha
        self._symmetric_alpha_beta = symmetric_alpha_beta
Example #13
    def __init__(self,
                 hyper_parameter_optimize_interval=10,
                 symmetric_alpha_alpha=True,
                 symmetric_alpha_beta=True,

                 #local_parameter_iteration=1,
                 ):
        Inferencer.__init__(self, hyper_parameter_optimize_interval);

        self._symmetric_alpha_alpha=symmetric_alpha_alpha
        self._symmetric_alpha_beta=symmetric_alpha_beta
Example #14
    def __init__(self,
                 hyper_parameter_optimize_interval=1,
                 symmetric_alpha_alpha=True,
                 symmetric_alpha_beta=True,
                 #scipy_optimization_method="BFGS",
                 scipy_optimization_method="L-BFGS-B",
                 #scipy_optimization_method = "CG"
                 ):
        Inferencer.__init__(self, hyper_parameter_optimize_interval);

        self._symmetric_alpha_alpha = symmetric_alpha_alpha
        self._symmetric_alpha_beta = symmetric_alpha_beta
        
        self._scipy_optimization_method = scipy_optimization_method
Example #15
    def _initialize(self, corpus, vocab, number_of_topics, alpha_alpha, alpha_beta):
        Inferencer._initialize(self, vocab, number_of_topics, alpha_alpha, alpha_beta);

        #self._corpus = corpus;
        self._parsed_corpus = self.parse_data(corpus);
        
        # define the total number of documents
        self._number_of_documents = len(self._parsed_corpus[0]);
        
        # initialize a D-by-K matrix gamma, valued at N_d/K
        self._gamma = numpy.zeros((self._number_of_documents, self._number_of_topics)) + self._alpha_alpha[numpy.newaxis, :] + 1.0 * self._number_of_types / self._number_of_topics;

        # initialize a K-by-V matrix eta with Gamma(100., 1./100.) draws (rows can later be normalized to sum to 1)
        self._eta = numpy.random.gamma(100., 1. / 100., (self._number_of_topics, self._number_of_types));
Example #16
    def _initialize(self, corpus, vocab, number_of_topics, alpha_alpha, alpha_beta):
        Inferencer._initialize(self, vocab, number_of_topics, alpha_alpha, alpha_beta);

        self._corpus = corpus;
        self._parsed_corpus = self.parse_data();
        
        # define the total number of documents
        self._number_of_documents = len(self._parsed_corpus[0]);
        
        # initialize a D-by-K matrix gamma, valued at N_d/K
        self._gamma = numpy.zeros((self._number_of_documents, self._number_of_topics)) + self._alpha_alpha[numpy.newaxis, :] + 1.0 * self._number_of_types / self._number_of_topics;

        # initialize a K-by-V matrix eta with Gamma(100., 1./100.) draws (rows can later be normalized to sum to 1)
        self._eta = numpy.random.gamma(100., 1. / 100., (self._number_of_topics, self._number_of_types));
Example #17
    def __init__(self,
                 hyper_parameter_optimize_interval=1,
                 ):
        '''
        update_hyper_parameter=True,
        alpha_update_decay_factor=0.9,
        alpha_maximum_decay=10,
        alpha_converge_threshold=0.000001,
        alpha_maximum_iteration=100,
        model_likelihood_threshold=0.00001,

        gamma_converge_threshold=0.000001,
        gamma_maximum_iteration=20
        '''

        Inferencer.__init__(self, hyper_parameter_optimize_interval);
Example #18
def main():
    flags(sys.argv)

    model_params = Inferencer_Params(image_size=256,
                                     model_path=flags.model_dir)
    model_inferencer = Inferencer(model_params)

    pipeline_params = Pipeline_Inferencer_Params(
        data_dir_x=os.path.join(flags.dataset, 'test_X'),
        data_dir_y=None)
    pipeline = Pipeline_Inferencer(inferencer=model_inferencer,
                                   params=pipeline_params,
                                   pre_processing=None)

    count = 0
    first_pass = True
    while first_pass or img_out is not None:
        if first_pass:
            first_pass = False
            if not os.path.exists(flags.outdir):
                os.makedirs(flags.outdir)

        img_out = pipeline.run()
        if img_out is not None:
            filename = get_filename(count, 'mask_')
            imageio.imwrite(os.path.join(flags.outdir, filename), img_out)
            print(' [*] save file ' + filename)
        count += 1
Example #19
File: monte_carlo.py Project: jz3707/PyLDA
    def _initialize(self, corpus, vocab, number_of_topics, alpha_alpha, alpha_beta):
        Inferencer._initialize(self, vocab, number_of_topics, alpha_alpha, alpha_beta);

        self._parsed_corpus = self.parse_data(corpus);

        # define the total number of documents
        self._number_of_documents = len(self._parsed_corpus);

        # define the counts over different topics for all documents, first indexed by doc_id, then indexed by topic id
        self._n_dk = numpy.zeros((self._number_of_documents, self._number_of_topics));
        # define the counts over words for all topics, first indexed by topic id, then indexed by token id
        self._n_kv = numpy.zeros((self._number_of_topics, self._number_of_types));
        self._n_k = numpy.zeros(self._number_of_topics);
        # define the topic assignment for every word in every document, first indexed by doc_id, then indexed by word_pos
        self._k_dn = {};

        self.random_initialize();
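The count tables set up above are exactly the sufficient statistics a collapsed Gibbs sampler needs; for reference, a sketch of the standard per-token conditional they support (not necessarily the exact form used in this project):

# Standard collapsed-Gibbs conditional for LDA, using the count tables above;
# the project's sampler may differ in details (e.g. asymmetric priors).
import numpy

def topic_conditional(self, doc_id, word_id, alpha, beta):
    # unnormalized P(z = k | everything else), with the current token's counts already removed
    weights = (self._n_dk[doc_id, :] + alpha) * (self._n_kv[:, word_id] + beta) \
              / (self._n_k + beta * self._number_of_types)
    return weights / numpy.sum(weights)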
Example #20
    def _initialize(self, corpus_en, voc_en, corpus_cn, voc_cn,
                    number_of_topics_ge, alpha_alpha, alpha_beta, lam):
        Inferencer._initialize(self,
                               voc_en,
                               voc_cn,
                               number_of_topics_ge,
                               alpha_alpha,
                               alpha_beta,
                               lam=0.5)
        self._corpus_en = corpus_en
        self._corpus_cn = corpus_cn
        self._trans_en_cn = np.zeros(
            (self._number_of_types_cn, self._number_of_types_en))
        self._trans_cn_en = np.zeros(
            (self._number_of_types_en, self._number_of_types_cn))
        self.parse_data()

        # define the total number of documents
        self._number_of_documents_en = len(self._word_idss_en)
        self._number_of_documents_cn = len(self._word_idss_cn)
        self._number_of_documents = self._number_of_documents_en + self._number_of_documents_cn

        self._n_dk_en = np.zeros(
            (self._number_of_documents_en, self._number_of_topics))
        self._n_dk_cn = np.zeros(
            (self._number_of_documents_cn, self._number_of_topics))
        self._n_kv_en = np.zeros(
            (self._number_of_topics, self._number_of_types_en))
        self._n_kv_cn = np.zeros(
            (self._number_of_topics, self._number_of_types_cn))
        # define the topic assignment for every word in every document, first indexed by doc_id, then indexed by word_pos
        self._n_k_en = np.zeros(self._number_of_topics)
        self._n_k_cn = np.zeros(self._number_of_topics)
        self._k_dn_en = {}
        self._k_dn_cn = {}
        self.psi_en = np.zeros(
            (self._number_of_topics, self._number_of_types_en))
        self.psi_cn = np.zeros(
            (self._number_of_topics, self._number_of_types_cn))
        self.phi_en = np.zeros(
            (self._number_of_topics, self._number_of_types_en))
        self.phi_cn = np.zeros(
            (self._number_of_topics, self._number_of_types_cn))

        self.random_initialize()
Example #21
    def _initialize(self, corpus, vocab, number_of_topics, alpha_alpha, alpha_beta):
        Inferencer._initialize(self, vocab, number_of_topics, alpha_alpha, alpha_beta);

        self._corpus = corpus;
        self.parse_data();

        # define the total number of documents
        self._number_of_documents = len(self._word_idss);

        # define the counts over different topics for all documents, first indexed by doc_id, then indexed by topic id
        self._n_dk = numpy.zeros((self._number_of_documents, self._number_of_topics));
        # define the counts over words for all topics, first indexed by topic id, then indexed by token id
        self._n_kv = numpy.zeros((self._number_of_topics, self._number_of_types));
        self._n_k = numpy.zeros(self._number_of_topics);
        # define the topic assignment for every word in every document, first indexed by doc_id, then indexed by word_pos
        self._k_dn = {};

        self.random_initialize();
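random_initialize in samplers like this one typically assigns each token a uniformly random topic and fills the count tables accordingly; a hedged sketch consistent with the structures above (the project's actual method may differ):

# Hedged sketch of a random_initialize consistent with _k_dn / _n_dk / _n_kv / _n_k above;
# assumes self._word_idss is a list of word-id lists, one per document.
import numpy

def random_initialize(self):
    for doc_id, word_ids in enumerate(self._word_idss):
        self._k_dn[doc_id] = numpy.random.randint(0, self._number_of_topics, len(word_ids))
        for word_pos, word_id in enumerate(word_ids):
            topic = self._k_dn[doc_id][word_pos]
            self._n_dk[doc_id, topic] += 1
            self._n_kv[topic, word_id] += 1
            self._n_k[topic] += 1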
Example #22
    def __init__(self,
                 update_hyper_parameter=True,
                 alpha_update_decay_factor=0.9,
                 alpha_maximum_decay=10,
                 alpha_converge_threshold=0.000001,
                 alpha_maximum_iteration=100,
                 model_likelihood_threshold=0.00001,
                 gamma_converge_threshold=0.000001,
                 gamma_maximum_iteration=20
                 ):
        Inferencer.__init__(self, update_hyper_parameter, alpha_update_decay_factor, alpha_maximum_decay, alpha_converge_threshold, alpha_maximum_iteration, model_likelihood_threshold);

        #self._alpha_update_decay_factor = alpha_update_decay_factor;
        #self._alpha_maximum_decay = alpha_maximum_decay;
        #self._alpha_converge_threshold = alpha_converge_threshold;
        #self._alpha_maximum_iteration = alpha_maximum_iteration;

        self._gamma_maximum_iteration = gamma_maximum_iteration;
        self._gamma_converge_threshold = gamma_converge_threshold;
Example #23
    def __init__(self,
                 update_hyper_parameter=True,
                 alpha_update_decay_factor=0.9,
                 alpha_maximum_decay=10,
                 alpha_converge_threshold=0.000001,
                 alpha_maximum_iteration=100,
                 model_likelihood_threshold=0.00001,
                 number_of_samples=10,
                 burn_in_samples=5
                 ):
        Inferencer.__init__(self, update_hyper_parameter, alpha_update_decay_factor, alpha_maximum_decay, alpha_converge_threshold, alpha_maximum_iteration, model_likelihood_threshold);

        #self._alpha_update_decay_factor = alpha_update_decay_factor;
        #self._alpha_maximum_decay = alpha_maximum_decay;
        #self._alpha_converge_threshold = alpha_converge_threshold;
        #self._alpha_maximum_iteration = alpha_maximum_iteration;

        self._number_of_samples = number_of_samples;
        self._burn_in_samples = burn_in_samples;
Example #24
    def _initialize(self, corpus, vocab, labels, alpha_alpha, alpha_beta):
        Inferencer._initialize(self, vocab, labels, alpha_alpha, alpha_beta)

        self._number_of_topics = len(self._label_to_index)

        self._parsed_corpus, self._parsed_labels = self.parse_data(corpus)
        '''
        # define the total number of document
        self._number_of_documents = len(self._parsed_corpus);
        
        # define the counts over different topics for all documents, first indexed by doc_id id, the indexed by topic id
        self._n_dk = numpy.zeros((self._number_of_documents, self._number_of_topics), dtype=int)
        # define the counts over words for all topics, first indexed by topic id, then indexed by token id
        self._n_kv = numpy.zeros((self._number_of_topics, self._number_of_types), dtype=int)
        self._n_k = numpy.zeros(self._number_of_topics, dtype=int)
        # define the topic assignment for every word in every document, first indexed by doc_id id, then indexed by word word_pos
        self._k_dn = {};
        '''

        # self._number_of_documents, self._k_dn, self._n_dk, self._n_k, self._n_kv = self.random_initialize();
        self._k_dn, self._n_dk, self._n_k, self._n_kv = self.random_initialize()
Example #25
def main():
    A1 = fset.LeftSkewTrapezoid('A1', (0, 0), (1, 1), (4, 0))
    A2 = fset.Triangle('A2', (2, 0), (5, 1), (8, 0))
    A3 = fset.RightSkewTrapezoid('A3', (6, 0), (8, 1), (10, 0))

    rule1 = 'IF x IS A1 THEN y IS B1'
    rule2 = 'IF x IS A2 THEN y IS B2'
    rule3 = 'IF x IS A3 THEN y IS B3'

    fsets = [A1, A2, A3]
    rules = [rule1, rule2, rule3]

    inferencer = Inferencer()
    inferencer.add_fsets(fsets)
    inferencer.add_rules(rules)

    input_test = {'x': 7}

    print(inferencer.evaluate(input_test))
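The fuzzy sets here are defined purely by their breakpoints; as a reminder of the underlying math, a triangular membership function over the points (a, 0), (b, 1), (c, 0) can be written as below (a generic sketch, not the actual fset.Triangle implementation):

# Generic triangular membership function for breakpoints (a, 0), (b, 1), (c, 0);
# illustrative only, not the actual fset.Triangle implementation.
def triangle_membership(x, a, b, c):
    if x <= a or x >= c:
        return 0.0
    if x <= b:
        return (x - a) / (b - a)
    return (c - x) / (c - b)

# e.g. membership of the test input x = 7 in A2 = Triangle('A2', (2, 0), (5, 1), (8, 0))
print(triangle_membership(7, 2, 5, 8))  # -> 0.333...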
Example #26
""" load data """
train_loader, val_loader, test_loader, m_dh = prepare_data(
    TRAIN_PATH, VAL_PATH, TEST_PATH, DH_PATH, LOAD_FROM_DUMP, BATCH_SIZE)
""" model setup """
INPUT_DIM, OUTPUT_DIM = len(m_dh.de_vocab), len(m_dh.en_vocab)

enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, 0)
attn = Attention(ENC_HID_DIM, DEC_HID_DIM, ATTN_DIM)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, 0, attn)

model = Seq2Seq(enc, dec)
""" load model """
state_dict = torch.load('ckpts/best.pt')
model.load_state_dict(state_dict['model_state'])

en_infer = Inferencer(m_dh.en_vocab)
de_infer = Inferencer(m_dh.de_vocab)

criterion = torch.nn.CrossEntropyLoss(ignore_index=1)

src, trg = next(iter(test_loader))

trg_text = en_infer.decode(trg)
with open('validate_sample/target.txt', 'w') as f:
    f.writelines(trg_text)
print(trg_text)
inversion = Inversion(model,
                      MAX_LEN,
                      INPUT_DIM,
                      criterion,
                      ENTROPY_S,
Example #27
    TRAIN_PATH, VAL_PATH, TEST_PATH, DH_PATH, LOAD_FROM_DUMP, 3)
""" model setup """
INPUT_DIM, OUTPUT_DIM = len(m_dh.de_vocab), len(m_dh.en_vocab)

enc = Encoder(INPUT_DIM, ENC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, ENC_DROPOUT)
attn = Attention(ENC_HID_DIM, DEC_HID_DIM, ATTN_DIM)
dec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, ENC_HID_DIM, DEC_HID_DIM, DEC_DROPOUT,
              attn)

model = Seq2Seq(enc, dec)
""" load model """
state_dict = torch.load('ckpts/best.pt')
model.load_state_dict(state_dict['model_state'])
model.eval()

en_infer = Inferencer(m_dh.en_vocab)

src, trg = next(iter(test_loader))
""" ______________ """
import matplotlib.pyplot as plt
import numpy


def plot_head_map(mma, target_labels, source_labels):
    fig, ax = plt.subplots()
    heatmap = ax.pcolor(mma, cmap=plt.cm.Blues)
    # put the major ticks at the middle of each cell
    ax.set_xticks(numpy.arange(mma.shape[1]) + 0.5,
                  minor=False)  # mma.shape[1] = target seq length
    ax.set_yticks(numpy.arange(mma.shape[0]) + 0.5,
                  minor=False)  # mma.shape[0] = input seq length
Example #28
def main():
    input_directory = sys.argv[1];
    output_directory = sys.argv[2];
    if len(sys.argv)==4 and len(sys.argv[3])>0:
        log_beta_path = sys.argv[3]
    else:
        log_beta_path = None;

    from inferencer import Inferencer;
    lda_inferencer = Inferencer();
    lda_inferencer.load_params(os.path.join(output_directory, "current-params"));
    lda_inferencer.load_tree(os.path.join(output_directory, "current-tree"));
    lda_inferencer.format_output(input_directory);
    
    if lda_inferencer._update_hyper_parameter:
        lda_inferencer.dump_parameters(os.path.join(output_directory, "current-params"));
    
    lda_inferencer.dump_E_log_beta(os.path.join(output_directory, "current-E-log-beta"));

    if log_beta_path is not None:
        lda_inferencer.export_E_log_beta(log_beta_path);

    #if not hybrid_mode:
        #lda_inferencer.dump_gamma(os.path.join(output_directory, "current-gamma"));

    lda_inferencer.export_gamma(os.path.join(output_directory, "gamma"));
Example #29
def main(TYPE="train", W_PATH=None):
    print("TYPE", TYPE)
    print("W_PATH", W_PATH)
    engine = Trainer(W_PATH) if TYPE == "train" else Inferencer(W_PATH)
    engine.run()
Example #30
def main(
    metadata_path,
    source_dir,
    target_dir,
    output_dir,
    root,
    batch_size,
    reload,
    reload_dir,
):
    """Main function"""

    # import Inferencer module

    inferencer = Inferencer(root)
    device = inferencer.device
    sample_rate = inferencer.sample_rate
    print(f"[INFO]: Inferencer is loaded from {root}.")

    metadata = json.load(open(metadata_path))
    print(f"[INFO]: Metadata list is loaded from {metadata_path}.")

    output_dir = Path(output_dir) / Path(root).stem / \
        f"{metadata['source_corpus']}2{metadata['target_corpus']}"
    output_dir.mkdir(parents=True, exist_ok=True)

    if reload:
        metadata, conv_mels = reload_from_numpy(device, metadata, reload_dir)
    else:
        metadata, conv_mels = conversion(inferencer, device, root, metadata,
                                         source_dir, target_dir, output_dir)

    waveforms = []
    max_memory_use = conv_mels[0].size(0) * batch_size

    with torch.no_grad():
        pbar = tqdm(total=metadata["n_samples"])
        left = 0
        while (left < metadata["n_samples"]):
            batch_size = max_memory_use // conv_mels[left].size(0) - 1
            right = left + min(batch_size, metadata["n_samples"] - left)
            waveforms.extend(
                inferencer.spectrogram2waveform(conv_mels[left:right]))
            pbar.update(batch_size)
            left += batch_size
        pbar.close()

    for pair, waveform in tqdm(zip(metadata["pairs"], waveforms)):
        waveform = waveform.detach().cpu().numpy()

        prefix = Path(pair["src_utt"]).stem
        postfix = Path(pair["tgt_utts"][0]).stem
        file_path = output_dir / f"{prefix}_to_{postfix}.wav"
        pair["converted"] = f"{prefix}_to_{postfix}.wav"

        if Path(root).stem == "BLOW":
            wavfile.write(file_path, sample_rate, waveform)
        else:
            sf.write(file_path, waveform, sample_rate)

    metadata_output_path = output_dir / "metadata.json"
    json.dump(metadata, metadata_output_path.open("w"), indent=2)
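The vocoder loop above sizes each batch so that the current item's mel length times the number of items stays under a fixed budget derived from the first utterance; the same idea in isolation, with hypothetical names:

# Simplified sketch of the length-aware batching used above: keep
# frames_per_item * items_per_batch under a fixed budget (names are hypothetical).
def budget_batches(lengths, max_frames):
    left = 0
    while left < len(lengths):
        batch_size = max(1, max_frames // lengths[left])
        yield left, min(left + batch_size, len(lengths))
        left += batch_size

for start, end in budget_batches([400, 380, 900, 120, 150], max_frames=1600):
    print(start, end)  # prints: 0 4, then 4 5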