Code Example #1
    def load(self,
             checkpoint_path,
             num_speakers=2,
             checkpoint_step=None,
             inference_prenet_dropout=True,
             model_name='tacotron'):
        self.num_speakers = num_speakers

        if os.path.isdir(checkpoint_path):
            load_path = checkpoint_path
            checkpoint_path = get_most_recent_checkpoint(
                checkpoint_path, checkpoint_step)
        else:
            load_path = os.path.dirname(checkpoint_path)

        print('Constructing model: %s' % model_name)

        inputs = tf.placeholder(tf.int32, [None, None], 'inputs')
        input_lengths = tf.placeholder(tf.int32, [None], 'input_lengths')

        batch_size = tf.shape(inputs)[0]
        speaker_id = tf.placeholder_with_default(
            tf.zeros([batch_size], dtype=tf.int32), [None], 'speaker_id')

        load_hparams(hparams, load_path)
        hparams.inference_prenet_dropout = inference_prenet_dropout
        with tf.variable_scope('model') as scope:
            self.model = create_model(hparams)

            self.model.initialize(inputs=inputs,
                                  input_lengths=input_lengths,
                                  num_speakers=self.num_speakers,
                                  speaker_id=speaker_id,
                                  is_training=False)
            self.wav_output = inv_spectrogram_tensorflow(
                self.model.linear_outputs, hparams)

        print('Loading checkpoint: %s' % checkpoint_path)

        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     intra_op_parallelism_threads=1,
                                     inter_op_parallelism_threads=2)
        sess_config.gpu_options.allow_growth = True

        self.sess = tf.Session(config=sess_config)
        self.sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(self.sess, checkpoint_path)
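
Most examples on this page call a project-specific load_hparams(hp, ckpt_dir) that restores saved hyperparameters before rebuilding the graph. A minimal sketch of that pattern, assuming the values were saved as JSON in a file named "hparams" under the checkpoint directory (file name and format are assumptions, not any one project's actual implementation):

import json
import os

def load_hparams(hp, ckpt_dir):
    # Overwrite fields of the argparse namespace / hparams object `hp` with
    # values saved next to a checkpoint (assumed file name "hparams", JSON).
    path = os.path.join(ckpt_dir, "hparams")
    if not os.path.exists(path):
        return
    with open(path) as f:
        for key, value in json.load(f).items():
            setattr(hp, key, value)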
Code Example #2
def test(hp):
    # load hyperparameters
    load_hparams(hp, hp.ckpt)

    logging.info("# Prepare test batches")
    test_batches, num_test_batches, num_test_samples = get_batch(
        hp.test1,
        hp.test1,
        100000,
        100000,
        hp.vocab,
        hp.test_batch_size,
        shuffle=False)
    iter = tf.data.Iterator.from_structure(test_batches.output_types,
                                           test_batches.output_shapes)
    xs, ys = iter.get_next()

    test_init_op = iter.make_initializer(test_batches)

    logging.info("# Load model")
    model = Transformer(hp)

    logging.info("# Session")
    with tf.Session() as sess:
        ckpt_ = tf.train.latest_checkpoint(hp.ckpt)
        ckpt = ckpt_ if ckpt_ else hp.ckpt
        saver = tf.train.Saver()

        saver.restore(sess, ckpt)

        y_hat, mean_loss = model.eval(sess, test_init_op, xs, ys,
                                      num_test_batches)

        logging.info("# get hypotheses")
        hypotheses = get_hypotheses(num_test_samples, y_hat, model.idx2token)

        logging.info("# write results")
        model_output = os.path.split(ckpt)[-1]
        if not os.path.exists(hp.testdir):
            os.makedirs(hp.testdir)
        translation = os.path.join(hp.testdir, model_output)
        with open(translation, 'w', encoding="utf-8") as fout:
            fout.write("\n".join(hypotheses))

        logging.info("# calc bleu score and append it to translation")
        calc_bleu_nltk(hp.test2, translation)
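
calc_bleu_nltk is a project helper; a plausible minimal sketch with NLTK's corpus BLEU, assuming one whitespace-tokenized reference per line and that the score is appended to the translation file (all assumptions):

from nltk.translate.bleu_score import corpus_bleu

def calc_bleu_nltk(ref_path, hyp_path):
    # one reference per hypothesis; whitespace tokenization (assumption)
    with open(ref_path, encoding="utf-8") as f:
        references = [[line.split()] for line in f]
    with open(hyp_path, encoding="utf-8") as f:
        hypotheses = [line.split() for line in f]
    score = corpus_bleu(references, hypotheses)
    with open(hyp_path, "a", encoding="utf-8") as f:
        f.write("\nBLEU = {:.2f}".format(100 * score))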
Code Example #3
def infer(hp):
    load_hparams(hp, hp.ckpt)

    # latest checkpoint
    ckpt_ = tf.train.latest_checkpoint(hp.ckpt)
    ckpt = ckpt_ if ckpt_ else hp.ckpt

    # load graph
    saver = tf.train.import_meta_graph(ckpt + '.meta', clear_devices=True)
    graph = tf.get_default_graph()

    # load tensor
    input_x = graph.get_tensor_by_name("input_x:0")
    is_training = graph.get_tensor_by_name("is_training:0")
    y_predict = graph.get_tensor_by_name("y_predict:0")

    # vocabulary
    token2idx, idx2token = load_vocab(hp.vocab)

    logging.info("# Session")
    with tf.Session() as sess:
        saver.restore(sess, ckpt)

        while True:
            text = input("请输入测试样本:")

            # tokens to ids
            tokens = [ch for ch in text] + ["</s>"]
            x = [token2idx.get(t, token2idx["<unk>"]) for t in tokens]

            # run calculation
            predict_result = sess.run(y_predict,
                                      feed_dict={
                                          input_x: [x],
                                          is_training: False
                                      })

            # ids to tokens
            token_pred = [
                idx2token.get(t_id, "#") for t_id in predict_result[0]
            ]
            translation = "".join(token_pred).split("</s>")[0]

            logging.info("  译文: {}".format(translation))

            time.sleep(0.1)
Code Example #4
def main(config):
    ### create Experiment Directory ###
    # combine all hyperparameters into a single file
    hparams = load_hparams(config.exp_config)
    hparams["model_config"] = load_hparams(config.model_config)

    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=config.output_folder,
        hyperparams_to_save=config.exp_config,
        overrides=None,
    )

    ### Datasets and Tokenizer ###
    train_data, valid_data, test_data = dataio_prepare(hparams)

    # Trainer initialization
    run_opts = {"device": "cuda:0"} # certain args from yaml file will autoamtically get picked as run_opts

    # We download the tokenizer from HuggingFace (or elsewhere depending on
    # the path given in the YAML file).
    #run_on_main(hparams["pretrainer"].collect_files)
    #hparams["pretrainer"].load_collected(device=run_opts["device"])

    lm_brain = LM(
        modules=hparams["model_config"]["modules"],
        opt_class=hparams["model_config"]["optimizer"],
        hparams=hparams["model_config"],
        run_opts=run_opts,
        checkpointer=hparams["model_config"]["checkpointer"],
    )

    lm_brain.fit(
        lm_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["model_config"]["train_dataloader_opts"],
        valid_loader_kwargs=hparams["model_config"]["valid_dataloader_opts"],
    )

    # evaluation
    test_stats = lm_brain.evaluate(
        test_data,
        min_key="loss",
        test_loader_kwargs=hparams["model_config"]["test_dataloader_opts"],
    )
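
Note that load_hparams here returns a dict (the result is indexed as hparams["model_config"]), unlike the namespace-mutating variant in the Transformer examples. A hedged sketch of such a helper for this SpeechBrain-style recipe, assuming HyperPyYAML config files:

from hyperpyyaml import load_hyperpyyaml

def load_hparams(config_path):
    # parse a HyperPyYAML config into a plain dict (assumed helper)
    with open(config_path) as f:
        return load_hyperpyyaml(f)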
Code Example #5
File: templates.py  Project: Traeyee/DickLearning
def export_pb_template(class_model):
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    os.environ['CUDA_VISIBLE_DEVICES'] = "-1"

    logging.info("# hparams")
    hparams = Hparams()
    parser = hparams.parser
    hp = parser.parse_args()
    load_hparams(hp, hp.logdir)
    context = Context(hp)

    params = {"maxlens": 0x3f3f}
    eval_batches, num_eval_batches, num_eval_samples = get_batch(
        fpath=hp.eval_data,
        task_type=hp.task_type,
        input_indices=context.input_indices,
        vocabs=context.vocabs,
        context=params,
        batch_size=hp.batch_size,
        shuffle=True)

    # create an iterator of the correct shape and type
    iterr = tf.data.Iterator.from_structure(eval_batches.output_types,
                                            eval_batches.output_shapes)
    inputs_and_target = iterr.get_next()

    model = class_model(context)
    _ = model.eval(inputs_and_target[:-1], inputs_and_target[-1])
    inference_name = model.get_inference_op_name()
    logging.info("inference_node_name:%s" % inference_name)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.latest_checkpoint(hp.logdir)
        saver.restore(sess, ckpt)
        inference_node_name = inference_name[:inference_name.find(":")]
        graph_def = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph_def, output_node_names=[inference_node_name])
        tf.train.write_graph(graph_def,
                             './model',
                             '%s.pb' % hp.pb_name,
                             as_text=False)
        save_operation_specs(os.path.join("./model", '%s.ops' % hp.pb_name))
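
To sanity-check the export, the frozen graph can be read back with the standard TF1 idiom; the path mirrors the write_graph call above, and tensor names would come from the .ops file it saves (the 'my_model' name is illustrative):

import tensorflow as tf

with tf.gfile.GFile('./model/my_model.pb', 'rb') as f:  # '%s.pb' % hp.pb_name
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default():
    tf.import_graph_def(graph_def, name='')
    # tensors can now be fetched by name in a session, e.g.
    # sess.run('some_inference_op:0', feed_dict={...})  # names are model-specific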
Code Example #6
def main(argv):
    """Evaluates energy and prints the result."""
    del argv  # Not used by main.
    hparams_path = os.path.join(FLAGS.checkpoint_dir, 'hparams.pbtxt')
    hparams = utils.load_hparams(hparams_path)
    hparams.parse(FLAGS.hparams)  # optional way to override some hyperparameters
    n_sites = hparams.num_sites

    # TODO(dkochkov) make a more comprehensive Hamiltonian construction method
    bonds_file_path = os.path.join(FLAGS.checkpoint_dir, 'J.txt')
    heisenberg_jx = FLAGS.heisenberg_jx
    if os.path.exists(bonds_file_path):
        heisenberg_data = np.genfromtxt(bonds_file_path, dtype=int)
        heisenberg_bonds = [[bond[0], bond[1]] for bond in heisenberg_data]
    else:
        heisenberg_bonds = [(i, (i + 1) % n_sites) for i in range(0, n_sites)]

    wavefunction = wavefunctions.build_wavefunction(hparams)
    hamiltonian = operators.HeisenbergHamiltonian(heisenberg_bonds,
                                                  heisenberg_jx, 1.)

    evaluator = evaluation.MonteCarloOperatorEvaluator()

    shared_resources = {}

    graph_building_args = {
        'wavefunction': wavefunction,
        'operator': hamiltonian,
        'hparams': hparams,
        'shared_resources': shared_resources
    }

    evaluation_ops = evaluator.build_eval_ops(**graph_building_args)

    init = tf.global_variables_initializer()
    session = tf.Session()
    session.run(init)

    checkpoint_manager = tf.train.Saver(wavefunction.get_trainable_variables())

    latest_checkpoint = tf.train.latest_checkpoint(hparams.checkpoint_dir)
    checkpoint_manager.restore(session, latest_checkpoint)

    data = evaluator.run_evaluation(evaluation_ops,
                                    session,
                                    hparams,
                                    epoch_num=0)
    mean_energy = np.mean(data)
    uncertainty = np.sqrt(np.std(data)) / len(data)
    print('Energy: {} +/- {}'.format(mean_energy, uncertainty))
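
The uncertainty line above computes np.sqrt(np.std(data)) / len(data), which is an unusual estimator; for comparison, the conventional standard error of the mean would be:

import numpy as np

# standard error of the mean: sample std divided by sqrt(sample size)
uncertainty = np.std(data) / np.sqrt(len(data))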
Code Example #7
def export_model(hp):
    """
    export model checkpoint to pb file
    """
    load_hparams(hp, hp.ckpt)

    ckpt_ = tf.train.latest_checkpoint(hp.ckpt)
    ckpt = ckpt_ if ckpt_ else hp.ckpt

    saver = tf.train.import_meta_graph(ckpt + '.meta', clear_devices=True)
    graph = tf.get_default_graph()

    input_x = graph.get_tensor_by_name("input_x:0")
    decoder_input = graph.get_tensor_by_name("decoder_input:0")
    is_training = graph.get_tensor_by_name("is_training:0")
    y_predict = graph.get_tensor_by_name("y_predict:0")

    with tf.Session() as sess:
        saver.restore(sess, ckpt)  # restore graph

        builder = tf.saved_model.builder.SavedModelBuilder(hp.export_model_dir)
        inputs = {
            'input': tf.saved_model.utils.build_tensor_info(input_x),
            'decoder_input':
            tf.saved_model.utils.build_tensor_info(decoder_input),
            'is_training': tf.saved_model.utils.build_tensor_info(is_training)
        }
        outputs = {
            'y_predict': tf.saved_model.utils.build_tensor_info(y_predict)
        }

        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs, outputs, 'signature')
        builder.add_meta_graph_and_variables(sess, ['classical2modern'],
                                             {'signature': signature})
        builder.save()
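
The SavedModel written above can be loaded back with the TF1 loader using the same tag and signature key; a hedged usage sketch (export_model_dir refers to the directory used above):

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, ['classical2modern'],
                                            export_model_dir)
    signature = meta_graph.signature_def['signature']
    x_name = signature.inputs['input'].name
    y_name = signature.outputs['y_predict'].name
    # predictions = sess.run(y_name, feed_dict={x_name: ...})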
Code Example #8
File: main.py  Project: efikarra/vae
def create_or_load_hparams(out_dir, default_hparams, flags):
    """Create hparams or load hparams from out_dir."""
    hparams = utils.load_hparams(out_dir)
    if not hparams:
        hparams = default_hparams
        hparams = utils.maybe_parse_standard_hparams(hparams,
                                                     flags.hparams_path)
        hparams.add_hparam("x_dim", hparams.img_width * hparams.img_height)
    else:
        hparams = utils.ensure_compatible_hparams(hparams, default_hparams,
                                                  flags)

    # Save HParams
    utils.save_hparams(out_dir, hparams)

    # Print HParams
    utils.print_hparams(hparams)
    return hparams
Code Example #9
def create_or_load_hparams(out_dir, default_hparams, flags):
    # if the out_dir already contains hparams file, load these hparams.
    hparams = utils.load_hparams(out_dir)
    if not hparams:
        hparams = default_hparams
        hparams = utils.maybe_parse_standard_hparams(hparams,
                                                     flags.hparams_path)
        hparams = extend_hparams(hparams)
    else:
        # ensure that the loaded hparams and the command-line hparams are compatible; if not, the command-line hparams are overwritten!
        hparams = utils.ensure_compatible_hparams(hparams, default_hparams,
                                                  flags)

    # Save HParams
    utils.save_hparams(out_dir, hparams)

    # Print HParams
    print("Print hyperparameters:")
    utils.print_hparams(hparams)
    return hparams
Code Example #10
def create_or_load_hparams(
    out_dir, default_hparams, hparams_path, save_hparams=True):
  """Create hparams or load hparams from out_dir."""
  hparams = utils.load_hparams(out_dir)
  if not hparams:
    hparams = default_hparams
    hparams = utils.maybe_parse_standard_hparams(
        hparams, hparams_path)
  else:
    hparams = ensure_compatible_hparams(hparams, default_hparams, hparams_path)
  hparams = extend_hparams(hparams)

  # Save HParams
  if save_hparams:
    utils.save_hparams(out_dir, hparams)
    for metric in hparams.metrics:
      utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"), hparams)

  # Print HParams
  utils.print_hparams(hparams)
  return hparams
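
A hedged call-site sketch for the variant above, assuming default_hparams is a tf.contrib.training.HParams as in TF1 NMT-style code (field names are illustrative; extend_hparams and utils are the project's own helpers):

import tensorflow as tf

default_hparams = tf.contrib.training.HParams(num_units=512, metrics=["bleu"])
hparams = create_or_load_hparams(
    "/tmp/out_dir", default_hparams, hparams_path=None, save_hparams=False)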
Code Example #11
def main(config):
    ### get Train Data ###
    # list of {'audio_sph_file': str, 'transcript_all_file': str, 'transcript_uid': str, 'filter_criteria': str}
    # meaning that <audio_sph_file>'s transcript is the one in the <transcript_all_file> with id <transcript_uid>
    hparams = load_hparams(config.train_data_config)
    train_corpus = get_utterance_manifest_from_datasets(hparams["datasets"])
    
    ### create json file for SpeechBrain-->SentencePiece ###
    annotation_read = "transcript" # key-name for each `entry` in `train_corpus` having the transcript as its value

    ### write config file ###
    write_hyperpyyaml_file(os.path.join(config.output_folder, "sp_vocab_{}_{}.yaml".format(config.vocab_size, config.model_type)),
                           {"model_dir": config.output_folder,
                            "vocab_size": config.vocab_size,
                            "model_type": config.model_type,
                            "sp_model_file": os.path.join(config.output_folder, "{}_{}.model".format(str(config.vocab_size), config.model_type)),
                            "unk_index": config.unk_index,
                            "bos_index": config.bos_index,
                            "eos_index": config.eos_index,
                            "pad_index": config.pad_index})



    ### train custom SentencePiece Tokenizer ###
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".json") as f:
        f.write(json.dumps(dict([(entry["transcript_uid"], {annotation_read: entry["transcript"]}) for entry in train_corpus])))
        f.seek(0) 

        SentencePiece(model_dir                = config.output_folder,
                      vocab_size               = config.vocab_size,
                      annotation_train         = f.name,
                      annotation_read          = annotation_read,
                      annotation_format        = "json",
                      unk_id                   = config.unk_index,
                      bos_id                   = config.bos_index,
                      eos_id                   = config.eos_index,
                      pad_id                   = config.pad_index,
                      model_type               = config.model_type,
                      character_coverage       = config.character_coverage,
                      annotation_list_to_check = config.annotation_list_to_check)
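
The YAML written above can later be read back with hyperpyyaml, e.g. by a downstream training recipe:

import os
from hyperpyyaml import load_hyperpyyaml

yaml_path = os.path.join(config.output_folder,
                         "sp_vocab_{}_{}.yaml".format(config.vocab_size,
                                                      config.model_type))
with open(yaml_path) as f:
    sp_config = load_hyperpyyaml(f)
print(sp_config["sp_model_file"])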
Code Example #12
def main():
    args = setup_args()
    logging.info(args)

    # Let us first load hparams for the trained model
    hparams = load_hparams(os.path.join(args.model_dir, 'hparams'))
    logging.info(hparams)

    # Let us create the vocab table next
    vocab_table = lookup_ops.index_table_from_file(hparams.vocab,
                                                   default_value=0)
    list_candidate_vectors = get_candidate_vectors(vocab_table, args, hparams)
    candidate_vectors = convert_to_numpy_array(list_candidate_vectors)
    del list_candidate_vectors

    tf.reset_default_graph()
    vocab_table = lookup_ops.index_table_from_file(hparams.vocab,
                                                   default_value=0)
    iterator = create_candidates_with_gt_and_input_iterator(
        vocab_table, args.map, args.gt, args.txt1, args.scores_batch_size,
        args.max_len)

    model = EvalModel(hparams, iterator, candidate_vectors)

    with tf.Session() as sess:
        sess.run(tf.tables_initializer())
        sess.run(model.WC_assign)
        latest_ckpt = tf.train.latest_checkpoint(
            os.path.join(args.model_dir, args.best_model_dir))
        model.saver.restore(sess, latest_ckpt)
        all_scores, time_taken = model.compute_scores(sess)

    logging.info('Num scores: %d Time: %ds' % (len(all_scores), time_taken))
    with open(args.scores_pkl, 'wb') as fw:
        pkl.dump(all_scores, fw)

    get_retrieval_metrics(args.out_metrics, all_scores)
Code Example #13
File: export_qap_model.py  Project: zhuwenbo1988/nlp
# -*- coding: utf-8 -*-

import tensorflow as tf
from model import Transformer
from hparams import Hparams
from utils import load_hparams

ckpt_dir = 'log/1'

# load hyperparameters
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()

load_hparams(hp, ckpt_dir)

with tf.Session() as sess:

    input_ids_p = tf.placeholder(tf.int32, [None, None], name="input_ids")
    input_len_p = tf.placeholder(tf.int32, [None], name="input_len")

    m = Transformer(hp)
    # the tf.constant(1) element is a dummy (unused)
    xs = (input_ids_p, input_len_p, tf.constant(1))
    memory, _, _ = m.encode(xs, False)

    vector = tf.reduce_mean(memory, axis=1, name='avg_vector')

    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))
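
A hedged continuation of the session above, showing how the averaged encoder output could be fetched once the checkpoint is restored (the token ids are purely illustrative):

    import numpy as np  # would normally sit at the top of the file

    sentence_vec = sess.run(vector, feed_dict={
        input_ids_p: np.array([[4, 8, 15, 16]], dtype=np.int32),  # hypothetical ids
        input_len_p: np.array([4], dtype=np.int32),
    })
    print(sentence_vec.shape)  # (1, d_model)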
Code Example #14
    num_samples
    '''

    sents = [sent1.strip().encode('utf-8')]
    #batches = input_fn(sents, sents, vocab_fpath, batch_size, shuffle=shuffle)
    batches = generator_fn(sents, sents, vocab_fpath)
    return list(batches)[0]


logging.basicConfig(level=logging.INFO)

logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
load_hparams(hp, hp.ckpt)

logging.info("# Load model")

logging.info("# Load trained bpe model")
sp = spm.SentencePieceProcessor()
sp.Load("iwslt2016/segmented/bpe.model")

logging.info("# Session")
with tf.Session() as sess:
    ckpt_ = tf.train.latest_checkpoint(hp.ckpt)
    ckpt = hp.ckpt if ckpt_ is None else ckpt_  # if ckpt_ is None, hp.ckpt is a checkpoint file; otherwise it is a directory
    # tmp_text = "我 是 何 杰"
    # tmp_pieces = sp.EncodeAsPieces(tmp_text)
    # tmp_batches, num_tmp_batches, num_tmp_samples = get_batch_single(" ".join(tmp_pieces) + "\n",
    #                                                                     hp.vocab, 1, shuffle=False)
Code Example #15
File: test.py  Project: OuyKai/KEPN
import numpy as np
import tensorflow as tf

from data_load import get_batch
from model import Transformer
from hparams import Hparams
from utils import get_hypotheses, load_hparams
import logging

logging.basicConfig(level=logging.INFO)

logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
load_hparams(hp, hp.logdir)


def beam_search(x, sess, g, batch_size=hp.batch_size):
    '''Not used in the current version; will be added in the future.'''
    inputs = np.reshape(np.transpose(np.array([x] * hp.beam_size), (1, 0, 2)),
                        (hp.beam_size * batch_size, hp.max_len))
    preds = np.zeros((batch_size, hp.beam_size, hp.y_max_len), np.int32)
    prob_product = np.zeros((batch_size, hp.beam_size))
    stc_length = np.ones((batch_size, hp.beam_size))

    for j in range(hp.y_max_len):
        _probs, _preds = sess.run(
            g.preds, {
                g.x: inputs,
                g.y: np.reshape(preds,
Code Example #16
def main():

    with tf.device(
            '/cpu:0'):  # CPU is faster here; setting a GPU raises an error, and omitting tf.device is slower.
        config = get_arguments()
        started_datestring = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
        logdir = os.path.join(config.logdir, 'generate', started_datestring)
        print('logdir0-------------' + logdir)

        if not os.path.exists(logdir):
            os.makedirs(logdir)

        load_hparams(hparams, config.checkpoint_dir)

        sess = tf.Session()
        scalar_input = hparams.scalar_input
        net = WaveNetModel(
            batch_size=config.batch_size,
            dilations=hparams.dilations,
            filter_width=hparams.filter_width,
            residual_channels=hparams.residual_channels,
            dilation_channels=hparams.dilation_channels,
            quantization_channels=hparams.quantization_channels,
            out_channels=hparams.out_channels,
            skip_channels=hparams.skip_channels,
            use_biases=hparams.use_biases,
            scalar_input=hparams.scalar_input,
            global_condition_channels=hparams.gc_channels,
            global_condition_cardinality=config.gc_cardinality,
            local_condition_channels=hparams.num_mels,
            upsample_factor=hparams.upsample_factor,
            legacy=hparams.legacy,
            residual_legacy=hparams.residual_legacy,
            train_mode=False
        )  # during training, global_condition_cardinality was inferred by AudioReader; here it must be supplied explicitly

        if scalar_input:
            samples = tf.placeholder(tf.float32, shape=[net.batch_size, None])
        else:
            samples = tf.placeholder(
                tf.int32, shape=[net.batch_size, None]
            )  # samples: mu-law-encoded values, before one-hot conversion. (batch_size, length)

        # The local condition should be (N, T, num_mels), but it is fed one step at a time, so it enters as (N, 1, num_mels); squeezing gives (N, num_mels).
        upsampled_local_condition = tf.placeholder(
            tf.float32, shape=[net.batch_size, hparams.num_mels])

        next_sample = net.predict_proba_incremental(
            samples, upsampled_local_condition, [config.gc_id] * net.batch_size
        )  # applies the Fast WaveNet Generation Algorithm (arXiv:1611.09482)

        # Build the upsampled local-condition data to feed into the upsampled_local_condition placeholder.
        print('logdir0-------------' + logdir)
        mel_input = np.load(config.mel)
        sample_size = mel_input.shape[0] * hparams.hop_size
        mel_input = np.tile(mel_input, (config.batch_size, 1, 1))
        with tf.variable_scope('wavenet', reuse=tf.AUTO_REUSE):
            upsampled_local_condition_data = net.create_upsample(
                mel_input, upsample_type=hparams.upsample_type)

        var_list = [
            var for var in tf.global_variables() if 'queue' not in var.name
        ]
        saver = tf.train.Saver(var_list)
        print('Restoring model from {}'.format(config.checkpoint_dir))

        load(saver, sess, config.checkpoint_dir)
        init_op = tf.group(tf.initialize_all_variables(),
                           net.queue_initializer)

        sess.run(init_op)  # without this, the variables would still hold the values restored from the checkpoint

        quantization_channels = hparams.quantization_channels
        if config.wav_seed:
            # If wav_seed is shorter than receptive_field, shouldn't it at least be padded? It is returned as-is however short it is, so a seed that is too short causes an error.
            seed = create_seed(config.wav_seed, hparams.sample_rate,
                               quantization_channels, net.receptive_field,
                               scalar_input)  # --> mu-law encoded
            if scalar_input:
                waveform = seed.tolist()
            else:
                waveform = sess.run(
                    seed).tolist()  # [116, 114, 120, 121, 127, ...]

            print('Priming generation...')
            for i, x in enumerate(waveform[-net.receptive_field:-1]
                                  ):  # the very last sample is fed in the first iteration of the loop below
                if i % 100 == 0:
                    print('Priming sample {}/{}'.format(
                        i, net.receptive_field),
                          end='\r')
                sess.run(next_sample,
                         feed_dict={
                             samples:
                             np.array([x] * net.batch_size).reshape(
                                 net.batch_size, 1),
                             upsampled_local_condition:
                             np.zeros([net.batch_size, hparams.num_mels])
                         })
            print('Done.')
            waveform = np.array([waveform[-net.receptive_field:]] *
                                net.batch_size)
        else:
            # Silence with a single random sample at the end.
            if scalar_input:
                waveform = [0.0] * (net.receptive_field - 1)
                waveform = np.array(waveform * net.batch_size).reshape(
                    net.batch_size, -1)
                waveform = np.concatenate(
                    [
                        waveform, 2 * np.random.rand(net.batch_size).reshape(
                            net.batch_size, -1) - 1
                    ],
                    axis=-1)  # append a random number in [-1, 1] at the end
                # waveform: shape (batch_size, net.receptive_field)
            else:
                waveform = [quantization_channels / 2] * (
                    net.receptive_field - 1
                )  # build receptive_field - 1 samples, then append one random sample below
                waveform = np.array(waveform * net.batch_size).reshape(
                    net.batch_size, -1)
                waveform = np.concatenate(
                    [
                        waveform,
                        np.random.randint(quantization_channels,
                                          size=net.batch_size).reshape(
                                              net.batch_size, -1)
                    ],
                    axis=-1)  # before one-hot conversion. (batch_size, 5117)

        start_time = time.time()
        upsampled_local_condition_data = sess.run(
            upsampled_local_condition_data)
        last_sample_timestamp = datetime.now()
        for step in range(sample_size):  # loop sample_size times to generate the desired length

            window = waveform[:,
                              -1:]  # feed only the last sample into samples.  window: shape (N, 1)

            # Run the WaveNet to predict the next sample.

            # In the non-fast case, window: [128.0, 128.0, ..., 128.0, 178, 185]
            # In the fast case, window is a single number.
            prediction = sess.run(
                next_sample,
                feed_dict={
                    samples:
                    window,
                    upsampled_local_condition:
                    upsampled_local_condition_data[:, step, :]
                }
            )  # samples is mu-law encoded; it is converted to one-hot during computation --> (batch_size, 256)

            if scalar_input:
                sample = prediction  # sampled from a logistic distribution, so there is randomness
            else:
                # Scale prediction distribution using temperature.
                # When config.temperature == 1, the next step merely divides each element by the sum; softmax has already been applied, so the sum is 1 and the values are unchanged.
                # When config.temperature != 1, the log of each element is divided by the temperature, and the result is rescaled to sum to 1.
                np.seterr(divide='ignore')
                scaled_prediction = np.log(
                    prediction
                ) / config.temperature  # no change when config.temperature == 1
                scaled_prediction = (
                    scaled_prediction - np.logaddexp.reduce(
                        scaled_prediction, axis=-1, keepdims=True)
                )  # np.log(np.sum(np.exp(scaled_prediction)))
                scaled_prediction = np.exp(scaled_prediction)
                np.seterr(divide='warn')

                # Prediction distribution at temperature=1.0 should be unchanged after
                # scaling.
                if config.temperature == 1.0:
                    np.testing.assert_allclose(
                        prediction,
                        scaled_prediction,
                        atol=1e-5,
                        err_msg=
                        'Prediction scaling at temperature=1.0 is not working as intended.'
                    )

                # Since selection is not done by argmax, the same input can produce different outputs.
                sample = [[
                    np.random.choice(np.arange(quantization_channels), p=p)
                ] for p in scaled_prediction]  # choose one sample per batch

            waveform = np.concatenate([waveform, sample],
                                      axis=-1)  # window.shape: (N, 1)

            # Show progress only once per second.
            current_sample_timestamp = datetime.now()
            time_since_print = current_sample_timestamp - last_sample_timestamp
            if time_since_print.total_seconds() > 1.:
                duration = time.time() - start_time
                print('Sample {:<3d}/{:<3d}, ({:.3f} sec/step)'.format(
                    step + 1, sample_size, duration),
                      end='\r')
                last_sample_timestamp = current_sample_timestamp

        # Introduce a newline to clear the carriage return from the progress.
        print()

        # Save the result as a wav file.
        if hparams.input_type == 'raw':
            out = waveform[:, net.receptive_field:]
        elif hparams.input_type == 'mulaw':
            decode = mu_law_decode(samples,
                                   quantization_channels,
                                   quantization=False)
            out = sess.run(
                decode, feed_dict={samples: waveform[:, net.receptive_field:]})
        else:  # 'mulaw-quantize'
            decode = mu_law_decode(samples,
                                   quantization_channels,
                                   quantization=True)
            out = sess.run(
                decode, feed_dict={samples: waveform[:, net.receptive_field:]})

        # save wav

        for i in range(net.batch_size):
            config.wav_out_path = logdir + '/test-{}.wav'.format(i)
            mel_path = config.wav_out_path.replace(".wav", ".png")

            gen_mel_spectrogram = audio.melspectrogram(out[i], hparams).astype(
                np.float32).T
            audio.save_wav(out[i], config.wav_out_path,
                           hparams.sample_rate)  # save_wav modifies out[i] in place

            plot.plot_spectrogram(gen_mel_spectrogram,
                                  mel_path,
                                  title='generated mel spectrogram',
                                  target_spectrogram=mel_input[i])
        print('Finished generating.')
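
The temperature step inside the sampling loops of this example (and Example #18 below) can be isolated into a small helper; a minimal numpy sketch equivalent to the scaled_prediction code above:

import numpy as np

def apply_temperature(p, temperature):
    # Rescale a softmax distribution p by a sampling temperature;
    # temperature == 1.0 leaves p unchanged up to numerical error.
    logp = np.log(p) / temperature
    logp -= np.logaddexp.reduce(logp, axis=-1, keepdims=True)  # log-normalize
    return np.exp(logp)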
Code Example #17
def main(argv):
    """Runs supervised wavefunction optimization.

  This pipeline optimizes a wavefunction by matching amplitudes of a target state.

  """
    del argv  # Not used.

    supervisor_path = os.path.join(FLAGS.supervisor_dir, 'hparams.pbtxt')
    supervisor_hparams = utils.load_hparams(supervisor_path)

    hparams = utils.create_hparams()
    hparams.set_hparam('num_sites', supervisor_hparams.num_sites)
    hparams.set_hparam('checkpoint_dir', FLAGS.checkpoint_dir)
    hparams.set_hparam('supervisor_dir', FLAGS.supervisor_dir)
    hparams.set_hparam('basis_file_path', FLAGS.basis_file_path)
    hparams.set_hparam('num_epochs', FLAGS.num_epochs)
    hparams.set_hparam('wavefunction_type', FLAGS.wavefunction_type)
    hparams.parse(FLAGS.hparams)
    hparams_path = os.path.join(hparams.checkpoint_dir, 'hparams.pbtxt')

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)

    if os.path.exists(hparams_path) and not FLAGS.override:
        print('Hparams file already exists')
        exit()

    with tf.gfile.GFile(hparams_path, 'w') as file:
        file.write(str(hparams.to_proto()))

    target_wavefunction = wavefunctions.build_wavefunction(supervisor_hparams)
    wavefunction = wavefunctions.build_wavefunction(hparams)

    wavefunction_optimizer = training.SUPERVISED_OPTIMIZERS[FLAGS.optimizer]()

    shared_resources = {}

    graph_building_args = {
        'wavefunction': wavefunction,
        'target_wavefunction': target_wavefunction,
        'hparams': hparams,
        'shared_resources': shared_resources
    }

    train_ops = wavefunction_optimizer.build_opt_ops(**graph_building_args)

    session = tf.Session()
    init = tf.global_variables_initializer()
    init_l = tf.local_variables_initializer()
    session.run([init, init_l])

    target_saver = tf.train.Saver(
        target_wavefunction.get_trainable_variables())
    supervisor_checkpoint = tf.train.latest_checkpoint(FLAGS.supervisor_dir)
    target_saver.restore(session, supervisor_checkpoint)
    checkpoint_saver = tf.train.Saver(wavefunction.get_trainable_variables(),
                                      max_to_keep=5)

    if FLAGS.resume_training:
        latest_checkpoint = tf.train.latest_checkpoint(hparams.checkpoint_dir)
        checkpoint_saver.restore(session, latest_checkpoint)

    for epoch_number in range(FLAGS.num_epochs):
        wavefunction_optimizer.run_optimization_epoch(train_ops, session,
                                                      hparams, epoch_number)
        if epoch_number % FLAGS.checkpoint_frequency == 0:
            checkpoint_name = 'model_after_{}_epochs'.format(epoch_number)
            save_path = os.path.join(hparams.checkpoint_dir, checkpoint_name)
            checkpoint_saver.save(session, save_path)

    if FLAGS.generate_vectors:
        vector_generator = evaluation.VectorWavefunctionEvaluator()
        eval_ops = vector_generator.build_eval_ops(wavefunction, None, hparams,
                                                   shared_resources)
        vector_generator.run_evaluation(eval_ops, session, hparams,
                                        FLAGS.num_epochs)
Code Example #18
def main():
    config = get_arguments()
    started_datestring = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
    logdir = os.path.join(config.logdir, 'generate', started_datestring)

    if not os.path.exists(logdir):
        os.makedirs(logdir)

    load_hparams(hparams, config.checkpoint_dir)

    with tf.device('/cpu:0'):

        sess = tf.Session()
        scalar_input = hparams.scalar_input
        net = WaveNetModel(
            batch_size=BATCH_SIZE,
            dilations=hparams.dilations,
            filter_width=hparams.filter_width,
            residual_channels=hparams.residual_channels,
            dilation_channels=hparams.dilation_channels,
            quantization_channels=hparams.quantization_channels,
            out_channels=hparams.out_channels,
            skip_channels=hparams.skip_channels,
            use_biases=hparams.use_biases,
            scalar_input=scalar_input,
            initial_filter_width=hparams.initial_filter_width,
            global_condition_channels=hparams.gc_channels,
            global_condition_cardinality=config.gc_cardinality,
            train_mode=False
        )  # during training, global_condition_cardinality was inferred by AudioReader; here it must be supplied explicitly

        if scalar_input:
            samples = tf.placeholder(tf.float32, shape=[net.batch_size, None])
        else:
            samples = tf.placeholder(
                tf.int32, shape=[net.batch_size, None]
            )  # samples: mu-law-encoded values, before one-hot conversion. (batch_size, length)

        next_sample = net.predict_proba_incremental(
            samples, [config.gc_id] * net.batch_size
        )  # applies the Fast WaveNet Generation Algorithm (arXiv:1611.09482)

        var_list = [
            var for var in tf.global_variables() if 'queue' not in var.name
        ]
        saver = tf.train.Saver(var_list)
        print('Restoring model from {}'.format(config.checkpoint_dir))

        load(saver, sess, config.checkpoint_dir)

        sess.run(
            net.queue_initializer)  # without this, the values restored from the checkpoint remain

        quantization_channels = hparams.quantization_channels
        if config.wav_seed:
            # If wav_seed is shorter than receptive_field, shouldn't it at least be padded? It is returned as-is however short it is, so a seed that is too short causes an error.
            seed = create_seed(config.wav_seed, hparams.sample_rate,
                               quantization_channels, net.receptive_field,
                               scalar_input)  # --> mu-law encoded
            if scalar_input:
                waveform = seed.tolist()
            else:
                waveform = sess.run(
                    seed).tolist()  # [116, 114, 120, 121, 127, ...]

            print('Priming generation...')
            for i, x in enumerate(waveform[-net.receptive_field:-1]
                                  ):  # the very last sample is fed in the first iteration of the loop below
                if i % 100 == 0:
                    print('Priming sample {}'.format(i))
                sess.run(next_sample,
                         feed_dict={
                             samples:
                             np.array([x] * net.batch_size).reshape(
                                 net.batch_size, 1)
                         })
            print('Done.')
            waveform = np.array([waveform[-net.receptive_field:]] *
                                net.batch_size)
        else:
            # Silence with a single random sample at the end.
            if scalar_input:
                waveform = [0.0] * (net.receptive_field - 1)
                waveform = np.array(waveform * net.batch_size).reshape(
                    net.batch_size, -1)
                waveform = np.concatenate(
                    [
                        waveform, 2 * np.random.rand(net.batch_size).reshape(
                            net.batch_size, -1) - 1
                    ],
                    axis=-1)  # append a random number in [-1, 1] at the end
            else:
                waveform = [quantization_channels / 2] * (
                    net.receptive_field - 1
                )  # build receptive_field - 1 samples, then append one random sample below
                waveform = np.array(waveform * net.batch_size).reshape(
                    net.batch_size, -1)
                waveform = np.concatenate(
                    [
                        waveform,
                        np.random.randint(quantization_channels,
                                          size=net.batch_size).reshape(
                                              net.batch_size, -1)
                    ],
                    axis=-1)  # before one-hot conversion. (batch_size, 5117)

        last_sample_timestamp = datetime.now()
        for step in range(config.samples):  # loop to generate the desired length

            window = waveform[:, -1:]  # feed only the last sample into samples

            # Run the WaveNet to predict the next sample.

            # In the non-fast case, window: [128.0, 128.0, ..., 128.0, 178, 185]
            # In the fast case, window is a single number.
            prediction = sess.run(
                next_sample, feed_dict={samples: window}
            )  # samples is mu-law encoded; it is converted to one-hot during computation --> (batch_size, 256)

            if scalar_input:
                sample = prediction
            else:
                # Scale prediction distribution using temperature.
                # When config.temperature == 1, the next step merely divides each element by the sum; softmax has already been applied, so the sum is 1 and the values are unchanged.
                # When config.temperature != 1, the log of each element is divided by the temperature, and the result is rescaled to sum to 1.
                np.seterr(divide='ignore')
                scaled_prediction = np.log(
                    prediction
                ) / config.temperature  # no change when config.temperature == 1
                scaled_prediction = (
                    scaled_prediction - np.logaddexp.reduce(
                        scaled_prediction, axis=-1, keepdims=True)
                )  # np.log(np.sum(np.exp(scaled_prediction)))
                scaled_prediction = np.exp(scaled_prediction)
                np.seterr(divide='warn')

                # Prediction distribution at temperature=1.0 should be unchanged after
                # scaling.
                if config.temperature == 1.0:
                    np.testing.assert_allclose(
                        prediction,
                        scaled_prediction,
                        atol=1e-5,
                        err_msg=
                        'Prediction scaling at temperature=1.0 is not working as intended.'
                    )

                sample = [[
                    np.random.choice(np.arange(quantization_channels), p=p)
                ] for p in scaled_prediction]  # choose one sample per batch

            waveform = np.concatenate([waveform, sample], axis=-1)

            # Show progress only once per second.
            current_sample_timestamp = datetime.now()
            time_since_print = current_sample_timestamp - last_sample_timestamp
            if time_since_print.total_seconds() > 1.:
                print('Sample {:<3d}/{:<3d}'.format(step + 1, config.samples),
                      end='\r')
                last_sample_timestamp = current_sample_timestamp

        # Introduce a newline to clear the carriage return from the progress.
        print()

        # Save the result as a wav file.
        if scalar_input:
            out = waveform
        else:
            decode = mu_law_decode(samples, quantization_channels)
            out = sess.run(decode, feed_dict={samples: waveform})
        for i in range(net.batch_size):
            config.wav_out_path = logdir + '/test-{}.wav'.format(i)
            write_wav(out[i], hparams.sample_rate, config.wav_out_path)

        print('Finished generating.')
Code Example #19
        'lrD': 0.0001,
        'lrG': 0.0001,        
        'beta1': 0.5,
        'beta2': 0.999,
        'nD': 1,
        'nG': 2,
        'image_interval': 1,
        'save_interval': 2,
        'dataroot': '/home/raynor/datasets/april/velocity/',
        'modelroot': '/home/raynor/code/seismogan/saved/',
        'load_name': 'None',
        'load_step': -1,
        }
    
    # Load params from text file
    hparams = load_hparams(args.hparams,defaults)
    
    device = torch.device(f"cuda:{args.gpu}" if (torch.cuda.is_available()) else "cpu")
    print(f'Using device: {device}')
                       
    print('Entering Hyperparameter Loop')
        
    for i,h in enumerate(hparams):
        
        with SummaryWriter(comment=f'_{h.name}') as writer:
            
            print(f'Run name: {h.name}')

            writer.add_hparams(vars(h),{})
                    
            dataset = BasicDataset(model_dir=h.dataroot)
Code Example #20
def main(config):
    ### create Experiment Directory ###
    # combine all hyperparameters into a single file
    hparams = load_hparams(config.exp_config)
    hparams["model_config"] = load_hparams(config.model_config)

    # create exp dir
    sb.create_experiment_directory(experiment_directory=config.output_folder,
                                   hyperparams_to_save=config.exp_config,
                                   overrides=None)

    ### Datasets and Tokenizer ###
    train_data, valid_data, test_data, tokenizer = dataio_prepare(hparams)

    # Trainer initialization
    run_opts = {
        "device": "cuda:0"
    }  # certain args from the yaml file will automatically get picked up as run_opts
    # see https://github.com/speechbrain/speechbrain/blob/develop/recipes/LibriSpeech/ASR/transformer/train.py#L372
    # see https://github.com/speechbrain/speechbrain/blob/d6adc40e742107c34ae38dc63484171938b4d237/speechbrain/core.py#L124
    #print(type(hparams["model_config"]["modules"]))
    #print(type(hparams))
    #exit()
    asr_brain = ASR(
        modules=hparams["model_config"]["modules"],
        opt_class=hparams["model_config"]["Adam"],
        hparams=hparams["model_config"],
        run_opts=run_opts,
        checkpointer=hparams["model_config"]["checkpointer"],
    )

    # adding objects to trainer:
    asr_brain.tokenizer = tokenizer  # hparams["tokenizer"]

    # Training
    asr_brain.fit(
        asr_brain.hparams.epoch_counter,
        train_data,
        valid_data,
        train_loader_kwargs=hparams["model_config"]["train_dataloader_opts"],
        valid_loader_kwargs=hparams["model_config"]["valid_dataloader_opts"],
    )

    raise NotImplementedError

    ### get Train Data ###
    # list of {'audio__file': str, 'transcript_all_file': str, 'transcript_uid': str, 'filter_criteria': str}
    # meaning that <audio__file>'s transcript is the one in the <transcript_all_file> with id <transcript_uid>
    train_corpus = get_utterance_manifest_from_data_config(
        config.train_data_config)
    for x in train_corpus:
        assert os.path.exists(
            x["transcript_all_file"]
        ), "data transcript file {} does not exist! Exiting!".format(
            x["transcript_all_file"])

    ### create json file for SpeechBrain-->SentencePiece ###
    selected_transcripts_json, annotation_read = create_transcripts_json(
        train_corpus)

    ### train custom SentencePiece Tokenizer ###
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".json") as f:
        f.write(json.dumps(selected_transcripts_json))
        f.seek(0)

        SentencePiece(model_dir=config.output_folder,
                      vocab_size=config.vocab_size,
                      annotation_train=f.name,
                      annotation_read=annotation_read,
                      annotation_format="json",
                      model_type=config.model_type,
                      character_coverage=config.character_coverage,
                      annotation_list_to_check=config.annotation_list_to_check)
Code Example #21
File: templates.py  Project: Traeyee/DickLearning
def train_template(class_model,
                   shuffle=True,
                   save_model=True):  # for large datasets, turn off shuffle to save time; when tuning, turn off save_model
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    logging.info("# hparams")
    hparams = Hparams()
    parser = hparams.parser
    hp = parser.parse_args()
    run_type = hp.run_type
    logdir = hp.logdir
    batch_size = hp.batch_size
    num_epochs = hp.num_epochs
    task_type = hp.task_type
    assert hp.run_type in ("new", "continue", "finetune")
    if "continue" == hp.run_type:
        load_hparams(hp, logdir)
        batch_size = hp.batch_size
        if task_type is not None:
            assert task_type == hp.task_type
        task_type = hp.task_type
    assert task_type is not None
    context = Context(hp)
    logging.info("# Prepare train/eval batches")
    logging.info("Use %s for training set", hp.train_data)
    logging.info("Use %s for evaluation set", hp.eval_data)
    eval_batches, num_eval_batches, num_eval_samples = get_batch(
        fpath=hp.eval_data,
        task_type=task_type,
        input_indices=context.input_indices,
        vocabs=context.vocabs,
        context=context,
        batch_size=batch_size,
        shuffle=False)
    train_batches, num_train_batches, num_train_samples = get_batch(
        fpath=hp.train_data,
        task_type=task_type,
        input_indices=context.input_indices,
        vocabs=context.vocabs,
        context=context,
        batch_size=batch_size,
        shuffle=shuffle)

    # create an iterator of the correct shape and type
    iterr = tf.data.Iterator.from_structure(train_batches.output_types,
                                            train_batches.output_shapes)
    inputs_and_target = iterr.get_next()

    # copied as-is; not yet very familiar with these APIs
    train_init_op = iterr.make_initializer(train_batches)
    eval_init_op = iterr.make_initializer(eval_batches)
    model = class_model(context)
    loss, train_op, global_step, train_summaries = model.train(
        inputs=inputs_and_target[:-1], targets=inputs_and_target[-1])
    eval_outputs, eval_summaries = model.eval(inputs=inputs_and_target[:-1],
                                              targets=inputs_and_target[-1])
    inference_name = model.get_inference_op_name()
    logging.info("inference_node_name:%s" % inference_name)

    logging.info("# Session")
    saver = tf.train.Saver(max_to_keep=num_epochs)
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        time_sess = time.time()
        ckpt = tf.train.latest_checkpoint(logdir)
        if ckpt is None or "new" == run_type:  # new run
            save_hparams(hp, logdir)
            logging.info("Initializing from scratch")
            sess.run(tf.global_variables_initializer())
        else:  # continue OR finetune
            saver.restore(sess, ckpt)
            if "finetune" == hp.run_type:  # finetune
                save_hparams(hp, logdir)

        save_variable_specs(os.path.join(logdir, "var_specs"))
        save_operation_specs(os.path.join(logdir, "op_specs"))
        f_debug = open(os.path.join(logdir, "debug.txt"), "a")
        summary_writer = tf.summary.FileWriter(logdir, sess.graph)
        if hp.zero_step:
            sess.run(global_step.assign(0))

        sess.run(train_init_op)
        total_steps = num_epochs * num_train_batches
        logging.info("total_steps:%s, num_epochs:%s, num_train_batches:%s",
                     total_steps, num_epochs, num_train_batches)
        _gs = sess.run(global_step)
        logging.info("global_step is stated at %s", _gs)
        t_epoch = time.time()
        model_output = 'default'
        for i in tqdm(range(_gs, total_steps + 1)):
            ts = time.time()
            # f_debug.write("loss\n")
            # tensor_tmp = tf.get_default_graph().get_tensor_by_name("loss:0")
            # np.savetxt(f_debug, tensor_tmp.eval().reshape([1]), delimiter=', ', footer="=" * 64)
            _, _gs, _summary = sess.run(
                [train_op, global_step, train_summaries])
            epoch = math.ceil(_gs / num_train_batches)
            f_debug.write("train: epoch %s takes %s\n" %
                          (epoch, time.time() - ts))
            summary_writer.add_summary(_summary, _gs)

            if _gs and _gs % num_train_batches == 0:
                logging.info("epoch {} is done".format(epoch))

                # train loss
                _loss = sess.run(loss)
                # eval
                logging.info("# eval evaluation")
                _, _eval_summaries = sess.run([eval_init_op, eval_summaries])
                summary_writer.add_summary(_eval_summaries, _gs)
                if save_model:
                    # save checkpoint
                    logging.info("# save models")
                    model_output = "model%02dL%.2f" % (epoch, _loss)
                    ckpt_name = os.path.join(logdir, model_output)
                    saver.save(sess, ckpt_name, global_step=_gs)
                    logging.info(
                        "after training of {} epochs, {} has been saved.".
                        format(epoch, ckpt_name))
                # proceed to next epoch
                logging.info("# fall back to train mode")
                ts = time.time()
                sess.run(train_init_op)
                logging.info("fallback_train: %s\t%s\t%s takes %s" %
                             (i, _gs, epoch, time.time() - ts))
                logging.info("epoch %s takes %s", epoch, time.time() - t_epoch)
                t_epoch = time.time()
        summary_writer.close()
        logging.info("Session runs for %s", time.time() - time_sess)
        if save_model:
            # save to pb
            inference_node_name = inference_name[:inference_name.find(":")]
            graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def, output_node_names=[inference_node_name])
            tf.train.write_graph(graph_def,
                                 logdir,
                                 '%s.pb' % model_output,
                                 as_text=False)
    f_debug.close()
    logging.info("Done")
Code Example #22
import tensorflow as tf

from data_load import get_batch
from model import Transformer
from hparams import Hparams
from utils import get_hypotheses, calc_bleu, load_hparams
import logging
from tqdm import tqdm

logging.basicConfig(level=logging.INFO)

logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
load_hparams(hp, hp.modeldir)

logging.info("# Prepare test batches")
test_batches, num_test_batches, num_test_samples = get_batch(
    hp.test_source,
    hp.test_target,
    100000,
    100000,
    hp.vocab,
    hp.test_batch_size,
    shuffle=False)
iter = tf.data.Iterator.from_structure(test_batches.output_types,
                                       test_batches.output_shapes)
xs, ys = iter.get_next()

test_init_op = iter.make_initializer(test_batches)
Code Example #23
File: train.py  Project: Traeyee/DickLearning
from dssm.model import DSSM

logger = logging.getLogger()
logger.setLevel(logging.INFO)

logging.info("# hparams")
hparams = Hparams()
parser = hparams.parser
hp = parser.parse_args()
run_type = hp.run_type
logdir = hp.logdir
batch_size = hp.batch_size
num_epochs = hp.num_epochs
assert hp.run_type in ("new", "continue", "finetune")
if "continue" == hp.run_type:
    load_hparams(hp, logdir)
    batch_size = hp.batch_size
context = Context(hp)

assert hp.train_data is not None
logging.info("# Prepare train/eval batches")
logging.info("Use %s for training set", hp.train_data)
params = {"maxlen1": hp.maxlen1, "maxlen2": hp.maxlen2}
train_batches, num_train_batches, num_train_samples = get_batch(
    fpath=hp.train_data,
    task_type="set2sca",
    num_inputfields=2,
    params=params,
    vocab_fpath=context.vocab,
    batch_size=batch_size,
    shuffle=True)