# Example 1
# Build, compile, and train the MusicTransformer under the distribution
# strategy.  Every `eval`-th batch (here 100) the model is evaluated on a
# held-out split, checkpointed, and progress is printed.
with strategy.scope():
    mt = MusicTransformer(embedding_dim=256,
                          vocab_size=par.vocab_size,
                          num_layer=6,
                          max_seq=max_seq,
                          dropout=0.2,
                          debug=False,
                          loader_path=load_path)
    mt.compile(optimizer=opt, loss=callback.transformer_dist_train_loss)

    # Train Start
    for e in range(epochs):
        mt.reset_metrics()
        for b in range(len(dataset.files) // batch_size):
            try:
                batch_x, batch_y = dataset.seq2seq_batch(batch_size, max_seq)
            except Exception:
                # Batch assembly can fail (presumably a short/corrupt data
                # file — TODO confirm what seq2seq_batch raises); skip the
                # batch rather than abort the epoch.  Narrowed from a bare
                # `except:`, which also swallowed KeyboardInterrupt/SystemExit
                # and made the loop un-interruptible.
                continue
            result_metrics = mt.train_on_batch(batch_x, batch_y)
            if b % 100 == 0:
                # Periodic evaluation + checkpoint + console report.
                eval_x, eval_y = dataset.seq2seq_batch(batch_size, max_seq,
                                                       'eval')
                eval_result_metrics = mt.evaluate(eval_x, eval_y)
                mt.save(save_path)
                print('\n====================================================')
                print('Epoch/Batch: {}/{}'.format(e, b))
                # result_metrics[0] is the loss, [1] the accuracy metric
                # (order set by mt.compile above).
                print('Train >>>> Loss: {:6.6}, Accuracy: {}'.format(
                    result_metrics[0], result_metrics[1]))
                print('Eval >>>> Loss: {:6.6}, Accuracy: {}'.format(
                    eval_result_metrics[0], eval_result_metrics[1]))
# Example 2
# Same training loop as Example 1, but logging scalar metrics and attention
# images to TensorBoard instead of printing.  `mt`, `dataset`, the hyper-
# parameters, and `train_summary_writer` are assumed to be defined earlier
# in the original script.
eval_summary_writer = tf.summary.create_file_writer(eval_log_dir)

# Train Start
# NOTE(review): `idx` advances only on eval batches (every 100th), so it is
# the summary step counter, not the global batch counter — confirm this is
# the intended x-axis for the TensorBoard plots.
idx = 0
for e in range(epochs):
    mt.reset_metrics()
    for b in range(len(dataset.files) // batch_size):
        try:
            batch_x, batch_y = dataset.seq2seq_batch(batch_size, max_seq)
        except Exception:
            # Skip batches that fail to assemble.  Narrowed from a bare
            # `except:`, which also caught KeyboardInterrupt/SystemExit and
            # made Ctrl-C unable to stop training.
            continue
        result_metrics = mt.train_on_batch(batch_x, batch_y)

        if b % 100 == 0:
            # Evaluate on the held-out split; here evaluate() also returns
            # per-layer attention weights for visualization.
            eval_x, eval_y = dataset.seq2seq_batch(batch_size, max_seq, 'eval')
            eval_result_metrics, weights = mt.evaluate(eval_x, eval_y)
            mt.save(save_path)
            with train_summary_writer.as_default():
                tf.summary.scalar('loss', result_metrics[0], step=idx)
                tf.summary.scalar('accuracy', result_metrics[1], step=idx)
                # One image summary per layer, for each of the two attention
                # weight tensors returned per layer (weight[0], weight[1]).
                for i, weight in enumerate(weights):
                    with tf.name_scope("layer_%d" % i):
                        with tf.name_scope("_w0"):
                            utils.attention_image_summary(weight[0])
                        with tf.name_scope("_w1"):
                            utils.attention_image_summary(weight[1])

            with eval_summary_writer.as_default():
                tf.summary.scalar('loss', eval_result_metrics[0], step=idx)
                tf.summary.scalar('accuracy', eval_result_metrics[1], step=idx)
            idx += 1