Example 1
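An evaluation-only script: it restores the latest SenCNN checkpoint, streams the test set through the model, and reports test loss and accuracy.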
import pickle
from pathlib import Path

import tensorflow as tf
from tqdm import tqdm

# SenCNN, Corpus, Mecab, and the FLAGS definitions come from the project's
# own modules, which are not part of this snippet.


def main(argv):
    test_data = Path.cwd() / 'data_in' / 'test.txt'

    with open(Path.cwd() / 'data_in' / 'vocab.pkl', mode='rb') as io:
        vocab = pickle.load(io)

    test = tf.data.TextLineDataset(str(test_data)).batch(batch_size=FLAGS.batch_size)

    tokenizer = Mecab()
    processing = Corpus(vocab=vocab, tokenizer=tokenizer)

    # init params
    classes = FLAGS.classes
    max_length = FLAGS.length
    epochs = FLAGS.epochs
    batch_size = FLAGS.batch_size
    learning_rate = FLAGS.learning_rate

    # create model
    sen_cnn = SenCNN(vocab=vocab, classes=classes)

    # create optimizer & loss_fn
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)  # SenCNN returns raw logits (see Example 3)

    test_loss_metric = tf.keras.metrics.Mean(name='val_loss')
    test_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')

    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=sen_cnn)
    manager = tf.train.CheckpointManager(ckpt, './data_out/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)

    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    tf.keras.backend.set_learning_phase(0)
    test_loss_metric.reset_states()
    test_acc_metric.reset_states()

    for step, val in enumerate(test):
        data, label = processing.token2idex(val)
        logits = sen_cnn(data)
        val_loss = loss_fn(label, logits)
        test_loss_metric.update_state(val_loss)
        test_acc_metric.update_state(label, logits)

    test_loss = test_loss_metric.result()

    tqdm.write(
        'test_acc : {:.3f}%, test_loss : {:.3f}'.format(test_acc_metric.result() * 100, test_loss))
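All three examples push each raw text batch through processing.token2idex, whose implementation is not shown. A minimal sketch of what such a helper might look like, assuming tab-separated 'sentence<TAB>label' lines, a gluonnlp-style vocab with to_indices, and a konlpy-style tokenizer with a morphs method (all assumptions about the hidden project code):

import tensorflow as tf

def token2idex(batch, vocab, tokenizer, max_length, pad_idx):
    # Hypothetical: convert a batch of raw lines into padded id tensors.
    data, labels = [], []
    for line in batch.numpy():  # 'batch' is a string tensor from TextLineDataset
        sentence, label = line.decode('utf-8').split('\t')
        indices = vocab.to_indices(tokenizer.morphs(sentence))[:max_length]
        indices = indices + [pad_idx] * (max_length - len(indices))  # right-pad
        data.append(indices)
        labels.append(int(label))
    return (tf.convert_to_tensor(data, dtype=tf.int32),
            tf.convert_to_tensor(labels, dtype=tf.int32))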
Example 2
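A training script for a character-level CNN (CharCNN): it builds tf.data pipelines over the train/val/test/dev splits, pads each sequence to a fixed length, and optimizes with Adam inside a manual tf.GradientTape loop.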
import pickle
from pathlib import Path

import tensorflow as tf
from tqdm import tqdm

# CharCNN, Corpus, Split, PadSequence, and the FLAGS definitions come from
# the project's own modules, which are not part of this snippet.


def main(argv):
    train_data = Path.cwd() / '..' / 'data_in' / 'train.txt'
    val_data = Path.cwd() / '..' / 'data_in' / 'val.txt'
    test_data = Path.cwd() / '..' / 'data_in' / 'test.txt'
    dev_data = Path.cwd() / '..' / 'data_in' / 'dev.txt'
    # init params
    classes = FLAGS.classes
    max_length = FLAGS.length
    epochs = FLAGS.epochs
    learning_rate = FLAGS.learning_rate
    dim = FLAGS.embedding_dim
    global_step = 1000
    batch_size = FLAGS.batch_size

    with open(Path.cwd() / '..' / 'data_in' / 'vocab.pkl', mode='rb') as io:
        vocab = pickle.load(io)

    train = tf.data.TextLineDataset(str(train_data)).shuffle(
        buffer_size=batch_size).batch(batch_size=batch_size)  # note: a shuffle buffer of one batch shuffles only weakly
    eval_ds = tf.data.TextLineDataset(str(val_data)).batch(batch_size=batch_size)  # renamed from 'eval' to avoid shadowing the built-in
    test = tf.data.TextLineDataset(str(test_data)).batch(batch_size=batch_size)
    dev = tf.data.TextLineDataset(str(dev_data)).batch(batch_size=batch_size)

    padder = PadSequence(max_length,
                         pad_val=vocab.to_indices(vocab.padding_token))
    processing = Corpus(vocab=vocab, split_fn=Split(), pad_fn=padder)

    # create model
    char_cnn = CharCNN(vocab=vocab, classes=classes, dim=dim)

    # create optimizer & loss_fn
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)  # the variable name 'logits' below suggests CharCNN returns raw scores

    train_loss_metric = tf.keras.metrics.Mean(name='train_loss')
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(
        name='train_accuracy')
    val_loss_metric = tf.keras.metrics.Mean(name='val_loss')
    val_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(
        name='val_accuracy')

    # train_summary_writer = tf.summary.create_file_writer('./data_out/summaries/train')
    # eval_summary_writer = tf.summary.create_file_writer('./data_out/summaries/eval')

    # ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=char_cnn)
    # manager = tf.train.CheckpointManager(ckpt, './data_out/tf_ckpts', max_to_keep=3)
    # ckpt.restore(manager.latest_checkpoint)
    #
    # if manager.latest_checkpoint:
    #     print("Restored from {}".format(manager.latest_checkpoint))
    # else:
    #     print("Initializing from scratch.")

    # training
    for epoch in tqdm(range(epochs), desc='epochs'):

        train_loss_metric.reset_states()
        train_acc_metric.reset_states()
        val_loss_metric.reset_states()
        val_acc_metric.reset_states()
        tf.keras.backend.set_learning_phase(1)

        #with train_summary_writer.as_default():
        for step, val in tqdm(enumerate(train), desc='steps'):
            data, label = processing.token2idex(val)
            with tf.GradientTape() as tape:
                logits = char_cnn(data)
                train_loss = loss_fn(label, logits)

            #ckpt.step.assign_add(1)
            grads = tape.gradient(target=train_loss,
                                  sources=char_cnn.trainable_variables)
            opt.apply_gradients(
                grads_and_vars=zip(grads, char_cnn.trainable_variables))

            train_loss_metric.update_state(train_loss)
            train_acc_metric.update_state(label, logits)

            # if tf.equal(opt.iterations % global_step, 0):
            #     tf.summary.scalar('loss', train_loss_metric.result(), step=opt.iterations)

        tr_loss = train_loss_metric.result()

        #save_path = manager.save()
        #print(save_path)
        tqdm.write('epoch : {}, tr_acc : {:.3f}%, tr_loss : {:.3f}'.format(
            epoch + 1,
            train_acc_metric.result() * 100, tr_loss))
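Example 2 delegates padding to a PadSequence callable rather than doing it inline; the project's actual class is not shown. A plausible minimal version (an assumption, not the original implementation):

class PadSequence:
    # Hypothetical sketch: clip or right-pad a list of token ids to a fixed length.
    def __init__(self, length, pad_val=0):
        self.length = length
        self.pad_val = pad_val

    def __call__(self, sample):
        if len(sample) >= self.length:
            return sample[:self.length]  # clip long sequences
        return sample + [self.pad_val] * (self.length - len(sample))  # pad short ones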
Example 3
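The full training loop for SenCNN: each epoch runs a tf.GradientTape training pass and a validation pass, writes TensorBoard summaries, and saves a checkpoint.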
import pickle
from pathlib import Path

import tensorflow as tf
from tqdm import tqdm

# SenCNN, Corpus, MeCab, and the FLAGS definitions come from the project's
# own modules, which are not part of this snippet.


def main(argv):
    train_data = Path.cwd() / 'data_in' / 'train.txt'
    val_data = Path.cwd() / 'data_in' / 'val.txt'

    with open(Path.cwd() / 'data_in' / 'vocab.pkl', mode='rb') as io:
        vocab = pickle.load(io)

    train = tf.data.TextLineDataset(str(train_data)).shuffle(buffer_size=1000).batch(batch_size=FLAGS.batch_size,
                                                                                     drop_remainder=True)
    eval_ds = tf.data.TextLineDataset(str(val_data)).batch(batch_size=FLAGS.batch_size, drop_remainder=True)  # renamed from 'eval' to avoid shadowing the built-in

    tokenizer = MeCab()
    processing = Corpus(vocab=vocab, tokenizer=tokenizer)

    # init params
    classes = FLAGS.classes
    max_length = FLAGS.length
    epochs = FLAGS.epochs
    learning_rate = FLAGS.learning_rate
    global_step = 1000

    # create model
    sen_cnn = SenCNN(vocab=vocab, classes=classes)

    # create optimizer & loss_fn
    opt = tf.optimizers.Adam(learning_rate=learning_rate)
    loss_fn = tf.losses.SparseCategoricalCrossentropy(from_logits=True)

    train_loss_metric = tf.keras.metrics.Mean(name='train_loss')
    train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
    val_loss_metric = tf.keras.metrics.Mean(name='val_loss')
    val_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')

    train_summary_writer = tf.summary.create_file_writer('./data_out/summaries/train')
    eval_summary_writer = tf.summary.create_file_writer('./data_out/summaries/eval')

    ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=sen_cnn)
    manager = tf.train.CheckpointManager(ckpt, './data_out/tf_ckpts', max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)

    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    # training
    for epoch in tqdm(range(epochs), desc='epochs'):

        train_loss_metric.reset_states()
        train_acc_metric.reset_states()
        val_loss_metric.reset_states()
        val_acc_metric.reset_states()
        tf.keras.backend.set_learning_phase(1)

        tr_loss = 0
        with train_summary_writer.as_default():
            for step, val in tqdm(enumerate(train), desc='steps'):
                data, label = processing.token2idex(val)
                with tf.GradientTape() as tape:
                    logits = sen_cnn(data)
                    train_loss = loss_fn(label, logits)
                ckpt.step.assign_add(1)
                grads = tape.gradient(target=train_loss, sources=sen_cnn.trainable_variables)
                opt.apply_gradients(grads_and_vars=zip(grads, sen_cnn.trainable_variables))

                train_loss_metric.update_state(train_loss)
                train_acc_metric.update_state(label, logits)

                if tf.equal(opt.iterations % global_step, 0):
                    tf.summary.scalar('loss', train_loss_metric.result(), step=opt.iterations)

        tr_loss = train_loss_metric.result()
        save_path = manager.save()
        print(save_path)

        tf.keras.backend.set_learning_phase(0)

        val_loss = 0
        with eval_summary_writer.as_default():
            for step, val in tqdm(enumerate(eval_ds), desc='steps'):
                data, label = processing.token2idex(val)
                logits = sen_cnn(data)
                val_loss = loss_fn(label, logits)
                val_loss_metric.update_state(val_loss)
                val_acc_metric.update_state(label, logits)
                tf.summary.scalar('loss', val_loss_metric.result(), step=step)  # note: step restarts every epoch

        val_loss = val_loss_metric.result()

        tqdm.write(
            'epoch : {}, tr_acc : {:.3f}%, tr_loss : {:.3f}, val_acc : {:.3f}%, val_loss : {:.3f}'.format(
                epoch + 1, train_acc_metric.result() * 100, tr_loss,
                val_acc_metric.result() * 100, val_loss))
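Each example defines main(argv) and reads its hyperparameters from FLAGS, which matches the absl-py entry-point pattern. A minimal wiring sketch; the flag names are taken from the examples above, but the default values are placeholders:

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('classes', 2, 'number of target classes')
flags.DEFINE_integer('length', 70, 'maximum sequence length')
flags.DEFINE_integer('epochs', 10, 'number of training epochs')
flags.DEFINE_integer('batch_size', 128, 'mini-batch size')
flags.DEFINE_float('learning_rate', 1e-3, 'Adam learning rate')
flags.DEFINE_integer('embedding_dim', 128, 'embedding dimension (Example 2)')

if __name__ == '__main__':
    app.run(main)  # absl parses the flags, then calls main(argv)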