# Example #1
def evaluation(model, supervisor, num_label):
    """Restore the latest checkpoint and evaluate the model on the test set.

    Writes per-sample class probabilities to ``cfg.results + '/prob_test.txt'``
    and the averaged test accuracy to the file handle returned by ``save_to()``.

    Args:
        model: model object exposing ``X``, ``labels``, ``accuracy`` and
            ``activation`` tensors.
        supervisor: ``tf.train.Supervisor`` managing the session/checkpoints.
        num_label: number of output classes (width of the probability matrix).
    """
    test_x, test_y, batch_count = load_data(cfg.dataset,
                                            cfg.batch_size,
                                            is_training=False)
    acc_file = save_to()
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with supervisor.managed_session(config=session_config) as sess:
        supervisor.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
        tf.logging.info('Model restored!')

        accuracy_sum = 0
        probabilities = np.zeros((batch_count * cfg.batch_size, num_label))
        progress = tqdm(range(batch_count),
                        total=batch_count,
                        ncols=70,
                        leave=False,
                        unit='b')
        for batch_idx in progress:
            lo = batch_idx * cfg.batch_size
            hi = lo + cfg.batch_size
            batch_acc, probabilities[lo:hi, :] = sess.run(
                [model.accuracy, model.activation],
                {model.X: test_x[lo:hi], model.labels: test_y[lo:hi]})
            accuracy_sum += batch_acc
        # model.accuracy appears to be a per-batch sum (see matching division
        # in train()), so normalize by total sample count.
        mean_accuracy = accuracy_sum / (cfg.batch_size * batch_count)
        np.savetxt(cfg.results + '/prob_test.txt', probabilities, fmt='%1.2f')
        print(
            'Classification probability for each category has been saved to ' +
            cfg.results + '/prob_test.txt')
        acc_file.write(str(mean_accuracy))
        acc_file.close()
        print('Test accuracy has been saved to ' + cfg.results +
              '/test_accuracy.txt')
# Example #2
def train(model, supervisor):
    """Train ``model`` under ``supervisor``, logging metrics and checkpoints.

    Every ``cfg.train_sum_freq`` steps the training loss/accuracy and TF
    summaries are recorded; every ``cfg.val_sum_freq`` steps a full validation
    pass runs and the per-sample activations are dumped to ``cfg.results``.
    A checkpoint is saved every ``cfg.save_freq`` epochs.

    Args:
        model: model object exposing ``X``, ``labels``, ``train_op``, ``loss``,
            ``accuracy``, ``activation`` and ``train_summary``.
        supervisor: ``tf.train.Supervisor`` managing the session/checkpoints.

    Raises:
        ValueError: if ``cfg.dataset`` is not a supported dataset name.
        AssertionError: if the training loss becomes NaN.
    """
    trX, trY, num_tr_batch, valX, valY, num_val_batch = load_data(cfg.dataset, is_training=True)
    # Column of validation labels, saved next to the activations for inspection.
    Y = valY[:num_val_batch * cfg.batch_size].reshape((-1, 1))

    if cfg.dataset == 'mnist' or cfg.dataset == 'fashion-mnist':
        num_label = 10
    elif cfg.dataset == 'smallNORB':
        num_label = 5
    else:
        # Fail fast: previously an unknown dataset left num_label undefined,
        # producing a confusing NameError only once validation first ran.
        raise ValueError('Unsupported dataset: ' + str(cfg.dataset))
    fd_train_acc, fd_loss, fd_val_acc = save_to()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # Initialized up front so the checkpoint filename below is well-defined
    # even in the degenerate case num_tr_batch == 0.
    global_step = 0
    with supervisor.managed_session(config=config) as sess:
        print("\nNote: all of results will be saved to directory: " + cfg.results)
        for epoch in range(cfg.epoch):
            print('Training for epoch ' + str(epoch) + '/' + str(cfg.epoch) + ':')
            if supervisor.should_stop():
                print('supervisor stopped!')
                break
            for step in tqdm(range(num_tr_batch), total=num_tr_batch, ncols=70, leave=False, unit='b'):
                start = step * cfg.batch_size
                end = start + cfg.batch_size
                global_step = epoch * num_tr_batch + step

                if global_step % cfg.train_sum_freq == 0:
                    _, loss, train_acc, summary_str = sess.run([model.train_op, model.loss, model.accuracy, model.train_summary])
                    assert not np.isnan(loss), 'Something wrong! loss is nan...'
                    supervisor.summary_writer.add_summary(summary_str, global_step)

                    fd_loss.write(str(global_step) + ',' + str(loss) + "\n")
                    fd_loss.flush()
                    # model.accuracy is a per-batch sum; normalize to a rate.
                    fd_train_acc.write(str(global_step) + ',' + str(train_acc / cfg.batch_size) + "\n")
                    fd_train_acc.flush()
                else:
                    sess.run(model.train_op)

                if cfg.val_sum_freq != 0 and (global_step) % cfg.val_sum_freq == 0:
                    val_acc = 0
                    prob = np.zeros((num_val_batch * cfg.batch_size, num_label))
                    for i in range(num_val_batch):
                        # NOTE: reuses start/end; the training values are
                        # recomputed at the top of the next training step.
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        acc, prob[start:end, :] = sess.run([model.accuracy, model.activation], {model.X: valX[start:end], model.labels: valY[start:end]})
                        val_acc += acc
                    val_acc = val_acc / (cfg.batch_size * num_val_batch)
                    np.savetxt(cfg.results + '/activations_step_' + str(global_step) + '.txt', np.hstack((prob, Y)), fmt='%1.2f')
                    fd_val_acc.write(str(global_step) + ',' + str(val_acc) + '\n')
                    fd_val_acc.flush()

            if (epoch + 1) % cfg.save_freq == 0:
                supervisor.saver.save(sess, cfg.logdir + '/model_epoch_%04d_step_%02d' % (epoch, global_step))

        fd_val_acc.close()
        fd_train_acc.close()
        fd_loss.close()