Example #1
def main(_):
    """
    Train the model.
    :return:
    """
    caps_model = CapsuleNet(is_training=True)
    sv = tf.train.Supervisor(logdir=MODEL_PATH, graph=caps_model.graph, global_step=caps_model.global_step,
                             summary_op=caps_model.summary)
    mnist_data, train_num, test_num = load_mnist()
    x_train = mnist_data.train.images
    y_train = mnist_data.train.labels
    x_test = mnist_data.test.images
    y_test = mnist_data.test.labels
    with sv.managed_session() as sess:
        max_eval = 0
        for epoch in range(para.num_epoch):
            # Training
            train_loss, train_acc = train_eval(sess, caps_model, x_train, y_train, train_num, is_training=True)
            print("train\tepoch\t{},\tloss\t{},\tacc\t{}".format(epoch, train_loss, train_acc))
            # Evaluation
            test_loss, test_acc = train_eval(sess, caps_model, x_test, y_test, test_num, is_training=False)
            print("eval\tepoch\t{},\tloss\t{},\tacc\t{}".format(epoch, test_loss, test_acc))

            # checkpoint whenever the test accuracy improves
            if test_acc > max_eval:
                max_eval = test_acc
                global_step = sess.run(caps_model.global_step)
                sv.saver.save(sess, MODEL_PATH + '/model_epoch_{}_gs_{}'.format(epoch, global_step))
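The `def main(_)` signature suggests this entry point is launched through TensorFlow 1.x's `tf.app.run()`, which parses command-line flags and then calls `main`. The launcher below is a minimal sketch of that assumption; it is not part of the original example.

import tensorflow as tf

if __name__ == '__main__':
    # tf.app.run() parses flags and invokes main(_) with the remaining argv (TF 1.x pattern).
    tf.app.run()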
Example #2
def reconstruct_image():
    """Reconstruct test images with the trained capsule network and save them as image grids."""
    mnist_data, train_num, test_num = load_mnist()
    x_test = mnist_data.test.images[:para.batch_size]
    size = 5
    caps_model = CapsuleNet(is_training=False)
    with caps_model.graph.as_default():
        sv = tf.train.Supervisor(logdir=MODEL_PATH)
        with sv.managed_session() as sess:
            checkpoint_path = tf.train.latest_checkpoint(MODEL_PATH)
            sv.saver.restore(sess, checkpoint_path)
            recon_image = sess.run(caps_model.decoded, feed_dict={caps_model.x: x_test})
            recon_image = np.reshape(recon_image, (para.batch_size, 28, 28, 1))
            x_test = np.reshape(x_test, (para.batch_size, 28, 28, 1))
            # write 5 grids, each holding size * size reconstructed / original image pairs
            for ii in range(size):
                start = ii * size * size
                end = (ii + 1) * size * size
                recon_filename = os.path.join(OUTPUT_PATH, 'recon_image_{}.png'.format(ii + 1))
                save_images(recon_image[start:end, :], [size, size], recon_filename)

                test_filename = os.path.join(OUTPUT_PATH, 'test_image_{}.png'.format(ii + 1))
                save_images(x_test[start:end, :], [size, size], test_filename)
Example #3
    'te_acc_dvi_zm': '.4f',
    'te_acc_mcvi_zm': '.4f',
    'tr_time': '.3f',
    'te_time_dvi': '.3f',
    'te_time_mcvi': '.3f'
}

if __name__ == "__main__":
    args = parser.parse_args()
    args.device = torch.device(
        'cuda:{}'.format(args.device) if torch.cuda.is_available() else 'cpu')

    model = LeNetVDO(args).to(args.device)

    args.batch_size, args.test_batch_size = 32, 32
    train_loader, test_loader = load_mnist(args)
    args.data_size = len(train_loader.dataset)

    # add one log_alpha column per variational layer; the counter must live outside the loop
    i = 0
    for layer in model.children():
        if hasattr(layer, 'log_alpha'):
            fmt.update({'{}log_alpha'.format(i + 1): '3.3e'})
            i += 1

    logger = Logger('lenet-vdo', fmt=fmt)
    logger.print(args)
    logger.print(model)

    criterion = ClassificationLoss(model, args)
    optimizer = torch.optim.Adam(
        [p for p in model.parameters() if p.requires_grad], lr=args.lr)
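The snippet ends after constructing the loss and optimizer. A per-epoch loop along the following lines would typically come next; the exact call signature of `criterion` and the `args.epochs` attribute are assumptions here, not taken from the original code.

# Hypothetical training-loop sketch; criterion's signature is assumed.
for epoch in range(args.epochs):
    model.train()
    for data, target in train_loader:
        data, target = data.to(args.device), target.to(args.device)
        optimizer.zero_grad()
        loss = criterion(model(data), target)  # assumed: (output, target) -> scalar loss
        loss.backward()
        optimizer.step()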
Example #4
import numpy as np
import matplotlib.pyplot as plt

# Model, Conv2D, ReLU, MaxPooling2D, Flatten, Dense, Softmax, SGD,
# CategoricalCrossEntropy and load_mnist come from the surrounding project.
np.random.seed(1234)

model = Model()

model.add(Conv2D(filters=1, shape=(28, 28, 1), kernel_size=(3, 3)))
model.add(ReLU())
model.add(MaxPooling2D(shape=(2, 2)))
model.add(Flatten())
model.add(Dense(shape=(169, 128)))  # 13 * 13 * 1 = 169 features after the 3x3 conv (26x26, i.e. 676 values) and 2x2 pooling
model.add(ReLU())
model.add(Dense(shape=(128, 10)))
model.add(Softmax())

model.compile(optimizer=SGD(lr=0.01), loss=CategoricalCrossEntropy())

(x, y), (x_test, y_test) = load_mnist()
x = x.reshape(x.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

train_loss_cnn, train_acc_cnn, val_loss_cnn, val_acc_cnn = model.fit(
    x, y, x_test, y_test, epoch=10, batch_size=32)

plt.plot(train_acc_cnn, label='cnn train accuracy')
plt.plot(val_acc_cnn, label='cnn val accuracy')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()
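The 169 in the first Dense layer follows from the feature-map arithmetic: a 3x3 valid convolution shrinks 28x28 to 26x26 (676 values, the figure in the original comment), and 2x2 max-pooling halves that to 13x13 = 169. A quick check, not part of the original script:

conv_out = 28 - 3 + 1        # 26: 3x3 valid convolution on a 28x28 input
pooled = conv_out // 2       # 13: 2x2 max-pooling with stride 2
assert pooled * pooled * 1 == 169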