def test(epoch, mode=1):
    import matplotlib.pyplot as plt
    from PIL import Image
    from networks.capsulenet.helper_function import combine_images

    if mode == 1:
        num_classes = 10
        _, (x_test, y_test) = load_cifar_10()
    else:
        num_classes = 100
        _, (x_test, y_test) = load_cifar_100()
    model = CapsNetv2(input_shape=[32, 32, 3], n_class=num_classes, n_route=3)
    model.load_weights('weights/capsule_weights/capsule-cifar-' + str(num_classes)
                       + 'weights-{:02d}.h5'.format(epoch))
    print("Weights loaded, start validation")
    # model.load_weights('weights/capsule-weights-{:02d}.h5'.format(epoch))

    y_pred, x_recon = model.predict([x_test, y_test], batch_size=100)
    print('-' * 50)
    # Test acc: 0.7307
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1)) / y_test.shape[0])

    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save("results/real_and_recon.png")
    print('Reconstructed images are saved to ./results/real_and_recon.png')
    print('-' * 50)
    plt.imshow(plt.imread("results/real_and_recon.png"))
    plt.show()
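# test() relies on combine_images from networks.capsulenet.helper_function to tile the
# real and reconstructed samples into a single grid image. That helper is not shown in
# this section; a minimal sketch of what it is assumed to do (tiling HxWx3 images into a
# roughly square grid) could look like the following.
def combine_images(generated_images):
    # Arrange N images into a near-square grid; assumes 3-channel inputs in [0, 1].
    num = generated_images.shape[0]
    width = int(np.sqrt(num))
    height = int(np.ceil(float(num) / width))
    shape = generated_images.shape[1:3]
    image = np.zeros((height * shape[0], width * shape[1], 3),
                     dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        i = index // width
        j = index % width
        image[i * shape[0]:(i + 1) * shape[0],
              j * shape[1]:(j + 1) * shape[1], :] = img
    return image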
def train(epochs=50, batch_size=64, mode=1):
    import numpy as np
    import os
    from keras import callbacks
    from keras.utils.vis_utils import plot_model

    if mode == 1:
        num_classes = 10
        (x_train, y_train), (x_test, y_test) = load_cifar_10()
    else:
        num_classes = 100
        (x_train, y_train), (x_test, y_test) = load_cifar_100()
    model = CapsNetv1(input_shape=[32, 32, 3], n_class=num_classes, n_route=3)

    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    model.summary()

    log = callbacks.CSVLogger('networks/models/results/capsule-cifar-' + str(num_classes) + '-log.csv')
    tb = callbacks.TensorBoard(
        log_dir='networks/models/results/tensorboard-capsule-cifar-' + str(num_classes) + '-logs',
        batch_size=batch_size, histogram_freq=True)
    checkpoint = callbacks.ModelCheckpoint('networks/models/capsnet.h5',
                                           save_best_only=True, verbose=1)
    lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.))
    # plot_model(model, to_file='models/capsule-cifar-'+str(num_classes)+'.png', show_shapes=True)

    model.compile(optimizer=optimizers.Adam(lr=0.001),
                  loss=[margin_loss, 'mse'],
                  loss_weights=[1., 0.1],
                  metrics={'output_recon': 'accuracy', 'output': 'accuracy'})

    from networks.capsulenet.helper_function import data_generator
    generator = data_generator(x_train, y_train, batch_size)
    # The image generator significantly increases accuracy and reduces validation loss.
    model.fit_generator(generator,
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        validation_data=([x_test, y_test], [y_test, x_test]),
                        epochs=epochs,
                        verbose=1,
                        max_q_size=100,
                        callbacks=[log, tb, checkpoint, lr_decay])
    return model
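# train() compiles the model against margin_loss, which is defined elsewhere in the repo
# and not shown in this section. A sketch consistent with the capsule-network margin loss
# of Sabour et al. (2017), with the usual constants m+ = 0.9, m- = 0.1 and lambda = 0.5
# assumed, would be:
from keras import backend as K

def margin_loss(y_true, y_pred):
    # y_pred holds the per-class capsule lengths; y_true is one-hot encoded.
    L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \
        0.5 * (1. - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    return K.mean(K.sum(L, axis=1))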
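# The data_generator imported inside train() also lives in
# networks.capsulenet.helper_function and is not shown here. A hypothetical sketch that
# matches how fit_generator consumes it (yielding [images, labels] as inputs and
# [labels, images] as targets, with light augmentation assumed) could be:
from keras.preprocessing.image import ImageDataGenerator

def data_generator(x, y, batch_size):
    # Augmentation settings here are illustrative, not taken from the repo.
    datagen = ImageDataGenerator(width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 horizontal_flip=True)
    gen = datagen.flow(x, y, batch_size=batch_size)
    while True:
        x_batch, y_batch = next(gen)
        yield [x_batch, y_batch], [y_batch, x_batch]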