model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(),
              metrics=['accuracy'])

x_train, y_train, x_test, y_test = U.get_mnist()

# train on a subset only
x_train = x_train[:1000]
y_train = y_train[:1000]

epsilon = 2e-4    # step size
tau = 200         # number of leapfrog steps to take before the accept/reject step
burn_in = 100
sample_every = 30
N_ensemble = 20   # number of models to create
N_restarts = 5    # use multiple initialisations

ensemble = []

# create/clear the loss log
with open('save/tmp/losses.dat', 'w') as f:
    print('', file=f)
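The hyperparameters above (a step size, a number of leapfrog steps before the accept/reject step, a burn-in period, and a thinning interval) parameterise a Hamiltonian Monte Carlo sampler over the network weights; the sampling loop itself is not part of this excerpt. Below is a minimal sketch of such a loop, assuming a hypothetical helper get_loss_and_grad(w) that returns the training loss (potential energy) and its gradient for a flattened weight vector; a real implementation would also map the flat vector back onto the model's weight tensors.

import numpy as np

def hmc_sample(w0, get_loss_and_grad, n_iters, epsilon, tau, rng):
    # Sketch of HMC over a flattened weight vector w.
    # get_loss_and_grad(w) -> (U, dU/dw) is a hypothetical helper.
    w = w0.copy()
    samples = []
    for _ in range(n_iters):
        p = rng.standard_normal(w.shape)        # resample momentum
        U0, grad = get_loss_and_grad(w)
        H0 = U0 + 0.5 * np.sum(p ** 2)          # initial Hamiltonian
        w_prop = w.copy()
        p = p - 0.5 * epsilon * grad            # initial half step
        for i in range(tau):                    # tau leapfrog steps
            w_prop = w_prop + epsilon * p
            U_prop, grad = get_loss_and_grad(w_prop)
            if i < tau - 1:
                p = p - epsilon * grad
        p = p - 0.5 * epsilon * grad            # final half step
        H_prop = U_prop + 0.5 * np.sum(p ** 2)
        # Metropolis accept/reject on the change in Hamiltonian
        if np.log(rng.random()) < H0 - H_prop:
            w = w_prop
        samples.append(w.copy())
    return samples

# Possible usage with the hyperparameters above: keep one snapshot
# every `sample_every` iterations once burn-in has passed, e.g.
#   samples = hmc_sample(w0, get_loss_and_grad,
#                        burn_in + sample_every * N_ensemble,
#                        epsilon, tau, np.random.default_rng(0))
#   ensemble_weights = samples[burn_in::sample_every]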
    model.add(
        ConcreteDropout(
            Dense(128, activation=act_fn),
            weight_regularizer=WEIGHT_REGULARIZER,
            dropout_regularizer=DROPOUT_REGULARIZER,
        ))
    model.add(
        ConcreteDropout(
            Dense(N_CLASSES, activation='softmax'),
            weight_regularizer=WEIGHT_REGULARIZER,
            dropout_regularizer=DROPOUT_REGULARIZER,
        ))
    return model


if __name__ == "__main__":
    # MNIST, scaled to the range [0, 1]
    x_train, y_train, x_test, y_test = mnist_to_3s_and_7s(U.get_mnist())

    epochs = 50
    batch_size = 128

    model = define_cdropout_3s_7s()
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])

    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              callbacks=[TrackConcreteDropoutP(model)])
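TrackConcreteDropoutP is passed to model.fit as a callback but is not defined in this excerpt. A minimal sketch of what such a callback could look like, assuming each ConcreteDropout wrapper stores its dropout probability as a trainable p_logit weight with p = sigmoid(p_logit), as in the reference Concrete Dropout implementation; the name-based layer filter is likewise an assumption about Keras's default layer naming.

import numpy as np
import keras
import keras.backend as K

class TrackConcreteDropoutP(keras.callbacks.Callback):
    # Hypothetical sketch: record the learned dropout probability of
    # every ConcreteDropout layer at the end of each epoch.
    def __init__(self, model):
        super(TrackConcreteDropoutP, self).__init__()
        # assumes wrappers are auto-named 'concrete_dropout_*'
        self.cd_layers = [l for l in model.layers
                          if 'concrete_dropout' in l.name]
        self.ps_history = []

    def on_epoch_end(self, epoch, logs=None):
        # p = sigmoid(p_logit); assumes the layer exposes `p_logit`
        p_logits = [K.get_value(l.p_logit) for l in self.cd_layers]
        ps = [float(np.squeeze(1.0 / (1.0 + np.exp(-pl))))
              for pl in p_logits]
        self.ps_history.append(ps)
        print('epoch {}: concrete dropout ps = {}'.format(epoch, ps))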
import numpy as np
import matplotlib.pyplot as plt

import src.utilities as U
from latent_plots import get_models, visualise_latent_space

plt.rcParams['figure.figsize'] = 8, 5

# use true type fonts only
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42

if __name__ == '__main__':
    model, encoder, decoder = get_models()

    # move along a line in latent space between two digits
    _, _, mnist, label = U.get_mnist()

    x1 = mnist[label.argmax(axis=1) == 6][200]
    x2 = mnist[label.argmax(axis=1) == 8][200]

    # linear interpolation in pixel space
    x_ims = np.stack([(1 - t) * x1 + t * x2 for t in np.linspace(0, 1, 15)])
    x_preds, x_entropy, x_bald = model.get_results(x_ims)

    # linear interpolation in latent space, decoded back to image space
    z_begin = encoder.predict(x1[None, :]).flatten()
    z_end = encoder.predict(x2[None, :]).flatten()
    z_lin = np.stack([(1 - t) * z_begin + t * z_end
                      for t in np.linspace(0, 1, 15)])
    z_ims = decoder.predict(z_lin)
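The excerpt evaluates the model's uncertainty along the pixel-space interpolation but ends right after decoding the latent-space path. A sketch of a possible continuation (inside the same __main__ block, using only model.get_results and matplotlib, both already in scope): score the decoded images the same way and compare the two paths.

    # evaluate the same uncertainty measures along the latent-space path
    z_preds, z_entropy, z_bald = model.get_results(z_ims)

    # compare uncertainty along the two interpolation paths
    ts = np.linspace(0, 1, 15)
    plt.plot(ts, x_bald, label='pixel-space interpolation')
    plt.plot(ts, z_bald, label='latent-space interpolation')
    plt.xlabel('interpolation coefficient t')
    plt.ylabel('BALD')
    plt.legend()
    plt.show()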