                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print(model.summary())

    x_train, y_train, x_test, y_test = get_mnist_data()

    # Check that the accuracy is the same as before: ~99% at the first epoch.
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=1, batch_size=128)
    print('')
    assert test_acc > 0.98

    get_activations(model, x_test[0:1], print_shape_only=True)    # with just one sample.
    get_activations(model, x_test[0:200], print_shape_only=True)  # with 200 samples.
else:
    x_train, y_train, x_test, y_test = get_mnist_data()

    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
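
# A minimal sketch of what get_activations() does under the hood, assuming the
# read_activations-style implementation used throughout this repo: build one
# Keras backend function per layer output and evaluate each on the batch.
# This is an illustration, not the exact library code.
from keras import backend as K

def get_activations_sketch(model, model_inputs, learning_phase=0):
    # One backend function per layer output.
    outputs = [layer.output for layer in model.layers]
    funcs = [K.function([model.input, K.learning_phase()], [out]) for out in outputs]
    # learning_phase=0 means test mode, which matters for layers such as
    # Dropout and BatchNormalization.
    return [func([model_inputs, learning_phase])[0] for func in funcs]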
# Train each autoencoder on the slice data, save it, and record its
# reconstructions and per-layer activations.
for enc in range(len(encoders)):
    encoder = encoders[enc]
    encoder.summary()
    encoder.fit(all_slcs, all_slcs,
                epochs=50,  # epochs=2,
                batch_size=128,
                shuffle=True,
                validation_split=0.3,
                callbacks=[EarlyStopping(patience=2)])
    encoder_filename = labels[enc] + '.h5'
    encoder.save(encoder_filename)

    preds.append(encoder.predict(all_slcs))
    predictions.update({labels[enc]: preds[enc]})
    sio.savemat('predictions.mat', predictions)

    actviews = read_activations.get_activations(encoder, all_slcs[1:10, :, :], True)
    acts = []
    for i in range(len(actviews)):
        # acts.append(np.squeeze(actviews[i][:, :, :, 0]))
        # print(len(acts))
        lab = labels[enc] + '_' + str(i)
        # activation_views.update({lab: acts})
        actview = actviews[i]
        activation_views.update({lab: actview})
        # sio.savemat(lab + '.mat', activation_views)
    sio.savemat('activation_views.mat', activation_views)

end = time.time()
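
# Sanity-check sketch: the dictionaries saved above with scipy.io.savemat can
# be loaded back with scipy.io.loadmat. The file names and keys match the
# files written in the loop; everything else here is illustrative.
import scipy.io as sio

saved_preds = sio.loadmat('predictions.mat')
saved_views = sio.loadmat('activation_views.mat')
for key in saved_views:
    if not key.startswith('__'):  # skip MATLAB header entries
        print(key, saved_views[key].shape)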
def predict_image(model, img_path, output_dir, input_shape=None):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    batch_input_shape = model.layers[0].get_config()['batch_input_shape']
    ddprint("batch_input_shape:", batch_input_shape)

    x = None
    if os.path.splitext(img_path)[1] == ".floats":
        if input_shape is None:
            input_shape = [1, batch_input_shape[1], batch_input_shape[2], batch_input_shape[3]]
            x = load_input_from_floats(img_path, input_shape)
        else:
            x = load_input_from_floats(img_path, [1, input_shape[1], input_shape[0], 3])
    else:
        if input_shape is None:
            input_shape = batch_input_shape[1:3]
        x = load_input_from_img(img_path, input_shape)
    print("Loaded input file '{}'".format(img_path))
    stats(x, "Input to model:")

    preds = model.predict(x)
    # Decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch).
    print('Top 3 predictions: {}'.format(decode_predictions(preds, top=3)[0]))

    model_inputs = model.inputs
    print('Computing activations for all layers '
          '(this may take about a minute when running TensorFlow on the CPU)')
    activations = du.get_activations(model, x, print_shape_only=True, layer_name=None)

    print("Saving {} activations for model '{}' to directory '{}'".format(
        len(activations), model_name, output_dir))
    shape_info = {}
    prefix = model_name + "-"
    for name, activation in activations.items():
        file_name = prefix + name + ".floats"
        testout_path = os.path.join(output_dir, file_name)
        # Move the batch axis last and force float32 before writing.
        out_trans = activation
        if len(activation.shape) == 4:
            out_trans = activation.transpose(1, 2, 3, 0).astype("float32")
        elif len(activation.shape) == 3:
            out_trans = activation.transpose(1, 2, 0).astype("float32")
        elif len(activation.shape) == 2:
            out_trans = activation.transpose(1, 0).astype("float32")
        dprint("Saving features to " + testout_path)
        ddprint("Shape: {} Type: {}".format(out_trans.shape, out_trans.dtype))
        write_np_array(out_trans, testout_path)
        shape_info[file_name] = out_trans.shape

    with open(os.path.join(output_dir, "shapes.json"), "w") as json_file:
        print(pretty(shape_info), file=json_file)
    return preds
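
# A hedged sketch of reading one of the activation dumps back in. It assumes
# write_np_array() above writes the raw float32 buffer in C order and that
# pretty() emits valid JSON mapping file names to shape lists; both helpers
# are defined elsewhere in this repo, so treat this as illustrative only.
import json
import os
import numpy as np

def load_saved_activation(output_dir, file_name):
    with open(os.path.join(output_dir, "shapes.json")) as f:
        shapes = json.load(f)
    data = np.fromfile(os.path.join(output_dir, file_name), dtype=np.float32)
    return data.reshape(shapes[file_name])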
# print(np.concatenate(a).flatten())

# Instantiate the model.
model = Sequential()

# We can think of this chunk as the input layer.
model.add(Dense(64, input_shape=(10,), kernel_initializer='uniform'))
model.add(BatchNormalization())
model.add(Dense(1, activation=None))

# Set up the optimization of our weights.
model.compile(loss='mse', optimizer='adam')

BIG_NUMBER = 1000
X_train = np.ones(shape=(1, 10))
y_train = np.ones(shape=(1, 1)) * BIG_NUMBER

# Explicitly give a small input and an enormous output to force the weights to
# become really big. Because the input is very small, big weights mean the
# activations will become very big too.

# callback = My_Callback()

# Run the fitting.
# model.load_weights('weights.h5')
model.fit(X_train, y_train, epochs=5000)
# model.save_weights('weights.h5')

from read_activations import get_activations

a = get_activations(model, X_train)
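
# A small follow-up sketch: confirm that the activations really did blow up.
# Assumes get_activations() returns a list of NumPy arrays, one per layer
# (adjust with .values() if your version returns a dict).
for i, act in enumerate(a):
    print('layer {}: shape={}, max |activation| = {:.2f}'.format(
        i, act.shape, np.abs(act).max()))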
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    print(model.summary())

    x_train, y_train, x_test, y_test = get_fashionmnist_data()

    # Sanity-check accuracy on the test set (the assert below is deliberately
    # loose here, unlike the ~99% check in the MNIST script).
    test_loss, test_acc = model.evaluate(x_test, y_test, verbose=1, batch_size=128)
    print('')
    assert test_acc > 0

    # Pick a random test sample (index 1..10) and visualize its activations.
    num = math.ceil(10 * random.random())
    a = get_activations(model, x_test[num:num + 1], print_shape_only=True)  # with just one sample.
    display_activations(a)
    # get_activations(model, x_test[0:200], print_shape_only=True)  # with 200 samples.

    import numpy as np
    import matplotlib.pyplot as plt
    plt.imshow(np.squeeze(x_test[num:num + 1]), interpolation='None', cmap='gray')
else:
    x_train, y_train, x_test, y_test = get_fashionmnist_data()

    model = Sequential()
    model.add(