def main():
    """Save saliency maps for the first 64 filters of a dense layer of a
    trained VGG16-hybrid model, evaluated on one MIT67 test image.

    Side effects: writes the unaltered test image plus one saliency PNG per
    filter into 'mit67_<dataset>_saliency/'.
    """
    # dataset_str = 'rgb'
    dataset_str = 'smooth'
    dataset = MIT67Dataset(dataset_str)

    model = load_model(
        'models/vgg16_hybrid_1365_softmax_mit67_' + dataset_str + '.h5')

    # first dense layer
    layer_idx = 19

    # choose image (indexed by unshuffled test set)
    img_idx = 0
    X, Y = dataset.test_data((224, 224), 'rgb', 1)
    img = X[img_idx]

    # directory to save images to
    dir_name = 'mit67_' + dataset_str + '_saliency/'

    # save unaltered image for comparison
    # FIX: prefix was hard-coded to 'smooth_'; derive it from dataset_str so
    # the filename stays correct when a different dataset is selected above
    imsave(dir_name + dataset_str + '_' + str(img_idx) + '.png', img)

    # try several filters at this layer
    nb_filters = 64
    for i in range(nb_filters):
        print(i)
        saliency = visualize_saliency(model, layer_idx, i, img)
        # name format: img_index + layer_index + filter_index
        imsave((dir_name + 'sal_' + str(img_idx) + '_' + str(layer_idx)
                + '_' + str(i) + '.png'),
               saliency)
def main():
    """Train on the first listed dataset and test on all listed datasets.

    The first entry of dataset_strs is used for training; every entry
    (including the first) is used for testing.
    """
    # dataset_strs = ['line_drawings', 'dR_symmetric', 'dR_asymmetric']
    dataset_strs = ['smooth']
    # dataset_strs = ['smooth', 'smooth_dR_symmetric', 'smooth_dR_asymmetric']
    # dataset_strs = ['rgb']

    datasets = [MIT67Dataset(name) for name in dataset_strs]

    # check for optional preprocessing arg
    train_and_test(datasets)
def main():
    """Train and test on MIT67 'smooth' with a selectable VGG16 variant."""
    dataset_strs = ['smooth']
    datasets = [MIT67Dataset(name) for name in dataset_strs]

    # choose which vgg16 model to use
    model_str = 'vgg16_hybrid_1365'
    # model_str = 'vgg16_hybrid_1365_stride'
    # model_str = 'places365_vgg11_runaway_weights.h5'

    # color mode depends on which model we're using (number of channels)
    # color_mode = 'grayscale'
    color_mode = 'rgb'

    train_and_test(datasets, model_str, color_mode)
def main():
    """Train a model on the first chosen dataset, test it, and save it.

    dataset_strs is a list of the form
    ['train_dataset', 'test_dataset_1', 'test_dataset_2', ...]:
    a model is trained on the first entry and then tested on every entry
    (so it is always tested at least on its own training dataset).
    """
    # dataset_strs = ['rgb']
    dataset_strs = ['smooth']

    # convert from strings to Dataset objects
    datasets = [MIT67Dataset(name) for name in dataset_strs]

    model = train_and_test(datasets)

    # save model for later
    model.save('models/vgg16_hybrid_1365_mit67_' + dataset_strs[0] + '.h5')
def main():
    """Run train_and_test on a fixed triple of MIT67 contour datasets."""
    # CHOOSE 3 datasets
    # dataset_strs = ['smooth', 'ribbon', 'taper']
    # dataset_strs = ['smooth', 'smooth', 'smooth']
    # dataset_strs = ['smooth', 'ribbon', 'ribbon']
    # dataset_strs = ['smooth', 'taper', 'taper']
    # dataset_strs = ['smooth', 'smooth', 'ribbon']
    # dataset_strs = ['ribbon', 'ribbon', 'smooth']
    # dataset_strs = ['smooth', 'separation', 'separation']
    # dataset_strs = ['smooth', 'ribbon', 'separation']
    # dataset_strs = ['smooth', 'taper', 'separation']
    dataset_strs = ['ribbon', 'taper', 'separation']

    print('using datasets: ')
    print(dataset_strs)

    datasets = [MIT67Dataset(name) for name in dataset_strs]
    train_and_test(datasets)
def main():
    """Run train_and_test on three MIT67 contour datasets with a chosen
    VGG16 variant."""
    # choose 3 datasets
    # dataset_strs = ['ribbon', 'ribbon', 'ribbon']
    # dataset_strs = ['taper', 'taper', 'taper']
    # dataset_strs = ['smooth', 'ribbon', 'ribbon']
    # dataset_strs = ['smooth', 'smooth', 'smooth']
    # dataset_strs = ['smooth', 'taper', 'taper']
    # dataset_strs = ['smooth', 'ribbon', 'ribbon']
    dataset_strs = ['smooth', 'ribbon', 'taper']

    print('using datasets: ')
    print(dataset_strs)

    datasets = [MIT67Dataset(name) for name in dataset_strs]

    # choose which vgg16 model to use
    model_str = 'vgg16_hybrid_1365'
    # model_str = 'vgg16_hybrid_1365_stride'

    train_and_test(datasets, model_str)
def main():
    """Compare the unmodified VGG16-hybrid against the larger-stride
    variant on the MIT67 'rgb' dataset."""
    dataset = MIT67Dataset('rgb')

    img_size = (224, 224)
    color_mode = 'rgb'
    x_train, y_train = dataset.train_data(img_size, color_mode, 1)
    x_test, y_test = dataset.test_data(img_size, color_mode, 1)

    # Trial 1: unmodified VGG16
    model = vgg16_hybrid_1365(1)
    train_and_test(model, x_train, y_train, x_test, y_test)

    # clear memory (release the backend session before building trial 2)
    K.clear_session()

    # Trial 2: pooling layers replaced with larger stride
    model = vgg16_hybrid_1365_stride(1)
    train_and_test(model, x_train, y_train, x_test, y_test)
def bottleneck_features():
    """Compute and save VGG16 bottleneck features for MIT67.

    Builds two feature sets: (1) the plain RGB dataset and (2) a synthetic
    3-channel dataset whose channels are the 'smooth', 'dR_weighted' and
    'd2R_weighted' grayscale variants.  Features and integer class labels
    for both sets are concatenated and written to .npy files
    ('bneck_train', 'bneck_test', 'train_labels', 'test_labels').
    """
    model = vgg16_hybrid_1365(1)
    img_size = (224, 224)

    # --- RGB dataset ------------------------------------------------------
    rgb_dataset = MIT67Dataset('rgb')
    x_rgb_train, y_rgb_train = rgb_dataset.train_data(img_size, 'rgb', 1)
    x_rgb_test, y_rgb_test = rgb_dataset.test_data(img_size, 'rgb', 1)

    # one-hot -> integer class indices
    rgb_train_labels = np.asarray([np.argmax(y) for y in y_rgb_train])
    rgb_test_labels = np.asarray([np.argmax(y) for y in y_rgb_test])

    # --- weighted line drawings, one variant per channel ------------------
    intact_dataset = MIT67Dataset('smooth')
    x1_train, y1_train = intact_dataset.train_data(img_size, 'grayscale', 1)
    x1_test, y1_test = intact_dataset.test_data(img_size, 'grayscale', 1)

    dR_dataset = MIT67Dataset('dR_weighted')
    x2_train, y2_train = dR_dataset.train_data(img_size, 'grayscale', 1)
    x2_test, y2_test = dR_dataset.test_data(img_size, 'grayscale', 1)

    d2R_dataset = MIT67Dataset('d2R_weighted')
    x3_train, y3_train = d2R_dataset.train_data(img_size, 'grayscale', 1)
    x3_test, y3_test = d2R_dataset.test_data(img_size, 'grayscale', 1)

    # allocate 3-channel arrays and fill each channel from one variant
    x_train = np.ndarray(shape=(x1_train.shape[0:3] + (3,)))
    x_test = np.ndarray(shape=(x1_test.shape[0:3] + (3,)))
    channel_pairs = ((x1_train, x1_test),
                     (x2_train, x2_test),
                     (x3_train, x3_test))
    for ch, (tr, te) in enumerate(channel_pairs):
        x_train[:, :, :, ch] = tr.squeeze()
        x_test[:, :, :, ch] = te.squeeze()

    # all three variants share labels; take them from the first
    x_train_labels = np.asarray([np.argmax(y) for y in y1_train])
    x_test_labels = np.asarray([np.argmax(y) for y in y1_test])

    # --- bottleneck features ----------------------------------------------
    rgb_bneck_train = model.predict(x_rgb_train)
    rgb_bneck_test = model.predict(x_rgb_test)
    x_bneck_train = model.predict(x_train)
    x_bneck_test = model.predict(x_test)

    # stack RGB features on top of line-drawing features
    bneck_train = np.concatenate((rgb_bneck_train, x_bneck_train), axis=0)
    bneck_test = np.concatenate((rgb_bneck_test, x_bneck_test), axis=0)
    train_labels = np.concatenate((rgb_train_labels, x_train_labels), axis=0)
    test_labels = np.concatenate((rgb_test_labels, x_test_labels), axis=0)

    np.save('bneck_train', bneck_train)
    np.save('bneck_test', bneck_test)
    np.save('train_labels', train_labels)
    np.save('test_labels', test_labels)
    print('done')
def main():
    """Train and test on the MIT67 'smooth' dataset."""
    datasets = [MIT67Dataset('smooth')]
    train_and_test(datasets)
def main():
    """Train a model on the MIT67 'smooth' dataset and visualize it."""
    dataset = MIT67Dataset('smooth')
    train_and_visualize(dataset)