def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu scalars_to_plot = np.arange(0.0, 1.0, .1) fig, axes = plt.subplots(figsize=(15, 5), nrows=2, ncols=5) params = { 'channel': 1, 'learning_rate': .05, 'regularization': 0.0001, 'steps': 1000, 'loss': 'TV', 'loss_lambda': 0, } layer_names = [ 'conv3', #'conv4', #'conv5', ] tensor_names = [ 'conv_2', #'conv_3', #'conv_4', ] alexnet_kwargs = {'train': False} loss = 'TV' preproc = True for layer_name, tensor_name in zip(layer_names, tensor_names): print("Processing %s" % layer_name) for loss_lambda, ax in zip(scalars_to_plot, axes.ravel()): params['loss_lambda'] = loss_lambda print("Processing loss %d" % loss_lambda) params['tensor_name'] = tensor_name optimal_image, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, params, preproc, layer_name=None, ) ax.imshow(optimal_image) ax.set_title(str(round(loss_lambda, 2))) ax.axis('off') save_path = "%s/alexnet_%s_TVloss_lambdas.png" % (SAVE_PATH, layer_name) plt.savefig(save_path, dpi=300)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu layer_params = { 'conv3': { 'channel': 0, 'learning_rate': 0.05, 'regularization': 1e-4, 'steps': 2048, 'tensor_name': 'conv_2', #'unit_index': (4,4), 'loss': 'TV', 'loss_lambda': 0.5, } } keys = ['conv3'] alexnet_kwargs = {'train': False} # setup optional inputs preproc = True layer_dict = layer_params[keys[0]] optimal_image, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, layer_dict, preproc, layer_name=None, ) plt.imshow(optimal_image) save_path = "%s/testing_refactor2.png" % SAVE_PATH plt.savefig(save_path, dpi=200)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu layer_params = { 'conv5': { 'channel': 6, 'learning_rate': 0.05, 'regularization': 0.0001, 'steps': 500, 'tensor_name': 'conv_4', 'loss': 'TV', }, } keys = [ 'conv5', ] alexnet_kwargs = { 'train': False } loss = 'TV' images = np.zeros((iterations, 128, 128, 3)) layer_name = keys[0] layer_dict = layer_params[layer_name] fig, axes = plt.subplots(figsize=(20, 20), nrows=5, ncols=5) for its, ax in enumerate(fig.axes): optimal_image, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, layer_dict, preproc=False, layer_name=None, ) print(its) images[its,:,:,:] = optimal_image ax.imshow(optimal_image) ax.axis('off') save_path = "%s/alexnet_repeat_testing%d.png" % (SAVE_PATH,iterations) plt.savefig(save_path, dpi=200) # now convert the images to a tensor and resize tf.reset_default_graph() image_tensor = tf.convert_to_tensor(images, dtype=tf.float32) #resize for alexnet resized_images = tf.image.resize_images(image_tensor, (224, 224)) #initialize model convnet = alexnet(resized_images, train=False) # define output tensors of interest fc8_outputs = convnet.layers['fc8'] # initialize tf Session and restore weighs sess = tf.Session() tf_saver_restore = tf.train.Saver() tf_saver_restore.restore(sess, CKPT_PATH) # run the tensors logits = sess.run(fc8_outputs) # apply softmax to output probs = np.zeros((logits.shape)) for col in range(len(logits)): #ugly but works probs[col,:] = softmax(logits[col,:]) winning_class = (np.argmax(probs,1)) #top 1 print(winning_class) # get top 5 and compute overlap top_5 = np.argpartition(probs, -5,axis=1)[:,-5:] total = len(np.ravel(top_5)) no_repeats = len(np.unique(top_5)) overlap = total-no_repeats print(total) print(overlap) print((overlap/total)*100) #percent overlap # visualize the probs across repeats fig,ax = plt.subplots(figsize=(20, 5)) img = ax.imshow(logits[:,1:100]) ax.set_xlabel('Class logits for the first 100 classes') ax.set_title("Class logits across repeated optimizations") fig.colorbar(img, ax=ax) save_path = "%s/alexnet_repeats_visualize_class_logits.png" % (SAVE_PATH) plt.savefig(save_path, dpi=200) #get one spearman's rho as a baseline baseline_rho = spearmanr(np.argsort(logits[0]),np.argsort(logits[1])) print(baseline_rho)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu unit_x = 8 unit_y = 8 #pick a random channel channel = random.randrange((96)) layer_params = { 'conv1': { 'channel': channel, 'learning_rate': 0.05, 'regularization': 1e-4, 'steps': 500, 'tensor_name': 'conv', 'unit_index': (unit_x, unit_y), } } keys = ['conv1'] alexnet_kwargs = {'train': False} # setup optional inputs preproc = False loss = None ## Run imageopt layer_dict = layer_params['conv1'] optimal_image, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, layer_dict, preproc, layer_name=None, ) ## Get weights back tf.reset_default_graph() init = tf.random_uniform_initializer(minval=0, maxval=1) #initialize random noise image_shape = (1, 128, 128, 3) images = tf.get_variable("images", image_shape, initializer=init) #use random noise image to get alexnet graph back model = alexnet_no_fc_wrapper(images, tensor_name='conv', train=False) sess = tf.Session() all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES) temp_saver = tf.train.Saver(var_list=[ v for v in all_variables if "images" not in v.name and "beta" not in v.name ]) temp_saver.restore(sess, CKPT_PATH) #restore checkpoint weights weights_tensor = tf.get_default_graph().get_tensor_by_name( "conv1/weights:0") weights = sess.run(weights_tensor) ## Plot fig, (ax1, ax2, ax3) = plt.subplots(figsize=(17, 5), nrows=1, ncols=3) title = "AlexNet sanity check: Channel %d" % (channel) fig.suptitle(title) true_filter = norm_image(weights[:, :, :, channel]) ax1.imshow(true_filter) ax1.set_title("True filter") ax2.imshow(optimal_image) ax2.set_title("Optimized image") ax3.plot(loss_list, c='k', linewidth=5) ax3.spines['top'].set_visible(False) ax3.spines['right'].set_visible(False) ax3.set_ylabel('Loss') ax3.set_xlabel('Steps') ax3.set_title("Loss over iterations") if loss is None: save_path = "%s/alexnet_sanity_check_channel%d.png" % (SAVE_PATH, channel) else: save_path = "%s/alexnet_sanity_check_channel%d_with%sloss.png" % ( SAVE_PATH, channel, loss) plt.savefig(save_path, dpi=200)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu fig, axes = plt.subplots(figsize=(25, 5), nrows=1, ncols=5) loss = 'TV' preproc = True params = { 'channel':0, 'learning_rate': .05, 'regularization': 0.0001, 'steps': 1000, # 2048, 'loss': loss, #'loss_lambda': 1.0, } preproc_params = { 'pad': 20, #12, 'scale': True, 'rotate': True, 'pre_jitter': 8, 'post_jitter': 8, } layer_names = [ 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', ] tensor_names = [ 'conv', 'conv_1', 'conv_2', 'conv_3', 'conv_4', ] alexnet_kwargs = { 'train': False } for tensor_name, ax in zip(tensor_names, axes.ravel()): print("Processing %s" % tensor_name) params['tensor_name'] = tensor_name optimal_image, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, params, preproc, preproc_params, layer_name=None, ) ax.imshow(optimal_image) ax.axis('off') if preproc is True: if loss is None: save_path = "%s/alexnet_sample_preproc.png" % (SAVE_PATH) else: save_path = "%s/alexnet_sample_preproc_%sloss.png" % (SAVE_PATH, loss) else: if loss is None: save_path = "%s/alexnet_sample_nopreproc.png" % (SAVE_PATH) else: save_path = "%s/alexnet_sample_nopreproc_%sloss.png" % (SAVE_PATH, loss) plt.savefig(save_path, dpi=300)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu fig, axes = plt.subplots(figsize=(40, 5), nrows=1, ncols=10) loss = 'TV' preproc = True params = { 'channel': 11, 'learning_rate': .05, 'regularization': 0.0001, 'steps': 1000, # 2048, 'loss': loss, } layer_names = [ 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'conv6', 'conv7', 'conv8', 'conv9', 'conv10', ] tnn_kwargs = { 'json_fpath': JSON_PATH, 'batch_size': 1, } for layer_name, ax in zip(layer_names, axes.ravel()): print("Processing %s" % layer_name) optimal_image, loss_list = get_optimal_image( tnn_no_fc_wrapper, tnn_kwargs, CKPT_PATH, params, preproc, layer_name=layer_name, ) ax.imshow(optimal_image) ax.axis('off') if preproc is True: if loss is None: save_path = "%s/tnn_sample_preproc.png" % (SAVE_PATH) else: save_path = "%s/tnn_sample_preproc_%sloss.png" % (SAVE_PATH, loss) else: if loss is None: save_path = "%s/tnn_sample_nopreproc.png" % (SAVE_PATH) else: save_path = "%s/tnn_sample_nopreproc_%sloss.png" % (SAVE_PATH, loss) plt.savefig(save_path, dpi=300)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu channels_to_plot = np.arange(25) fig, axes = plt.subplots(figsize=(20, 20), nrows=5, ncols=5) params = { 'channel': 1, 'learning_rate': .05, 'regularization': 0.0001, 'steps': 1000, #'loss': 'TV', #'loss_lambda': 1.0, } layer_names = [ 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', ] tensor_names = [ 'conv', 'conv_1', 'conv_2', 'conv_3', 'conv_4', ] alexnet_kwargs = { 'train': False } loss = None #'TV' preproc = False for layer_name, tensor_name in zip(layer_names, tensor_names): print("Processing %s" % layer_name) for channel, ax in zip(channels_to_plot, axes.ravel()): print("Processing channel %d" % channel) params['channel'] = channel params['tensor_name'] = tensor_name optimal_image, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, params, preproc, layer_name=None, ) ax.imshow(optimal_image) ax.axis('off') if preproc is True: if loss is None: save_path = "%s/alexnet_%s_25channels_preproc.png" % (SAVE_PATH, layer_name) else: save_path = "%s/alexnet_%s_25channels_preproc_%sloss.png" % (SAVE_PATH, layer_name, loss) else: if loss is None: save_path = "%s/alexnet_%s_25channels_nopreproc.png" % (SAVE_PATH, layer_name) else: save_path = "%s/alexnet_%s_25channels_nopreproc_%sloss.png" % (SAVE_PATH, layer_name, loss) plt.savefig(save_path, dpi=300)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu channels_to_plot = np.arange(25) fig, axes = plt.subplots(figsize=(20, 20), nrows=5, ncols=5) preproc = False loss = 'TV' params = { 'channel': 1, 'learning_rate': 0.05, 'regularization': 0.0001, 'steps': 2048, 'loss': loss, } layer_names = [ 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'conv6', 'conv7', 'conv8', 'conv9', 'conv10', ] tnn_kwargs = { 'json_fpath': JSON_PATH, 'batch_size': 1, } for layer_name in layer_names: for channel, ax in zip(channels_to_plot, axes.ravel()): print("Processing channel %d" % channel) params['channel'] = channel optimal_image, loss_list = get_optimal_image( tnn_no_fc_wrapper, tnn_kwargs, CKPT_PATH, params, preproc, layer_name=layer_name, ) ax.imshow(optimal_image) ax.axis('off') if preproc is True: if loss is None: save_path = "%s/TNN_%s_25channels_preproc.png" % (SAVE_PATH, layer_name) else: save_path = "%s/TNN_%s_25channels_preproc_%sloss.png" % ( SAVE_PATH, layer_name, loss) else: if loss is None: save_path = "%s/TNN_%s_25channels_nopreproc.png" % (SAVE_PATH, layer_name) else: save_path = "%s/TNNt_%s_25channels_nopreproc_%sloss.png" % ( SAVE_PATH, layer_name, loss) print("Writing to %s" % save_path) plt.savefig(save_path, dpi=300)
def main(): print("Using GPU %s" % FLAGS.gpu) os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu layer_params = { 'conv4': { 'channel': 0, 'learning_rate': 0.05, 'regularization': 0.0001, 'steps': 500, 'tensor_name': 'conv_3', 'loss': 'TV', }, } keys = [ 'conv4', ] alexnet_kwargs = {'train': False} loss = 'TV' layer_name = keys[0] layer_dict = layer_params[layer_name] fig, (ax1, ax2) = plt.subplots(figsize=(10, 5), nrows=1, ncols=2) title = "Differences from preprocessing choices" fig.suptitle(title) optimal_image_no_preproc, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, layer_dict, preproc=False, layer_name=None, ) ax1.imshow(optimal_image_no_preproc) ax1.set_title("No preprocessing") optimal_image_preproc, loss_list = get_optimal_image( alexnet_no_fc_wrapper, alexnet_kwargs, CKPT_PATH, layer_dict, preproc=True, layer_name=None, ) ax2.imshow(optimal_image_preproc) ax2.set_title("With preprocessing") save_path = "%s/alexnet_preproc_diffs_conv4.png" % (SAVE_PATH) plt.savefig(save_path, dpi=200) # now convert the images to a tensor and resize tf.reset_default_graph() # resize and stack the images image_tensor_no_preproc = tf.convert_to_tensor(optimal_image_no_preproc, dtype=tf.float32) npre = tf.image.resize_images(image_tensor_no_preproc, (224, 224)) image_tensor_preproc = tf.convert_to_tensor(optimal_image_preproc, dtype=tf.float32) pre = tf.image.resize_images(image_tensor_preproc, (224, 224)) images = tf.stack([npre, pre]) #initialize model convnet = alexnet(images, train=False) # define output tensors of interest fc8_outputs = convnet.layers['fc8'] # initialize tf Session and restore weighs sess = tf.Session() tf_saver_restore = tf.train.Saver() tf_saver_restore.restore(sess, CKPT_PATH) # run the tensors logits = sess.run(fc8_outputs) # apply softmax to output probs = np.zeros((logits.shape)) for col in range(len(logits)): #ugly but works probs[col, :] = softmax(logits[col, :]) winning_class = (np.argmax(probs, 1)) #top 1 print(winning_class) # get top 5 and compute overlap top_5 = np.argpartition(probs, -5, axis=1)[:, -5:] total = len(np.ravel(top_5)) no_repeats = len(np.unique(top_5)) overlap = total - no_repeats print(total) print(overlap) print((overlap / total) * 100) #percent overlap fig, ax = plt.subplots(figsize=(20, 2)) img = ax.imshow(logits[:, 1:100]) ax.set_xlabel('Class logits for the first 100 classes') ax.set_title( "Class logits for optimization with and without preprocessing") fig.colorbar(img, ax=ax) save_path = "%s/alexnet_repeats_visualize_preproc_logits_conv4.png" % ( SAVE_PATH) plt.savefig(save_path, dpi=200) #compute spearman's rho rho = spearmanr(np.argsort(logits[0]), np.argsort(logits[1])) print(rho)