def visualize_max_au_per_id():
    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'
    model_file = os.path.join('../../eccv_18/experiments', model_name, model_file_name)
    out_dir = os.path.join('../experiments', model_name, 'au_info')
    out_file_html = os.path.join(out_dir, 'max_idx.html')

    ims_html = []
    captions_html = []
    for au_num, au_curr in enumerate(au_map):
        out_dir_caps = os.path.join(out_dir, 'au_' + str(au_num) + '_caps')
        test_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
        out_file = os.path.join(out_dir, 'au_' + str(au_num) + '_max_idx.txt')

        ims_row = util.readLinesFromFile(out_file)
        ims_row = [
            util.getRelPath(file_curr.replace(str_replace[0], str_replace[1]), dir_server)
            for file_curr in ims_row
        ]
        captions_row = [str(au_curr)] * len(ims_row)

        ims_html.append(ims_row)
        captions_html.append(captions_row)
        # print captions_row
        # raw_input()

    visualize.writeHTML(out_file_html, ims_html, captions_html, 224, 224)
def view_frames_side_by_side(meta_dir, out_file_html, str_replace):
    dirs_full = [
        dir_curr for dir_curr in glob.glob(os.path.join(meta_dir, '*'))
        if os.path.isdir(dir_curr)
    ]

    im_lists = []
    for dir_curr in dirs_full:
        im_list = list(glob.glob(os.path.join(dir_curr, '*')))
        im_list.sort()
        im_lists.append(im_list)

    num_ims = [len(im_list) for im_list in im_lists]

    im_rows = []
    caption_rows = []
    for row_num in range(np.min(num_ims)):
        im_row = []
        caption_row = []
        for im_list, dir_curr in zip(im_lists, dirs_full):
            im_curr = im_list[row_num]
            im_name = os.path.split(im_curr)[1]
            caption_curr = ' '.join([os.path.split(dir_curr)[1], im_name])
            im_row.append(im_curr.replace(str_replace[0], str_replace[1]))
            caption_row.append(caption_curr)
        im_rows.append(im_row)
        caption_rows.append(caption_row)

    visualize.writeHTML(out_file_html, im_rows, caption_rows, height=256, width=448)
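# Example call for view_frames_side_by_side (a minimal sketch; the directory and
# replacement pair below are illustrative assumptions, not paths used elsewhere in
# this file). Each subdirectory of meta_dir becomes one column of frames, matched
# row-by-row by sort order.
# view_frames_side_by_side(meta_dir='../scratch/frames_at_motion',
#                          out_file_html='../scratch/frames_at_motion/side_by_side.html',
#                          str_replace=['..', '/disk3/maheen_data/eccv_18'])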
def visualize_primary_html():
    dir_meta = '../scratch'
    eg_dirs = ['demo_primary', 'demo_primary_f19', 'demo_primary_m17']

    # for eg_dir in eg_dirs:
    #     im_files = glob.glob(os.path.join(dir_meta, eg_dir, '*.jpg.jpg'))
    #     for im_file in im_files:
    #         out_file = im_file[:im_file.rindex('.')]
    #         shutil.move(im_file, out_file)
    #     visualize.writeHTMLForFolder(os.path.join(dir_meta, eg_dir))

    out_file_html = os.path.join(dir_meta, 'all_demos.html')
    im_html = []
    captions_html = []
    for filter_num, x, y in itertools.product(range(32), range(2, 5), range(2, 5)):
        im_row = []
        caption_row = []
        for eg_dir in eg_dirs:
            caption_curr = '_'.join([str(val) for val in [filter_num, x, y]])
            in_file = os.path.join(dir_meta, eg_dir, caption_curr + '.jpg')
            in_file = util.getRelPath(
                in_file.replace(str_replace_viz[0], str_replace_viz[1]), dir_server)
            im_row.append(in_file)
            caption_row.append(caption_curr)
        im_html.append(im_row)
        captions_html.append(caption_row)

    visualize.writeHTML(out_file_html, im_html, captions_html, 224, 224)
def just_visualize():
    dir_server = '/disk3'
    horse_dir = os.path.join(dir_server, 'maheen_data', 'eccv_18',
                             'data/horse_data_cleaned')
    # the val list overrides the train list below; reorder to visualize train instead
    train_file = os.path.join(horse_dir, 'trainImageList.txt')
    train_file = os.path.join(horse_dir, 'valImageList.txt')
    out_file_html = train_file[:train_file.rindex('.')] + '.html'

    train_data = util.readLinesFromFile(train_file)
    im_files = []
    for line in train_data:
        im_file = line.split(' ')[0]
        im_file = im_file[2:]
        print im_file
        im_file = os.path.join(horse_dir, im_file)
        im_file = util.getRelPath(im_file, dir_server)
        im_files.append(im_file)

    idx_splits = list(
        np.linspace(0, len(train_data), num=100, endpoint=True).astype(int))
    print idx_splits

    im_html = []
    captions_html = []
    for idx_idx, start_idx in enumerate(idx_splits[:-1]):
        end_idx = idx_splits[idx_idx + 1]
        row_curr = im_files[start_idx:end_idx]
        im_html.append(row_curr)
        captions_html.append([''] * len(row_curr))

    visualize.writeHTML(out_file_html, im_html, captions_html, 100, 100)
    print out_file_html.replace(dir_server, 'http://vision3.cs.ucdavis.edu:1000/')
def visualize_front_side_lists():
    dir_server = '/disk3'
    out_dir = os.path.join(dir_server, 'maheen_data', 'eccv_18',
                           'data/horse_data_cleaned_yolo')
    data_dirs = ['train', 'test']
    list_files = ['train_kp_front_side.txt', 'val_kp_front_side.txt']

    for data_dir, list_file in zip(data_dirs, list_files):
        out_file_html = os.path.join(out_dir, list_file[:list_file.rindex('.')] + '.html')
        im_list = util.readLinesFromFile(os.path.join(out_dir, list_file))
        im_list = [line_curr.split(' ') for line_curr in im_list]
        im_list = np.array(im_list)
        nums = np.unique(im_list[:, 1])

        ims_html = []
        captions_html = []
        for num_curr in nums:
            ims_curr = im_list[im_list[:, 1] == num_curr, 0]
            ims_curr = [
                os.path.join(out_dir, data_dir, im_curr.replace('+', '%2B'))
                for im_curr in ims_curr
            ]
            ims_html.append(
                [util.getRelPath(im_curr, dir_server) for im_curr in ims_curr])
            captions_html.append([num_curr for im_curr in ims_curr])

        ims_html = [list(i) for i in zip(*ims_html)]
        captions_html = [list(i) for i in zip(*captions_html)]
        visualize.writeHTML(out_file_html, ims_html, captions_html, 100, 100)
def make_html_for_flip_ver():
    dir_meta = '../data/mmi'
    dir_server = '/disk3'
    str_replace = ['..', os.path.join(dir_server, 'maheen_data/eccv_18')]
    click_str = 'http://vision3.idav.ucdavis.edu:1000'

    anno_file = os.path.join(dir_meta, 'annos_all_oriented.txt')
    out_file_html = os.path.join(dir_meta, 'annos_all_oriented.html')

    annos = util.readLinesFromFile(anno_file)
    ims = [file_curr.split(' ')[0] for file_curr in annos]
    dirs_rel = list(set([os.path.split(file_curr)[0] for file_curr in ims]))

    ims_html = []
    captions_html = []
    for dir_curr in dirs_rel:
        ims_row = [file_curr for file_curr in ims if file_curr.startswith(dir_curr)]
        ims_row = [
            util.getRelPath(im_curr.replace(str_replace[0], str_replace[1]), dir_server)
            for im_curr in ims_row
        ]
        captions_row = [os.path.split(im_curr)[1] for im_curr in ims_row]
        captions_html.append(captions_row)
        ims_html.append(ims_row)

    visualize.writeHTML(out_file_html, ims_html, captions_html, 96, 96)
def k_means(caps, num_clusters, filter_num, x, y, test_im, out_dir_curr,
            out_file_html, convnet, imsize, rewrite=False):
    vec_rel_org = caps[:, filter_num, x, y, :]
    k_meaner = sklearn.cluster.KMeans(n_clusters=num_clusters)
    vec_rel = sklearn.preprocessing.normalize(vec_rel_org, axis=0)  # feature normalize
    vec_rel = vec_rel_org  # overrides the normalized features; cluster the raw capsule vectors
    bins = k_meaner.fit_predict(vec_rel)
    print bins
    for val in np.unique(bins):
        print val, np.sum(bins == val)

    im_row = [[] for idx in range(num_clusters)]
    caption_row = [[] for idx in range(num_clusters)]
    for idx_idx, bin_curr in enumerate(bins):
        out_file_curr = os.path.join(out_dir_curr, str(idx_idx) + '.jpg')
        # if not os.path.exists(out_file_curr) or rewrite:
        im_curr = test_im[idx_idx]
        rec_field, center = receptive_field.get_receptive_field(
            convnet, imsize, len(convnet) - 1, x, y)
        center = [int(round(val)) for val in center]
        range_x = [max(0, center[0] - rec_field / 2),
                   min(imsize, center[0] + rec_field / 2)]
        range_y = [max(0, center[1] - rec_field / 2),
                   min(imsize, center[1] + rec_field / 2)]
        im_curr = im_curr[range_y[0]:range_y[1], range_x[0]:range_x[1]]
        scipy.misc.imsave(out_file_curr, im_curr)

        im_row[bin_curr].append(util.getRelPath(out_file_curr, dir_server))
        caption_row[bin_curr].append(
            '%d %.4f' % (bin_curr, np.linalg.norm(vec_rel_org[idx_idx])))

    # out_file_html = out_dir_curr+'.html'
    visualize.writeHTML(out_file_html, im_row, caption_row, 40, 40)
    print out_file_html
    return im_row, caption_row
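# Example call for k_means (a minimal sketch; cluster count and output paths are
# illustrative assumptions). caps is the compiled 5-d primary capsule array
# [num_images x num_filters x H x W x capsule_dim] returned by get_caps_compiled,
# and test_im holds the corresponding input images.
# caps, test_file, convnet, imsize = get_caps_compiled()
# test_im = [scipy.misc.imread(line.split(' ')[0])
#            for line in util.readLinesFromFile(test_file)]
# k_means(caps, num_clusters=8, filter_num=0, x=3, y=3, test_im=test_im,
#         out_dir_curr='../scratch/kmeans_0_3_3',
#         out_file_html='../scratch/kmeans_0_3_3.html',
#         convnet=convnet, imsize=imsize)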
def view_reconstruction():
    replace_str = ['..', '/disk3/maheen_data/eccv_18']
    dir_server = '/disk3'
    out_dir_results = '../experiments/dynamic_capsules/with_recon_ck_notanh_nosig_fixtrain_0_108_exp_0.001/out_caps_model_test_107'
    out_dir_recons = os.path.join(out_dir_results, 'recon_im').replace(
        replace_str[0], replace_str[1])
    util.mkdir(out_dir_recons)

    train_file = '../data/ck_96/train_test_files/test_0.txt'
    mean_file = '../data/ck_96/train_test_files/train_0_mean.png'
    std_file = '../data/ck_96/train_test_files/train_0_std.png'
    im_size = 28

    mean_im = scipy.misc.imresize(scipy.misc.imread(mean_file),
                                  (im_size, im_size)).astype(np.float32)
    std_im = scipy.misc.imresize(scipy.misc.imread(std_file),
                                 (im_size, im_size)).astype(np.float32)
    std_im[std_im == 0] = 1.

    out_all = np.load(os.path.join(out_dir_results, 'out_all.npy'))
    predictions = np.load(os.path.join(out_dir_results, 'predictions.npy'))
    labels_all = np.load(os.path.join(out_dir_results, 'labels_all.npy'))
    caps_all = np.load(os.path.join(out_dir_results, 'caps_all.npy'))
    recons_all = np.load(os.path.join(out_dir_results, 'recons_all.npy'))

    lines = util.readLinesFromFile(train_file)
    im_files_html = []
    captions_html = []
    out_file_html = os.path.join(out_dir_results, 'recons_viz.html')
    for idx_line, line in enumerate(lines):
        im_file, label = line.split(' ')
        im_file = im_file.replace(replace_str[0], replace_str[1])
        recons = recons_all[idx_line][0]
        recons = (recons * std_im) + mean_im
        out_file = os.path.join(out_dir_recons, os.path.split(im_file)[1])
        scipy.misc.imsave(out_file, recons)
        im_files_html.append([
            util.getRelPath(file_curr, dir_server)
            for file_curr in [im_file, out_file]
        ])
        captions_html.append(['true ' + label, 'recon ' + str(predictions[idx_line])])

    visualize.writeHTML(out_file_html, im_files_html, captions_html, 28, 28)
def make_html_recon(out_dir_results, mean_file, std_file):
    out_dir_results = out_dir_results.replace(str_replace[0], str_replace[1])
    recon_all_pred = np.load(os.path.join(out_dir_results, 'recon_all.npy')).squeeze()
    recon_all_gt = np.load(os.path.join(out_dir_results, 'recon_all_gt.npy')).squeeze()
    im_org = np.load(os.path.join(out_dir_results, 'im_org.npy')).squeeze()

    out_dir_im = os.path.join(out_dir_results, 'im_recon')
    util.mkdir(out_dir_im)

    std_im = scipy.misc.imread(std_file).astype(np.float32)
    mean_im = scipy.misc.imread(mean_file).astype(np.float32)

    out_file_html = os.path.join(out_dir_results, 'im_recon.html')
    ims_html = []
    captions_html = []
    strs_pre = ['org', 'gt', 'pred']
    for idx in range(im_org.shape[0]):
        im_row = []
        caption_row = []
        for str_curr, np_curr in zip(strs_pre, [im_org, recon_all_gt, recon_all_pred]):
            im_curr = np_curr[idx]
            im_curr = (im_curr * std_im) + mean_im
            # im_curr = im_curr-np.min(im_curr)
            # im_curr = im_curr/np.max(im_curr)*255
            out_file_curr = os.path.join(out_dir_im, str_curr + '_' + str(idx) + '.jpg')
            scipy.misc.imsave(out_file_curr, im_curr)
            im_row.append(util.getRelPath(out_file_curr, dir_server))
            caption_row.append(' '.join([str(idx), str_curr]))
        ims_html.append(im_row)
        captions_html.append(caption_row)

    visualize.writeHTML(out_file_html, ims_html, captions_html, 96, 96)
    print out_file_html.replace(dir_server, click_str)
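# Example call for make_html_recon (a sketch; the experiment directory and split
# below are assumptions built from names used elsewhere in this file).
# make_html_recon('../experiments/dynamic_capsules/with_recon_ck_tanh0_108_exp_0.001/out_caps_model_test_107',
#                 '../data/ck_96/train_test_files/train_0_mean.png',
#                 '../data/ck_96/train_test_files/train_0_std.png')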
def view_loss_curves(dir_exp_meta, pre_split, post_split, range_splits, model_num):
    dir_server = '/disk3'
    str_replace = ['..', '/disk3/maheen_data/eccv_18']
    out_file_html = os.path.join(
        dir_exp_meta, pre_split + post_split[1:] + '_loss_curves.html').replace(
            str_replace[0], str_replace[1])

    ims_html = []
    captions_html = []
    for split_num in range_splits:
        caption = [str(split_num)]
        dir_curr = os.path.join(dir_exp_meta, pre_split + str(split_num) + post_split)
        ims_to_disp = [
            os.path.join(dir_curr, 'loss.jpg'),
            os.path.join(dir_curr, 'val_accu.jpg')
        ]
        ims_to_disp = [im_curr for im_curr in ims_to_disp if os.path.exists(im_curr)]
        dirs_res = [
            os.path.join(dir_curr, 'results_model_' + str_curr)
            for str_curr in [str(model_num), str(model_num) + '_center']
        ]
        dirs_res = [dir_curr for dir_curr in dirs_res if os.path.exists(dir_curr)]
        for dir_res in dirs_res:
            log_file = os.path.join(dir_res, 'log.txt')
            val_accuracy = util.readLinesFromFile(log_file)[-1]
            val_accuracy = val_accuracy.split(' ')[-1]
            caption.append(val_accuracy)

        caption = [' '.join(caption)] * len(ims_to_disp)
        ims_html_curr = [
            util.getRelPath(loss_file.replace(str_replace[0], str_replace[1]), dir_server)
            for loss_file in ims_to_disp
        ]
        ims_html.append(ims_html_curr)
        captions_html.append(caption)

    visualize.writeHTML(out_file_html, ims_html, captions_html, 200, 200)
    print out_file_html.replace(dir_server, 'http://vision3.idav.ucdavis.edu:1000')
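# Example call for view_loss_curves (a sketch; the prefix/suffix below are
# assumptions built from the experiment naming pattern used elsewhere in this
# file). pre_split and post_split bracket the split number in each experiment
# directory name, so split 0 here would map to 'ck_0_108_exp_0.001'.
# view_loss_curves(dir_exp_meta='../experiments/dynamic_capsules',
#                  pre_split='ck_', post_split='_108_exp_0.001',
#                  range_splits=range(10), model_num=107)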
def main():
    num_clusters = 50
    # script_cluster_contours(num_clusters)
    # return

    out_dir_meta = os.path.join('../scratch', 'frames_at_motion')
    util.mkdir(out_dir_meta)
    # vid_names = ['ch01_20181209092600', 'ch03_20181209092844',
    #              'ch05_20181208230458', 'ch06_20181209093714']
    vid_names = ['all_vids']
    vid_dirs = [os.path.join(out_dir_meta, vid_name) for vid_name in vid_names]
    # max_dim = 487
    # dir_server = '/home/maheen/gross_pain'
    str_replace = ['..', '']

    for vid_dir in vid_dirs:
        contour_file = os.path.join(vid_dir, 'contours.npz')
        data = np.load(contour_file, allow_pickle=True)
        files = data['files']
        contours = data['contours']

        labels_file = os.path.join(vid_dir, 'labels_self_' + str(num_clusters) + '.npy')
        labels = np.load(labels_file)
        out_file_html = os.path.join(vid_dir, 'cluster_viz_' + str(num_clusters) + '.html')

        im_paths = []
        captions = []
        for label in np.unique(labels):
            im_paths_curr = files[labels == label]
            im_paths_curr = [
                im_path.replace(str_replace[0], str_replace[1])
                for im_path in im_paths_curr
            ]
            captions_curr = [
                '/'.join(im_path.split('/')[-3::2]) for im_path in im_paths_curr
            ]
            im_paths.append(im_paths_curr)
            captions.append(captions_curr)

        visualize.writeHTML(out_file_html, im_paths, captions, height=256, width=488)
        print(out_file_html)
def make_primary_caps_emotion_map():
    out_dir_meta = '../experiments/figures'.replace(str_replace[0], str_replace[1])
    out_dir_im = os.path.join(out_dir_meta, 'primary_caps_emotion_gridding')
    util.mkdir(out_dir_im)

    caps, test_file, convnet, imsize = get_caps_compiled(routed=False)
    gt_class = np.array([
        int(line_curr.split(' ')[1])
        for line_curr in util.readLinesFromFile(test_file)
    ])
    num_emotions = np.unique(gt_class).size
    print 'num_emotions', num_emotions

    mags = np.linalg.norm(caps, axis=4)
    mags_org = mags
    mags = np.transpose(mags, (0, 2, 3, 1))

    out_file_html = os.path.join(out_dir_im, 'emotion_grid.html')
    im_rows = []
    caption_rows = []
    for filt_num in range(mags.shape[-1]):
        mags_rel = mags[:, :, :, filt_num]
        im_row = []
        caption_row = []
        for emo_num in np.unique(gt_class):
            mags_emo_rel = mags_rel[gt_class == emo_num, :, :]
            mags_emo_rel = np.mean(mags_emo_rel, 0)
            min_val = np.min(mags_emo_rel)
            max_val = np.max(mags_emo_rel)
            print mags_emo_rel.shape, min_val, max_val

            title_curr = '_'.join([str(val) for val in [filt_num, emo_num]])
            out_file_curr = os.path.join(out_dir_im, title_curr + '.jpg')
            visualize.plot_colored_mats(out_file_curr, mags_emo_rel, min_val, max_val,
                                        title=title_curr)
            im_row.append(util.getRelPath(out_file_curr, dir_server))
            caption_row.append(title_curr)
        im_rows.append(im_row)
        caption_rows.append(caption_row)

    visualize.writeHTML(out_file_html, im_rows, caption_rows, 50, 50)
def script_viz_mag():
    out_dir_htmls = '../experiments/figures/primary_caps_viz'.replace(
        str_replace[0], str_replace[1])
    util.mkdir(out_dir_htmls)
    out_dir_im = os.path.join(out_dir_htmls, 'im')
    util.mkdir(out_dir_im)

    caps, test_file, convnet, imsize = get_caps_compiled()
    mags = np.linalg.norm(caps, axis=4)
    print mags.shape
    print np.min(mags), np.max(mags)

    test_im = [
        scipy.misc.imread(line_curr.split(' ')[0])
        for line_curr in util.readLinesFromFile(test_file)
    ]
    print len(test_im)
    print test_im[0].shape

    for x in range(mags.shape[2]):
        for y in range(mags.shape[3]):
            out_file_html = os.path.join(out_dir_htmls, str(x) + '_' + str(y) + '.html')
            ims_html = []
            captions_html = []
            for filter_num in range(mags.shape[1]):
                out_dir_curr = os.path.join(
                    out_dir_im, str(x) + '_' + str(y) + '_' + str(filter_num))
                util.mkdir(out_dir_curr)
                im_row, caption_row = save_ims(mags, filter_num, x, y, test_im,
                                               out_dir_curr, convnet, imsize)
                im_row = [util.getRelPath(im_curr, dir_server) for im_curr in im_row]
                # caption_row = [os.path.split(im_curr)[1][:-4] for im_curr in im_row]
                ims_html.append(im_row[:10] + im_row[-10:])
                captions_html.append(caption_row[:10] + caption_row[-10:])
            visualize.writeHTML(out_file_html, ims_html, captions_html, 40, 40)
def visualize_max_direction_im():
    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'
    model_file = os.path.join('../../eccv_18/experiments', model_name, model_file_name)
    out_dir = os.path.join('../experiments', model_name, 'au_info')

    min_mag = 0.9
    out_file_html = os.path.join(out_dir, 'max_au_diff_' + str(min_mag) + '.html')
    ims_html = []
    captions_html = []
    for au_num, au_curr in enumerate(au_map):
        out_dir_caps = os.path.join(out_dir, 'au_' + str(au_num) + '_caps')
        test_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
        out_file = os.path.join(out_dir_caps, 'min_idx_' + str(min_mag) + '.txt')

        im_files = util.readLinesFromFile(test_file)
        idx_rel = [int(val) for val in util.readLinesFromFile(out_file)]
        ims_curr = [
            util.getRelPath(
                im_files[idx_curr].replace(str_replace[0], str_replace[1]), dir_server)
            for idx_curr in idx_rel
        ]
        ims_html.append(ims_curr)
        captions_html.append([str(au_curr)] * len(ims_curr))

    visualize.writeHTML(out_file_html, ims_html, captions_html, 256, 256)
    print out_file_html.replace(str_replace_viz[0], str_replace_viz[1]).replace(
        dir_server, click_str)
def verify_html():
    dir_meta = '../data/mmi'
    dir_server = '/disk3'
    str_replace = ['..', os.path.join(dir_server, 'maheen_data/eccv_18')]
    click_str = 'http://vision3.idav.ucdavis.edu:1000'

    file_pres = ['train', 'test_side', 'test_front']
    folds = [0, 1]
    out_dir_train = os.path.join(dir_meta, 'train_test_files')

    for file_pre in file_pres:
        for fold in folds:
            anno_file = os.path.join(out_dir_train, file_pre + '_' + str(fold) + '.txt')
            out_file_html = anno_file[:anno_file.rindex('.')] + '.html'
            # anno_file = os.path.join(dir_meta,'annos_all_oriented.txt')
            # out_file_html = os.path.join(dir_meta, 'annos_all_oriented.html')

            annos = util.readLinesFromFile(anno_file)
            ims = [file_curr.split(' ')[0] for file_curr in annos]
            dirs_rel = list(set([os.path.split(file_curr)[0] for file_curr in ims]))

            ims_html = []
            captions_html = []
            for dir_curr in dirs_rel:
                ims_row = [
                    file_curr for file_curr in ims if file_curr.startswith(dir_curr)
                ]
                ims_row = [
                    util.getRelPath(
                        im_curr.replace(str_replace[0], str_replace[1]), dir_server)
                    for im_curr in ims_row
                ]
                captions_row = [os.path.split(im_curr)[1] for im_curr in ims_row]
                captions_html.append(captions_row)
                ims_html.append(ims_row)

            visualize.writeHTML(out_file_html, ims_html, captions_html, 96, 96)
            print out_file_html.replace(dir_server, click_str)
def main():
    # make_train_test_splits_horse_based()
    dir_server = '/disk3'
    im_dir = os.path.join(dir_server, 'maheen_data', 'eccv_18', 'data/horse_51')
    data_dir_meta = '../data/horse_51'
    out_dir_split = os.path.join(data_dir_meta, 'train_test_split_horse_based')

    for split_num in range(6):
        test_file = os.path.join(out_dir_split, 'test_' + str(split_num) + '.txt')
        out_file_html = test_file[:test_file.rindex('.')] + '.html'
        ims = [
            line_curr.split(' ')[0]
            for line_curr in util.readLinesFromFile(test_file)
        ]
        ims = [im.replace(data_dir_meta, im_dir) for im in ims]
        print ims[0]
        ims = [util.getRelPath(im, dir_server) for im in ims]
        visualize.writeHTML(out_file_html, [ims], [ims], 224, 224)
        print out_file_html
def save_im_make_html(strs_pre, out_file_html, out_dir_im, ims_all, caption_arrs):
    ims_html = []
    captions_html = []
    for idx_row in range(ims_all[0].shape[0]):
        im_row = []
        caption_row = []
        for idx_col, str_pre in enumerate(strs_pre):
            im_curr = ims_all[idx_col][idx_row]
            out_file_curr = os.path.join(out_dir_im, str_pre + '_' + str(idx_row) + '.jpg')
            # im_curr = im_curr+np.min(im_curr)
            print np.min(im_curr), np.max(im_curr)
            # im_curr = im_curr.astype(np.uint8)
            scipy.misc.imsave(out_file_curr, im_curr)

            caption_curr = [str(idx_col), str_pre]
            if caption_arrs[idx_col] is not None:
                caption_curr = caption_curr + [
                    str(val) for val in caption_arrs[idx_col][idx_row] if val != 0
                ]
            caption_curr = ' '.join(caption_curr)
            caption_row.append(caption_curr)
            im_row.append(util.getRelPath(out_file_curr, dir_server))
        ims_html.append(im_row)
        captions_html.append(caption_row)

    visualize.writeHTML(out_file_html, ims_html, captions_html, 96, 96)
    print out_file_html.replace(dir_server, click_str)
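# Example call for save_im_make_html (a sketch; the output paths are assumptions).
# ims_all holds one image array per column (e.g. originals, ground-truth recons,
# predicted recons), and caption_arrs optionally holds per-image values to append
# to each caption (None for columns without extra values).
# save_im_make_html(strs_pre=['org', 'gt', 'pred'],
#                   out_file_html='../scratch/recon_compare.html',
#                   out_dir_im='../scratch/recon_compare_ims',
#                   ims_all=[im_org, recon_all_gt, recon_all_pred],
#                   caption_arrs=[None, None, None])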
def make_cooc_html(just_train=False):
    # read lines from files
    dir_train_test_files = '../data/ucf101/train_test_files'
    train_file = os.path.join(dir_train_test_files, 'train_just_primary_corrected.txt')
    test_file = os.path.join(dir_train_test_files, 'test_just_primary_corrected.txt')
    if just_train:
        out_dir = '../scratch/i3d_dists_just_train'
        out_dir_html = '../scratch/i3d_dists_just_train_htmls'
        lines = util.readLinesFromFile(train_file)
    else:
        out_dir = '../scratch/i3d_dists'
        out_dir_html = '../scratch/i3d_dists_htmls'
        lines = util.readLinesFromFile(train_file) + util.readLinesFromFile(test_file)

    # get vid names per class
    npy_files = [line_curr.split(' ')[0] for line_curr in lines]
    vid_names_per_class, class_id = get_vid_names_per_class(lines)

    # make the html paths
    util.mkdir(out_dir_html)
    dir_server = '/disk2/maheen_data'
    str_replace = ['..', os.path.join(dir_server, 'nn_net')]

    # make the pre lists
    n_vals = [10, 25, 50, 100]
    post_dirs = ['mat', 'bg', 'fg']
    in_dirs = [[] for post_dir in post_dirs]
    in_dir_strs = [[] for post_dir in post_dirs]
    out_dir_htmls = [os.path.join(out_dir_html, post_dir) for post_dir in post_dirs]
    for idx_post_dir, post_dir in enumerate(post_dirs):
        for n_val in n_vals:
            dir_curr = os.path.join(out_dir, 'arr_coocs_' + str(n_val) + '_viz', post_dir)
            str_curr = 'N ' + str(n_val)
            in_dirs[idx_post_dir].append((dir_curr, str_curr))
    print in_dirs

    for idx_html, out_dir_html in enumerate(out_dir_htmls):
        util.mkdir(out_dir_html)
        in_dirs_curr = in_dirs[idx_html]
        for idx_class, vid_names in enumerate(vid_names_per_class):
            out_file_html = os.path.join(out_dir_html, class_names[idx_class] + '.html')
            rows = []
            captions = []
            for vid_name in vid_names:
                row_curr = []
                caption_curr = []
                for in_dir_curr, str_curr in in_dirs_curr:
                    file_curr = os.path.join(in_dir_curr, vid_name + '.jpg')
                    file_curr = util.getRelPath(
                        file_curr.replace(str_replace[0], str_replace[1]), dir_server)
                    row_curr.append(file_curr)
                    caption_curr.append(str_curr + ' ' + vid_name)
                rows.append(row_curr)
                captions.append(caption_curr)
            visualize.writeHTML(out_file_html, rows, captions, height=330, width=400)
def deep_dream_au_max_dist():
    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'
    model_file = os.path.join('../../eccv_18/experiments', model_name, model_file_name)
    out_dir = os.path.join('../experiments', model_name, 'au_info')
    min_mag = 0.7

    mean_file = 'vgg'
    std_file = 'vgg'
    model = torch.load(model_file)
    print model
    dreamer = Deep_Dream(mean_file, std_file)

    octave_n = 2
    num_iterations = 100
    learning_rate = 1e-1
    sigma = [0.6, 0.5]
    max_jitter_all = [0, 5, 10, 20]

    for max_jitter in max_jitter_all:
        out_dir_im = os.path.join(out_dir, 'max_dist_dd_' + str(max_jitter))
        util.mkdir(out_dir_im)
        out_file_html = out_dir_im + '.html'
        ims_html = []
        captions_html = []
        for au_num, au_curr in enumerate(au_map):
            out_dir_caps = os.path.join(out_dir, 'au_' + str(au_num) + '_caps')
            test_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
            out_file = os.path.join(out_dir_caps, 'min_idx_' + str(min_mag) + '.txt')

            im_files = util.readLinesFromFile(test_file)
            idx_rel = [int(val) for val in util.readLinesFromFile(out_file)]
            ims_curr = [im_files[idx_curr] for idx_curr in idx_rel]

            ims_html_row = []
            captions_html_row = []
            for idx_im, im_curr in enumerate(ims_curr):
                file_str = '_'.join([str(val) for val in [au_curr, idx_im]]) + '.jpg'
                out_file = os.path.join(out_dir_im, file_str)
                out_im = dreamer.dream_fc_caps(
                    model, im_curr, octave_n=octave_n, control=au_num, color=True,
                    num_iterations=num_iterations, learning_rate=learning_rate,
                    sigma=sigma, max_jitter=max_jitter)[:, :, ::-1]
                scipy.misc.imsave(out_file, out_im)

                ims_html_row.append(
                    util.getRelPath(
                        im_curr.replace(str_replace[0], str_replace[1]), dir_server))
                captions_html_row.append('org')
                ims_html_row.append(
                    util.getRelPath(
                        out_file.replace(str_replace_viz[0], str_replace_viz[1]),
                        dir_server))
                captions_html_row.append(file_str)

            ims_html.append(ims_html_row)
            captions_html.append(captions_html_row)
            # break

        visualize.writeHTML(out_file_html, ims_html, captions_html, 256, 256)
        print out_file_html.replace(str_replace_viz[0], str_replace_viz[1]).replace(
            dir_server, click_str)
def plot_specific_patches():
    dirs_rel = get_ck_16_dirs()
    test_pre = '../data/ck_96/train_test_files/test_'
    im_files = []
    for num in range(10):
        lines = util.readLinesFromFile(test_pre + str(num) + '.txt')
        im_files = im_files + [line_curr.split(' ')[0] for line_curr in lines]
    print len(im_files)
    print im_files[0]

    out_dir = '../experiments/figures/ck_routing'
    out_dir_im_meta = '../experiments/figures_rebuttal/ck_routing'
    util.makedirs(out_dir)
    util.makedirs(out_dir_im_meta)

    mats_names = ['labels', 'preds', 'routes_0', 'routes_1']
    mat_arrs = [[] for name in mats_names]
    for dir_curr in dirs_rel:
        for idx_mat_name, mat_name in enumerate(mats_names):
            arr_curr_file = os.path.join(dir_curr, mat_name + '.npy')
            arr_curr = np.load(arr_curr_file)
            mat_arrs[idx_mat_name].append(arr_curr)

    # labels and preds concatenate along dim 0, routing matrices along dim 1
    axis_combine = [0, 0, 1, 1]
    mat_arrs = [
        np.concatenate(mat_arr, axis_curr)
        for mat_arr, axis_curr in zip(mat_arrs, axis_combine)
    ]
    for idx_mat_arr, mat_arr in enumerate(mat_arrs):
        print mat_arr.shape, len(im_files)

    accuracy = np.sum(mat_arrs[0] == mat_arrs[1]) / float(mat_arrs[0].size)
    print 'accuracy', accuracy

    routes_all = mat_arrs[2:]
    num_emos = 8
    emo_strs = ['Neutral', 'Anger', 'Contempt', 'Disgust', 'Fear', 'Happiness',
                'Sadness', 'Surprise']
    _, _, convnet, imsize = pcv.get_caps_compiled()
    print convnet, imsize

    # arr_emo = (1, [(3, 1), (3, 3), (1, 0)])
    arr_emo = (3, [(2, 3), (3, 2), (4, 3)])
    for label_curr, tuples_to_save in [arr_emo]:
        label_compare = label_curr
        out_dir_im = os.path.join(out_dir_im_meta, emo_strs[label_curr])
        util.mkdir(out_dir_im)
        print out_dir_im

        # keep only images of this emotion that were classified correctly
        idx_keep = np.logical_and(mat_arrs[0] == label_curr, mat_arrs[0] == mat_arrs[1])
        files_keep = [
            im_curr for idx_im_curr, im_curr in enumerate(im_files)
            if idx_keep[idx_im_curr]
        ]

        out_file_html = os.path.join(out_dir_im, 'patches.html')
        html_rows = []
        caption_rows = []
        for x, y in tuples_to_save:
            html_row = []
            caption_row = []
            for idx_test_im_curr, test_im in enumerate(files_keep):
                im_curr = scipy.misc.imread(test_im)
                out_file_curr = os.path.join(
                    out_dir_im,
                    '_'.join([str(val) for val in [idx_test_im_curr, x, y]]) + '.jpg')
                rec_field, center = receptive_field.get_receptive_field(
                    convnet, imsize, len(convnet) - 1, x, y)
                center = [int(round(val)) for val in center]
                range_x = [max(0, center[0] - rec_field / 2),
                           min(imsize, center[0] + rec_field / 2)]
                range_y = [max(0, center[1] - rec_field / 2),
                           min(imsize, center[1] + rec_field / 2)]
                patch = im_curr[range_y[0]:range_y[1], range_x[0]:range_x[1]]
                scipy.misc.imsave(out_file_curr, patch)
                html_row.append(
                    util.getRelPath(
                        out_file_curr.replace(str_replace[0], str_replace[1]),
                        dir_server))
                caption_row.append(
                    ' '.join([str(val) for val in [idx_test_im_curr, x, y]]))
            html_rows.append(html_row)
            caption_rows.append(caption_row)

        visualize.writeHTML(out_file_html, html_rows, caption_rows, 40, 40)
def script_view_clusters(routed=False, mag_sorted=True):
    out_dir_meta = '../experiments/figures/primary_caps_viz'.replace(
        str_replace[0], str_replace[1])
    out_dir_im = os.path.join(out_dir_meta, 'im_all_patches/train')
    out_dir_meta = '../experiments/figures/primary_caps_viz_clusters'.replace(
        str_replace[0], str_replace[1])
    util.mkdir(out_dir_meta)

    caps, test_file, convnet, imsize, routes = get_caps_compiled(routed=True)
    mags = np.linalg.norm(caps, axis=4)
    mags_org = mags
    mags = np.transpose(mags, (0, 2, 3, 1))
    mags = np.reshape(
        mags, (mags.shape[0] * mags.shape[1] * mags.shape[2], mags.shape[3]))

    gt_class = [
        int(line_curr.split(' ')[1])
        for line_curr in util.readLinesFromFile(test_file)
    ]
    routes_gt = routes[gt_class, range(routes.shape[1])].squeeze()
    mag_routes = np.linalg.norm(routes_gt, axis=2)  # np.sum(routes_gt,axis=2)
    # mag_routes = np.reshape(mag_routes, (mag_routes.shape[0], 32, 6, 6, 1))

    if routed:
        caps = caps * mag_routes

    caps_org = np.array(caps)
    caps = np.transpose(caps, (0, 2, 3, 1, 4))
    caps = np.reshape(caps, (caps.shape[0] * caps.shape[1] * caps.shape[2],
                             caps.shape[3], caps.shape[4]))

    idx_helper = range(caps.shape[0])
    idx_helper = np.reshape(
        idx_helper, (caps_org.shape[0], caps_org.shape[2], caps_org.shape[3]))

    num_to_keep = 100
    num_clusters = 32
    for filt_num in range(caps.shape[1]):
        if mag_sorted:
            out_file_html = os.path.join(out_dir_meta, str(filt_num) + '_mag_sorted.html')
        elif routed:
            out_file_html = os.path.join(out_dir_meta, str(filt_num) + '_route_weighted.html')
        else:
            out_file_html = os.path.join(out_dir_meta, str(filt_num) + '.html')

        im_rows = []
        caption_rows = []
        caps_curr = caps[:, filt_num]
        mags_curr = mags[:, filt_num]

        k_meaner = sklearn.cluster.KMeans(n_clusters=num_clusters)
        vec_rel = sklearn.preprocessing.normalize(caps_curr, axis=1)
        print vec_rel.shape
        k_meaner.fit(np.random.permutation(vec_rel))
        cluster_centers = k_meaner.cluster_centers_
        print cluster_centers.shape
        cluster_belongings = k_meaner.predict(vec_rel)

        for idx_cluster_center, cluster_center in enumerate(cluster_centers):
            if mag_sorted:
                idx_rel = np.where(cluster_belongings == idx_cluster_center)[0]
                mag_rel = mags_curr[idx_rel]
                idx_sort = np.argsort(mag_rel)[::-1]
                idx_sort = list(idx_rel[idx_sort])
            else:
                cluster_center = cluster_center[np.newaxis, :]
                dist = np.linalg.norm(vec_rel - cluster_center, axis=1)
                idx_sort = list(np.argsort(dist))
            idx_sort = idx_sort[:num_to_keep] + idx_sort[-num_to_keep:]

            im_row = []
            caption_row = []
            for idx_idx, idx_curr in enumerate(idx_sort):
                arg_multi_dim = np.where(idx_helper == idx_curr)
                arg_multi_dim = [arr[0] for arr in arg_multi_dim]
                file_curr = os.path.join(
                    out_dir_im, '_'.join([str(val) for val in arg_multi_dim]) + '.jpg')
                assert os.path.exists(file_curr)
                im_row.append(util.getRelPath(file_curr, dir_server))
                caption_row.append('%d %.4f' % (idx_idx, mags_curr[idx_curr]))
            im_rows.append(im_row)
            caption_rows.append(caption_row)

        visualize.writeHTML(out_file_html, im_rows, caption_rows, 40, 40)
        print out_file_html.replace(dir_server, click_str)
def make_html_test_data():
    replace_str = ['..', '/disk3/maheen_data/eccv_18']
    dir_server = '/disk3'
    # out_dir_results = '../experiments/dynamic_capsules/ck_0_108_exp_0.001/out_caps_model_test_107'
    out_dir_results = '../experiments/dynamic_capsules/with_recon_ck_tanh0_108_exp_0.001/out_caps_model_test_107'
    train_file = '../data/ck_96/train_test_files/test_0.txt'
    # reconstruct = True

    out_all = np.load(os.path.join(out_dir_results, 'out_all.npy'))
    predictions = np.load(os.path.join(out_dir_results, 'predictions.npy'))
    labels_all = np.load(os.path.join(out_dir_results, 'labels_all.npy'))
    caps_all = np.load(os.path.join(out_dir_results, 'caps_all.npy'))

    # for every class, make an html where each row sorts images from most to least
    # activation along one capsule dimension
    lines = util.readLinesFromFile(train_file)
    lines = [line.split(' ') for line in lines]
    im_files = np.array([line[0] for line in lines])
    labels_file = np.array([int(line[1]) for line in lines])
    assert np.all(labels_file == labels_all)

    for emotion in list(np.unique(labels_all)) + ['all']:
        if emotion == 'all':
            out_file_html = os.path.join(out_dir_results, str(emotion) + '.html')
            bin_emotion = labels_all == predictions
            caps_rel = [
                caps_all[im_num, labels_all[im_num], :][np.newaxis, :]
                for im_num in range(labels_all.shape[0])
                if labels_all[im_num] == predictions[im_num]
            ]
            caps_rel = np.concatenate(caps_rel, 0)
            print caps_rel.shape
        else:
            out_file_html = os.path.join(out_dir_results, str(emotion) + '.html')
            bin_emotion = np.logical_and(labels_all == emotion,
                                         labels_all == predictions)
            caps_rel = caps_all[bin_emotion, emotion, :]
            print caps_rel.shape

        im_files_rel = im_files[bin_emotion]
        print im_files_rel.shape

        im_files_html = []
        captions_html = []
        for dimension in range(caps_rel.shape[1]):
            idx_sort = np.argsort(caps_rel[:, dimension])[::-1]
            im_files_sorted = im_files_rel[idx_sort]
            caps_sorted = caps_rel[idx_sort, dimension]
            print idx_sort.shape
            im_files_curr = [
                util.getRelPath(
                    file_curr.replace(replace_str[0], replace_str[1]), dir_server)
                for file_curr in im_files_sorted
            ]
            captions_curr = [
                str(dimension) + ' ' + os.path.split(file_curr)[1] + ' ' +
                str(caps_sorted[idx_file_curr])
                for idx_file_curr, file_curr in enumerate(im_files_sorted)
            ]
            im_files_html.append(im_files_curr)
            captions_html.append(captions_curr)

        visualize.writeHTML(out_file_html, im_files_html, captions_html, 96, 96)
def make_primary_au_specific_comparative_html(vgg_ft=False):
    ids_chosen = ['F001', 'F019', 'M008', 'M014']
    # the second assignment below overrides the first list of subject ids
    ids_chosen = ['F004', 'F007', 'F010', 'F013', 'F016', 'F022', 'M005', 'M011', 'M017']

    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    out_dir = os.path.join('../experiments', model_name, 'au_info').replace(
        str_replace_viz[0], str_replace_viz[1])
    out_dir_in = out_dir
    if vgg_ft:
        model_name = 'vgg_face_finetune/bp4d_256_train_test_files_256_color_align_0_False_MultiLabelSoftMarginLoss_10_step_5_0.1_0_0.0001_0.001_0.001_False'
        out_dir = os.path.join('../experiments', model_name, 'au_info').replace(
            str_replace_viz[0], str_replace_viz[1])

    out_dir_html = os.path.join(out_dir, 'max_viz_primary_comparison_htmls')
    util.mkdir(out_dir_html)

    au_map = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23, 24]
    filter_range = [4507, 339, 498]
    aus_chosen = [1, 2, 4]
    pos_x = 3
    pos_y = 2
    # aus_chosen = [12, 14, 15, 23, 24]
    # pos_x = 3
    # pos_y = 3

    for id_curr in ids_chosen:
        out_file_html = '_'.join([str(val) for val in [id_curr, 'aus'] + aus_chosen])
        out_file_html = os.path.join(out_dir_html, out_file_html + '.html')
        ims_html = []
        captions_html = []
        for au_curr in aus_chosen:
            au_num = au_map.index(au_curr)
            im_row = []
            caption_row = []
            out_dir_im = os.path.join(out_dir, 'max_viz_primary_' + str(au_curr))
            util.mkdir(out_dir_im)

            max_au_file = os.path.join(out_dir_in, 'au_' + str(au_num) + '_max_idx.txt')
            im_files = util.readLinesFromFile(max_au_file)
            ids = [file_curr.split('/')[-3] for file_curr in im_files]
            rel_idx = np.in1d(ids, ids_chosen)
            print rel_idx
            rel_file = np.array(im_files)[rel_idx]
            rel_file = [file_curr for file_curr in rel_file if id_curr in file_curr]
            print rel_file
            assert len(rel_file) <= 1
            if len(rel_file) == 0:
                continue
            rel_file = rel_file[0]
            id_curr = rel_file.split('/')[-3]

            out_dir_im_curr = os.path.join(out_dir_im, id_curr)
            util.mkdir(out_dir_im_curr)

            str_all = [
                '_'.join([str(val) for val in [filt, y, x]]) + '.jpg'
                for filt, y, x in itertools.product(filter_range, [pos_y], [pos_x])
            ]
            ims_row = [
                util.getRelPath(os.path.join(out_dir_im_curr, str_curr), dir_server)
                for str_curr in str_all
            ]
            ims_row = [
                util.getRelPath(
                    rel_file.replace(str_replace[0], str_replace[1]), dir_server)
            ] + ims_row
            str_all = ['AU ' + str(au_curr)] + str_all
            ims_html.append(ims_row)
            captions_html.append(str_all)

        visualize.writeHTML(out_file_html, ims_html, captions_html, 224, 224)
        print out_file_html
def dream_primary_aus_specific(vgg_ft=False):
    ids_chosen = ['F001', 'F019', 'M008', 'M014']
    # ids_chosen = ['F004', 'F007', 'F010', 'F013', 'F016', 'F022', 'M005',
    #               'M011', 'M017'] + ['F001', 'F019', 'M008', 'M014']

    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    out_dir = os.path.join('../experiments', model_name, 'au_info').replace(
        str_replace_viz[0], str_replace_viz[1])
    out_dir_in = out_dir
    if vgg_ft:
        model_name = 'vgg_face_finetune/bp4d_256_train_test_files_256_color_align_0_False_MultiLabelSoftMarginLoss_10_step_5_0.1_0_0.0001_0.001_0.001_False'
        out_dir = os.path.join('../experiments', model_name, 'au_info').replace(
            str_replace_viz[0], str_replace_viz[1])

    au_map = [1, 2, 4, 6, 7, 10, 12, 14, 15, 17, 23, 24]
    # grid positions (in the 6x6 primary capsule grid) to dream at for each au
    range_x_map = {1: [3], 2: [3], 4: [3], 6: [3], 7: [3], 10: [3], 12: [3],
                   14: [3], 15: [3], 17: [3], 23: [3], 24: [3]}
    range_y_map = {1: [1, 2], 2: [1, 2], 4: [1, 2], 6: [1, 2], 7: [1, 2],
                   10: [3, 4], 12: [3, 4], 14: [3, 4], 15: [3, 4], 17: [3, 4],
                   23: [3, 4], 24: [3, 4]}
    filter_range = [i for i in range(32) if i not in [0, 5, 6, 9, 21]]

    for au_num, au_curr in enumerate(au_map):
        if au_num < 0:
            continue
        range_x = range_x_map[au_curr]
        range_y = range_y_map[au_curr]

        out_dir_im = os.path.join(out_dir, 'max_viz_primary_' + str(au_curr))
        util.mkdir(out_dir_im)
        out_file_html = os.path.join(out_dir_im, 'viz_all.html')
        ims_html = []
        captions_html = []

        max_au_file = os.path.join(out_dir_in, 'au_' + str(au_num) + '_max_idx.txt')
        im_files = util.readLinesFromFile(max_au_file)
        ids = [file_curr.split('/')[-3] for file_curr in im_files]
        rel_idx = np.in1d(ids, ids_chosen)
        print rel_idx
        rel_files = np.array(im_files)[rel_idx]
        print rel_files

        for rel_file in rel_files:
            id_curr = rel_file.split('/')[-3]
            out_dir_im_curr = os.path.join(out_dir_im, id_curr)
            util.mkdir(out_dir_im_curr)
            if vgg_ft:
                tups = sdd.deep_dream_vgg_ft(in_file=rel_file,
                                             out_dir_im=out_dir_im_curr,
                                             octave_n=2,
                                             num_iterations=200,
                                             learning_rate=3e-2,
                                             sigma=[0.6, 0.5],
                                             return_caps=False,
                                             primary=True,
                                             filter_range=filter_range,
                                             x_range=range_y,
                                             y_range=range_x)
                str_all = [
                    '_'.join([str(val) for val in tup_curr]) + '.jpg'
                    for tup_curr in tups
                ]
            else:
                sdd.deep_dream(in_file=rel_file,
                               out_dir_im=out_dir_im_curr,
                               octave_n=2,
                               num_iterations=200,
                               learning_rate=5e-1,
                               sigma=[0.6, 0.5],
                               return_caps=False,
                               primary=True,
                               filter_range=filter_range,
                               x_range=range_y,
                               y_range=range_x)
                str_all = [
                    '_'.join([str(val) for val in [filt, y, x]]) + '.jpg'
                    for filt, y, x in itertools.product(filter_range, range_y, range_x)
                ]
            visualize.writeHTMLForFolder(out_dir_im_curr)
            ims_row = [
                util.getRelPath(os.path.join(out_dir_im_curr, str_curr), dir_server)
                for str_curr in str_all
            ]
            ims_html.append(ims_row)
            captions_html.append(str_all)

        ims_html = np.array(ims_html).T
        captions_html = np.array(captions_html).T
        visualize.writeHTML(out_file_html, ims_html, captions_html, 224, 224)
def change_direction_and_retrieve():
    out_dir_meta = '../experiments/figures/primary_caps_viz'.replace(
        str_replace[0], str_replace[1])
    out_dir_im = os.path.join(out_dir_meta, 'im_all_patches/train')
    out_dir_meta = '../experiments/figures/primary_caps_viz_change_direction'.replace(
        str_replace[0], str_replace[1])
    util.mkdir(out_dir_meta)

    caps, test_file, convnet, imsize, routes = get_caps_compiled(routed=True)
    mags = np.linalg.norm(caps, axis=4)
    mags = np.transpose(mags, (0, 2, 3, 1))
    gt_class = [
        int(line_curr.split(' ')[1])
        for line_curr in util.readLinesFromFile(test_file)
    ]

    caps_org = np.array(caps)
    caps = np.transpose(caps, (0, 2, 3, 1, 4))
    idx_helper = range(caps.shape[0] * caps.shape[1] * caps.shape[2])
    idx_helper = np.reshape(
        idx_helper, (caps_org.shape[0], caps_org.shape[2], caps_org.shape[3]))
    print caps.shape
    print mags.shape
    raw_input()

    num_to_keep = 100
    num_clusters = 32
    mag_range = np.arange(-0.5, 0.6, 0.05)

    for filt_num in range(caps.shape[3]):
        out_file_html = os.path.join(
            out_dir_meta, '_'.join([str(val) for val in [filt_num]]) + '.html')
        html_rows = []
        html_captions = []
        for row_num in range(1, 6):
            for col_num in range(1, 6):
                caps_rel = caps[:, row_num, col_num, filt_num, :]
                mags_rel = mags[:, row_num, col_num, filt_num]
                idx_max = np.argmax(mags_rel, 0)
                caps_max = caps_rel[idx_max]
                caps_norms = np.linalg.norm(caps_rel, axis=1, keepdims=True)
                caps_unit = caps_rel
                print caps_norms.shape
                print caps_rel.shape

                rel_idx = [idx_max, row_num, col_num]
                file_max = os.path.join(
                    out_dir_im, '_'.join([str(val) for val in rel_idx]) + '.jpg')

                for caps_dim in range(caps_max.shape[0]):
                    caps_max = caps_max / np.linalg.norm(caps_max)
                    html_row_curr = [file_max]
                    caption_row_curr = [
                        'org %d %d %.2f' % (filt_num, caps_dim, mags_rel[idx_max])
                    ]
                    for dim_mag_curr in mag_range:
                        # copy so that editing one dimension does not mutate caps_max
                        caps_new = caps_max.copy()
                        caps_new[caps_dim] = dim_mag_curr
                        caps_new = caps_new / np.linalg.norm(caps_new, keepdims=True)
                        distances = np.abs(
                            np.matmul(caps_unit, caps_new[:, np.newaxis]))
                        closest_idx = np.argmin(distances)
                        rel_idx = [closest_idx, row_num, col_num]
                        file_curr = os.path.join(
                            out_dir_im,
                            '_'.join([str(val) for val in rel_idx]) + '.jpg')
                        caption_curr = '%.2f' % (dim_mag_curr)
                        html_row_curr.append(file_curr)
                        caption_row_curr.append(caption_curr)

                    html_row_curr = [
                        util.getRelPath(file_curr, dir_server)
                        for file_curr in html_row_curr
                    ]
                    html_rows.append(html_row_curr)
                    html_captions.append(caption_row_curr)

        visualize.writeHTML(out_file_html, html_rows, html_captions, 40, 40)
        print out_file_html
def make_vid_viz():
    dir_videos = '../data/ucf101/test_data/rgb_10_fps_256/'
    # other video lists used previously:
    # ['video_test_0001268']
    # ['video_test_0001268', 'video_test_0001076', 'video_test_0001118',
    #  'video_test_0001038', 'video_test_0000698', 'video_test_0000844']
    # ['video_test_0000964', 'video_test_0000601', 'video_test_0001343',
    #  'video_test_0000273', 'video_test_0001460']
    # ['video_test_0001463', 'video_test_0000814', 'video_test_0001080',
    #  'video_test_0001174', 'video_test_0000964', 'video_test_0000701',
    #  'video_test_0001040']
    # ['video_test_0001164'], ['video_test_0000026', 'video_test_0000073']
    video_name_list = [
        'video_test_0000672', 'video_test_0000129', 'video_test_0000635'
    ]
    video_name_list = [
        'video_test_0000006', 'video_test_0000353', 'video_test_0000129'
    ]

    out_dir_html = '../scratch/qualitative_figs_failure'
    util.mkdir(out_dir_html)
    dir_server = '/disk2'
    str_replace = ['..', '/nn_net']

    for vid in video_name_list:
        dir_curr = os.path.join(dir_videos, vid)
        ims = glob.glob(os.path.join(dir_curr, '*.jpg'))
        ims.sort()
        # pick 10 evenly spaced frames from the video
        idx_to_pick = np.linspace(0, len(ims), 10, endpoint=False)
        print len(ims), ims[0]
        print idx_to_pick
        ims = [ims[int(idx)] for idx in idx_to_pick]

        out_file_html = os.path.join(out_dir_html, vid + '.html')
        im_row = []
        caption_row = []
        for im in ims:
            print im
            im_row.append(im.replace(str_replace[0], str_replace[1]))
            caption_row.append('')

        visualize.writeHTML(out_file_html, [im_row], [caption_row],
                            height=256, width=455)
        print out_file_html
def pca(caps, num_clusters, filter_num, x, y, test_im, out_dir_curr,
        out_file_html, convnet, imsize, rewrite=False):
    vec_rel = caps[:, filter_num, x, y, :]
    # pca = sklearn.decomposition.PCA(n_components=8, whiten=True)
    # pca.fit(vec_rel_org)
    # print pca.explained_variance_ratio_, np.sum(pca.explained_variance_ratio_)
    # vec_rel = pca.transform(vec_rel_org)

    im_rows = []
    caption_rows = []
    for vec_curr_idx in range(vec_rel.shape[1]):
        # sort the patches by their value along the current capsule dimension
        directions = vec_rel[:, vec_curr_idx]
        idx_sort = np.argsort(directions)

        im_row = []
        caption_row = []
        for idx_idx, idx_curr in enumerate(idx_sort):
            out_file_curr = os.path.join(out_dir_curr, str(idx_idx) + '.jpg')
            # if not os.path.exists(out_file_curr) or rewrite:
            im_curr = test_im[idx_curr]
            rec_field, center = receptive_field.get_receptive_field(
                convnet, imsize, len(convnet) - 1, x, y)
            center = [int(round(val)) for val in center]
            range_x = [max(0, center[0] - rec_field / 2),
                       min(imsize, center[0] + rec_field / 2)]
            range_y = [max(0, center[1] - rec_field / 2),
                       min(imsize, center[1] + rec_field / 2)]
            im_curr = im_curr[range_y[0]:range_y[1], range_x[0]:range_x[1]]
            scipy.misc.imsave(out_file_curr, im_curr)
            im_row.append(util.getRelPath(out_file_curr, dir_server))
            caption_row.append('%d %.2f' % (idx_curr, directions[idx_curr]))
        im_rows.append(im_row)
        caption_rows.append(caption_row)

    visualize.writeHTML(out_file_html, im_rows, caption_rows, 40, 40)
    print out_file_html
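# Example call for pca (a sketch; arguments mirror k_means above and the paths are
# assumptions). With the PCA lines commented out, this function sorts
# receptive-field patches along each raw capsule dimension rather than along
# principal components.
# pca(caps, num_clusters=8, filter_num=0, x=3, y=3, test_im=test_im,
#     out_dir_curr='../scratch/pca_0_3_3',
#     out_file_html='../scratch/pca_0_3_3.html',
#     convnet=convnet, imsize=imsize)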
def script_view_route_weighted_patches_sorted():
    out_dir_meta = '../experiments/figures/primary_caps_viz'.replace(
        str_replace[0], str_replace[1])
    out_dir_im = os.path.join(out_dir_meta, 'im_all_patches/train')

    caps, test_file, convnet, imsize, routes = get_caps_compiled(routed=True)
    print routes.shape
    print test_file

    gt_class = [
        int(line_curr.split(' ')[1])
        for line_curr in util.readLinesFromFile(test_file)
    ]
    routes_gt = routes[gt_class, range(routes.shape[1])].squeeze()
    mag_routes = np.linalg.norm(routes_gt, axis=2)  # np.sum(routes_gt,axis=2)
    # mag_routes = np.reshape(mag_routes, (mag_routes.shape[0], 32, 6, 6, 1))
    print np.min(mag_routes), np.max(mag_routes)
    print mag_routes.shape
    print caps.shape

    # weight each primary capsule by the routing magnitude for its gt class
    caps = caps * mag_routes
    mags = np.linalg.norm(caps, axis=4)
    mags_org = mags
    print 'mags_org.shape', mags_org.shape
    mags = np.transpose(mags, (0, 2, 3, 1))
    print mags.shape
    mags = np.reshape(
        mags, (mags.shape[0] * mags.shape[1] * mags.shape[2], mags.shape[3]))
    print mags.shape

    idx_helper = range(mags.shape[0])
    print len(idx_helper)
    idx_helper = np.reshape(idx_helper, (caps.shape[0], caps.shape[2], caps.shape[3]))
    print idx_helper.shape

    num_to_keep = 100
    print 'mags_org.shape', mags_org.shape
    out_file_html = os.path.join(out_dir_meta, 'mag_sorted_route_weighted.html')
    im_rows = []
    caption_rows = []
    for filt_num in range(mags.shape[1]):
        im_row = []
        caption_row = []
        mag_curr = mags[:, filt_num]
        print np.min(mag_curr), np.max(mag_curr)
        idx_sort = list(np.argsort(mag_curr)[::-1])
        idx_sort = idx_sort[:num_to_keep] + idx_sort[-num_to_keep:]
        sorted_mag_curr = mag_curr[idx_sort]

        for idx_idx, idx_curr in enumerate(idx_sort):
            arg_multi_dim = np.where(idx_helper == idx_curr)
            arg_multi_dim = [arr[0] for arr in arg_multi_dim]
            file_curr = os.path.join(
                out_dir_im, '_'.join([str(val) for val in arg_multi_dim]) + '.jpg')
            assert os.path.exists(file_curr)
            im_row.append(util.getRelPath(file_curr, dir_server))
            caption_row.append(str(idx_idx) + ' ' + str(filt_num))
        im_rows.append(im_row)
        caption_rows.append(caption_row)

    visualize.writeHTML(out_file_html, im_rows, caption_rows, 40, 40)
    print out_file_html.replace(dir_server, click_str)
def save_recon_variants(out_dir_train, model_num, train_data, test_data, gpu_id=0,
                        model_name='alexnet', batch_size_val=None,
                        criterion=nn.CrossEntropyLoss(), margin_params=None,
                        network_params=None, barebones=True):
    mag_range = np.arange(-0.5, 0.6, 0.1)
    out_dir_results = os.path.join(out_dir_train, 'vary_a_batch_squash_' + str(model_num))
    print out_dir_results
    util.mkdir(out_dir_results)
    model_file = os.path.join(out_dir_train, 'model_' + str(model_num) + '.pt')
    log_arr = []

    # network = models.get(model_name,network_params)
    if batch_size_val is None:
        batch_size_val = len(test_data)
    test_dataloader = torch.utils.data.DataLoader(test_data,
                                                  batch_size=batch_size_val,
                                                  shuffle=False,
                                                  num_workers=1)
    torch.cuda.device(0)
    iter_begin = 0
    model = torch.load(model_file)
    model.cuda()
    model.eval()

    predictions = []
    labels_all = []
    out_all = []
    caps_all = []
    recons_all = {}

    mean_im = test_data.mean
    mean_im = mean_im[np.newaxis, :, :]
    std_im = test_data.std[np.newaxis, :, :]

    for num_iter, batch in enumerate(test_dataloader):
        if criterion == 'marginmulti':
            labels = Variable(batch['label'].float().cuda())
        else:
            labels = Variable(torch.LongTensor(batch['label']).cuda())
        data = Variable(batch['image'].cuda())
        recons_all[(0, 0)] = data.data.cpu().numpy()

        all_out = model(data, return_caps=True)
        caps = all_out[-1]
        classes = all_out[0]
        pred = F.relu(classes - 0.5)
        pred = torch.ceil(pred)
        print pred

        # unit-length capsule poses; classes holds the capsule magnitudes
        caps_unit = caps / classes.view(classes.size(0), classes.size(1), 1)

        labels_made_up = np.zeros((labels.size(0), labels.size(1)))
        for label_on in range(labels_made_up.shape[1]):
            labels_made_up = labels_made_up * 0
            labels_made_up[:, label_on] = 1
            labels_curr = Variable(torch.Tensor(labels_made_up).float().cuda())
            # vary one attribute dimension at a time and reconstruct
            for attr_num in range(caps.size(2)):
                for mag_curr in mag_range:
                    caps_curr = caps_unit.clone()
                    caps_curr[:, :, attr_num] = mag_curr
                    recon_curr = model.just_reconstruct(caps_curr, labels_curr)
                    recons_all[(label_on, attr_num, mag_curr)] = \
                        recon_curr.data.cpu().numpy()

        print labels
        # keep only (image, label) pairs that are both annotated and predicted
        labels_keep = np.logical_and(labels.data.cpu().numpy(),
                                     pred.data.cpu().numpy())
        print labels_keep
        break

    ims_html = []
    captions_html = []
    out_file_html = os.path.join(out_dir_results, 'rel_im_mag_only.html')
    out_dir_results = out_dir_results.replace(str_replace[0], str_replace[1])

    num_im = labels_keep.shape[0]
    num_labels = labels_keep.shape[1]
    num_attr = 32
    for im_num in range(num_im):
        print im_num, labels_keep[im_num], np.where(labels_keep[im_num])
        for label_num in np.where(labels_keep[im_num])[0]:
            print im_num, label_num
            for attr_num in range(32):
                im_row = []
                caption_row = []
                for mag_curr in mag_range:
                    key_curr = (label_num, attr_num, mag_curr)
                    im_curr = recons_all[key_curr][im_num]
                    # use label_num here (not the stale loop variable label_on)
                    out_file = '_'.join([
                        str(val) for val in
                        [im_num, label_num, attr_num, mag_curr, '.jpg']
                    ])
                    out_dir_curr = os.path.join(out_dir_results,
                                                'label_' + str(label_num))
                    util.mkdir(out_dir_curr)
                    out_file = os.path.join(out_dir_curr, out_file)
                    im_curr = im_curr * std_im + mean_im
                    scipy.misc.imsave(out_file, im_curr[0])
                    im_row.append('./' + util.getRelPath(out_file, dir_server))
                    caption_str = '%d %d %d %.2f' % tuple([im_num] + list(key_curr))
                    caption_row.append(caption_str)
                ims_html.append(im_row)
                captions_html.append(caption_row)

    visualize.writeHTML(out_file_html, ims_html, captions_html, 96, 96)
    print out_file_html.replace(dir_server, click_str)
def get_class_variations(model_name, route_iter, pre_pend, strs_append, split_num,
                         model_num, class_rel, type_exp, train_pre=None,
                         test_file=None, au=False):
    out_dir_meta = os.path.join('../experiments', model_name + str(route_iter))
    out_dir_train = os.path.join(out_dir_meta, pre_pend + str(split_num) + strs_append)
    # final_model_file = os.path.join(out_dir_train,'model_'+str(model_num)+'.pt')

    if train_pre is None:
        train_pre = os.path.join('../data/ck_96', 'train_test_files')
    if test_file is None:
        test_file = os.path.join(train_pre, 'test_' + str(split_num) + '.txt')
    else:
        test_file = os.path.join(train_pre, test_file)
    mean_file = os.path.join(train_pre, 'train_' + str(split_num) + '_mean.png')
    std_file = os.path.join(train_pre, 'train_' + str(split_num) + '_std.png')

    data_transforms = {}
    data_transforms['val'] = transforms.Compose([
        transforms.ToTensor(),
        lambda x: x * 255.
    ])

    if au:
        test_data = dataset.Bp4d_Dataset_Mean_Std_Im(
            test_file, mean_file, std_file, resize=96,
            transform=data_transforms['val'], binarize=True)
        criterion = 'marginmulti'
    else:
        test_data = dataset.CK_96_Dataset(test_file, mean_file, std_file,
                                          data_transforms['val'])
        criterion = 'margin'

    test_params = dict(out_dir_train=out_dir_train,
                       model_num=model_num,
                       train_data=None,
                       test_data=test_data,
                       gpu_id=0,
                       model_name=model_name,
                       batch_size_val=128,
                       criterion=criterion,
                       au=au,
                       class_rel=class_rel)

    if type_exp == 0:
        save_visualizations.save_class_as_other(**test_params)
        out_file_results = os.path.join(
            out_dir_train, 'save_class_as_other_single_batch_' + str(model_num))
        out_file_html = os.path.join(
            out_file_results, 'visualizing_class_variations_' + str(class_rel) + '.html')
    elif type_exp == 1:
        save_visualizations.save_class_vary_mag(**test_params)
        out_file_results = os.path.join(
            out_dir_train, 'save_class_vary_mag_single_batch_' + str(model_num))
        out_file_html = os.path.join(
            out_file_results, 'visualizing_vary_mag_' + str(class_rel) + '.html')
    elif type_exp == 2:
        save_visualizations.save_class_vary_attr(**test_params)
        out_file_results = os.path.join(
            out_dir_train,
            'save_class_vary_attr_single_batch_' + str(model_num) + '_' + str(class_rel))
        out_file_html = os.path.join(
            out_file_results, 'visualizing_vary_attr_' + str(class_rel) + '.html')
    else:
        save_visualizations.save_class_vary_mag_class_rel(**test_params)
        out_file_results = os.path.join(
            out_dir_train,
            'save_class_vary_mag_single_batch_' + str(model_num) + '_' + str(class_rel))
        out_file_html = os.path.join(
            out_file_results, 'visualizing_vary_mag_' + str(class_rel) + '.html')
        # this branch returns without building the html below
        return

    im_files = np.load(os.path.join(out_file_results, 'ims_all.npy'))
    captions = np.array(im_files)
    im_files_new = []
    captions_new = []
    for r in range(im_files.shape[0]):
        caption_row = []
        im_row = []
        for c in range(im_files.shape[1]):
            file_curr = im_files[r, c]
            caption_row.append(os.path.split(file_curr)[1][:file_curr.rindex('.')])
            im_row.append(
                util.getRelPath(file_curr.replace(str_replace[0], str_replace[1]),
                                dir_server))
        im_files_new.append(im_row)
        captions_new.append(caption_row)

    visualize.writeHTML(out_file_html, im_files_new, captions_new, 96, 96)
    print out_file_html.replace(str_replace[0], str_replace[1]).replace(
        dir_server, click_str)
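# Example call for get_class_variations (a sketch; the experiment naming pieces
# below are assumptions and would need to match an actual trained model directory
# under ../experiments). type_exp selects the visualization: 0 = class as other,
# 1 = vary magnitude, 2 = vary attribute, anything else = vary magnitude for
# class_rel only.
# get_class_variations(model_name='dynamic_capsules', route_iter=3,
#                      pre_pend='ck_', strs_append='_108_exp_0.001',
#                      split_num=0, model_num=107, class_rel=1, type_exp=2)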