def script_make_im_gray():
    dir_meta = '../data/disfa'
    out_dir_im = os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    # ../data/disfa/preprocess_im_200_color_align
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color_align')
    # out_dir_files_new = os.path.join(dir_meta, 'train_test_files_110_gray_align')
    out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_align')
    util.mkdir(out_dir_im_new)

    num_folds = 3
    im_size = [110, 110]  # [96,96]

    # all_im = []
    # for fold_curr in range(num_folds):
    #     train_file = os.path.join(out_dir_files,'train_'+str(fold_curr)+'.txt')
    #     test_file = os.path.join(out_dir_files,'test_'+str(fold_curr)+'.txt')
    #     all_data = util.readLinesFromFile(train_file)+util.readLinesFromFile(test_file)
    #     all_im = all_im + [line_curr.split(' ')[0] for line_curr in all_data]
    all_im = glob.glob(os.path.join(out_dir_im, '*', '*.jpg'))

    print len(all_im), len(set(all_im))
    all_im = list(set(all_im))

    args = []
    for idx_file_curr, file_curr in enumerate(all_im):
        out_file_curr = file_curr.replace(out_dir_im, out_dir_im_new)
        dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(dir_curr)
        # print out_file_curr
        # print dir_curr
        if not os.path.exists(out_file_curr):
            args.append((file_curr, out_file_curr, im_size, idx_file_curr))

    print len(args)
    # for arg in args:
    #     print arg
    #     preprocess_bp4d.save_color_as_gray(arg)
    #     raw_input()
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(preprocess_bp4d.save_color_as_gray, args)
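# Minimal sketch of the pool.map pattern used in script_make_im_gray above, on a
# self-contained toy task. It relies only on the module-level multiprocessing import
# already used here; square and demo_pool_map are illustrative names, not repo functions.
def square(arg):
    # worker must live at module level so it can be pickled by the pool
    idx, val = arg
    return idx, val * val

def demo_pool_map():
    args = [(idx, val) for idx, val in enumerate(range(10))]
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    results = pool.map(square, args)
    pool.close()
    pool.join()
    print results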
def make_rough_anno_mat(test=False):
    dir_meta_curr = os.path.join(dir_meta, 'val_data')
    if test:
        dir_meta_curr = os.path.join(dir_meta, 'test_data')

    anno_dir = os.path.join(dir_meta_curr, 'annotation')
    out_dir_anno_rough = os.path.join(dir_meta_curr, 'anno_rough')
    util.mkdir(out_dir_anno_rough)

    files_anno = glob.glob(os.path.join(anno_dir, '*.txt'))
    files_anno = [file_curr for file_curr in files_anno
                  if not os.path.split(file_curr)[1].startswith('Ambiguous')]
    classes = [os.path.split(file_curr[:file_curr.rindex('_')])[1] for file_curr in files_anno]
    classes.sort()
    assert len(classes) == 20

    dict_annos = {}
    for file_curr in files_anno:
        annos = util.readLinesFromFile(file_curr)
        class_curr = os.path.split(file_curr[:file_curr.rindex('_')])[1]
        anno_num = classes.index(class_curr)
        for anno_curr in annos:
            anno_curr = anno_curr.split()
            key = anno_curr[0]
            start = float(anno_curr[1])
            end = float(anno_curr[2])
            if key not in dict_annos:
                dict_annos[key] = [[] for idx in range(20)]
            dict_annos[key][anno_num].append([start, end])

    for key in dict_annos.keys():
        annos = dict_annos[key]
        annos = [np.array(anno_curr) for anno_curr in annos]
        assert len(annos) == 20
        out_file = os.path.join(out_dir_anno_rough, key + '.npy')
        # print out_file
        np.save(out_file, annos)
def script_save_bbox():
    dir_meta = '../data/bp4d'
    in_dir_meta = os.path.join(dir_meta, 'preprocess_im_' + str(256) + '_color_nodetect')
    out_dir_meta = os.path.join(dir_meta, 'preprocess_im_' + str(256) + '_color_nodetect_bbox')
    util.mkdir(out_dir_meta)

    im_list_in = glob.glob(os.path.join(in_dir_meta, '*', '*', '*.jpg'))
    print len(im_list_in)

    savegray = False
    file_pairs = []
    for idx_im_in, im_in in enumerate(im_list_in):
        out_file = im_in.replace(in_dir_meta, out_dir_meta).replace('.jpg', '.npy')
        if os.path.exists(out_file):
            continue
        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)
        file_pairs.append((im_in, out_file))

    # batch the (input image, output npy) pairs into small chunks
    chunk_size = 5
    chunks = [file_pairs[x:x + chunk_size] for x in range(0, len(file_pairs), chunk_size)]
    args = [(chunk_curr, idx_im_in) for idx_im_in, chunk_curr in enumerate(chunks)]
    print len(args)

    # args = args[:1000]
    for arg in args:
        print arg
        size = save_best_bbox_batch(arg)
        raw_input()
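# Hedged sketch of the list-chunking idiom used in script_save_bbox above (and in
# several other scripts in this file). chunk_list is a hypothetical helper added for
# illustration; it is not an existing function in this repo.
def chunk_list(items, chunk_size):
    # split items into consecutive chunks of at most chunk_size elements
    return [items[x:x + chunk_size] for x in range(0, len(items), chunk_size)]

# example: chunk_list(range(7), 3) -> [[0, 1, 2], [3, 4, 5], [6]]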
def write_train_test_au():
    dir_meta = '../data/pain'
    au_dir = os.path.join(dir_meta, 'anno_au')
    facs_dir = os.path.join(dir_meta, 'Frame_Labels', 'FACS')
    im_dir = os.path.join(dir_meta, 'preprocess_256_color')
    gt_pain_dir = os.path.join(dir_meta, 'Sequence_Labels', 'gt_avg')
    train_test_dir = os.path.join(dir_meta, 'train_test_files_loo_1_thresh_au_only')
    util.mkdir(train_test_dir)

    strs_replace = [[facs_dir, im_dir], ['_facs.txt', '.jpg']]
    people_dirs = [os.path.split(dir_curr)[1] for dir_curr in glob.glob(os.path.join(au_dir, '*'))]
    people_dirs.sort()

    for idx_test_dir, test_dir in enumerate(people_dirs):
        train_file = os.path.join(train_test_dir, 'train_' + str(idx_test_dir) + '.txt')
        test_file = os.path.join(train_test_dir, 'test_' + str(idx_test_dir) + '.txt')

        train_lines = []
        num_skipped_all = 0
        for idx_people_dir, people_dir in enumerate(people_dirs):
            if idx_people_dir != idx_test_dir:
                # train_dirs = [dir_curr for dir_curr in people_dirs if dir_curr!=test_dir]
                train_lines_curr, num_skipped = get_threshold_pain_au_only(
                    au_dir, people_dir, gt_pain_dir, strs_replace)
                num_skipped_all += num_skipped
                train_lines += train_lines_curr
            else:
                test_lines, _ = get_threshold_pain_au_only(
                    au_dir, people_dir, gt_pain_dir, strs_replace, threshold=0)

        print num_skipped_all
        print train_file, len(train_lines)
        print test_file, len(test_lines)
        # random.shuffle(train_lines)
        # random.shuffle(test_lines)
        util.writeFile(train_file, train_lines)
        util.writeFile(test_file, test_lines)
def create_256_train_test_files():
    in_data_dir = '../data/ck_96/train_test_files'
    out_data_dir = '../data/ck_256/train_test_files'
    util.mkdir(out_data_dir)
    num_folds = 10

    for split_num in range(0, num_folds):
        for file_pre in ['train', 'test']:
            in_file = os.path.join(in_data_dir, file_pre + '_' + str(split_num) + '.txt')
            out_file = os.path.join(out_data_dir, file_pre + '_' + str(split_num) + '.txt')
            in_lines = util.readLinesFromFile(in_file)
            # print in_lines[0]
            # raw_input()
            out_lines = [line_curr.replace(in_data_dir, out_data_dir) for line_curr in in_lines]
            print out_file
            util.writeFile(out_file, out_lines)
def make_train_test_files():
    dir_meta = '../data/bp4d'
    out_dir_subs = os.path.join(dir_meta, 'subs')
    out_dir_annos = os.path.join(dir_meta, 'anno_text')

    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_nodetect')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color_nodetect')
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color')
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_96_gray')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_96_gray')
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_align')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color_align')
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_gray_align')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_110_gray_align')
    # out_dir_im = os.path.join(dir_meta, 'preprocess_im_256_color_align')
    # out_dir_files = os.path.join(dir_meta, 'train_test_files_256_color_align')
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_gray_nodetect')
    out_dir_files = os.path.join(dir_meta, 'train_test_files_110_gray_nodetect')

    replace_str = '../data/bp4d/BP4D/BP4D-training'
    util.mkdir(out_dir_files)
    num_folds = 3

    for fold_num in range(num_folds):
        for file_pre_str in ['train', 'test']:
            train_sub_file = os.path.join(out_dir_subs, file_pre_str + '_' + str(fold_num) + '.txt')
            train_folds = util.readLinesFromFile(train_sub_file)
            out_file_train = os.path.join(out_dir_files, file_pre_str + '_' + str(fold_num) + '.txt')
            write_train_file(out_file_train, out_dir_annos, out_dir_im, train_folds, replace_str)
def split_and_save_i3d():
    np_file = '../data/i3d_features/Thumos14-I3D-JOINTFeatures.npy'

    videos_path = '../data/ucf101/val_data/validation'
    out_dir = '../data/i3d_features/Thumos14-I3D-JOINTFeatures_val'
    test = False
    util.mkdir(out_dir)

    videos_path = '../data/ucf101/test_data/TH14_test_set_mp4'
    out_dir = '../data/i3d_features/Thumos14-I3D-JOINTFeatures_test'
    test = True
    util.mkdir(out_dir)

    val_videos = glob.glob(os.path.join(videos_path, '*.mp4'))
    val_videos.sort()
    print len(val_videos)
    print val_videos[0]
    print val_videos[-1]

    features = np.load(np_file)
    if test:
        features = features[-len(val_videos):]
    else:
        features = features[:len(val_videos)]
    print len(features), len(val_videos)

    for features_idx, features_curr in enumerate(features[:len(val_videos)]):
        # print features_idx, val_videos[features_idx]
        # if features_idx<1009:
        #     continue
        out_file = os.path.join(out_dir, os.path.split(val_videos[features_idx])[1].replace('.mp4', '.npy'))
        # print out_file
        # print features_curr.shape
        print features_idx, out_file, features_curr.shape
        # raw_input()
        np.save(out_file, features_curr)
def save_sim_viz(vid_name, out_shape_curr, sim_mat, class_idx, out_dir, dataset='ucf'):
    gt_vals, det_times = get_gt_vector(vid_name, out_shape_curr, class_idx, dataset=dataset)

    if dataset.startswith('activitynet'):
        class_names = globals.class_names_activitynet
    else:
        class_names = globals.class_names

    out_dir_curr = os.path.join(out_dir, class_names[class_idx])
    util.mkdir(out_dir_curr)

    pos_rows = sim_mat[gt_vals > 0, :]
    pos_rows = np.mean(pos_rows, axis=0)
    neg_rows = sim_mat[gt_vals < 1, :]
    neg_rows = np.mean(neg_rows, axis=0)

    # for idx_pos_row, pos_row in enumerate(pos_rows):
    max_val = max(np.max(pos_rows), np.max(neg_rows))
    gt_vals_curr = gt_vals * max_val
    arr_plot = [(det_times, curr_arr) for curr_arr in [gt_vals_curr, pos_rows, neg_rows]]
    legend_entries = ['gt', 'pos', 'neg']
    # idx_pos_row = str(idx_pos_row)
    out_file_curr = os.path.join(out_dir_curr, vid_name + '.jpg')
    title = vid_name  # +' '+idx_pos_row
    # visualize.plotSimple(arr_plot, out_file = out_file_curr, title = title, xlabel = 'time', ylabel = 'max sim', legend_entries = legend_entries)
    # print out_file_curr
    # print
    np.save(out_file_curr.replace('.jpg', '.npy'), sim_mat)
    visualize.saveMatAsImage(sim_mat, out_file_curr, title=title)
def create_comparative_html_fast():
    just_train = True

    # load files
    dir_train_test_files = '../data/ucf101/train_test_files'
    train_file = os.path.join(dir_train_test_files, 'train_just_primary_corrected.txt')
    test_file = os.path.join(dir_train_test_files, 'test_just_primary_corrected.txt')
    dir_gt_vecs = '../data/ucf101/gt_vecs/just_primary_corrected'

    if just_train:
        out_dir = '../scratch/i3d_dists_just_train'
        out_dir_coocs = '../scratch/i3d_dists_just_train/arr_coocs_pairwise'
        out_dir_viz = '../scratch/i3d_dists_just_train/arr_coocs_pairwise_viz'
        util.mkdir(out_dir_viz)
        lines = util.readLinesFromFile(train_file)
    else:
        out_dir = '../scratch/i3d_dists'
        lines = util.readLinesFromFile(train_file) + util.readLinesFromFile(test_file)

    npy_files = [line_curr.split(' ')[0] for line_curr in lines]
    gt_files = [os.path.join(dir_gt_vecs, os.path.split(line_curr)[1]) for line_curr in npy_files]

    # get vid names per class
    vid_names_per_class, class_id = get_vid_names_per_class(lines)
    # print vid_names_per_class

    out_dir_html = out_dir + '_pairwise_htmls'
    util.mkdir(out_dir_html)

    dir_server = '/disk2/maheen_data'
    out_dir_viz = out_dir_viz.replace('..', os.path.join(dir_server, 'nn_net'))

    for dir_type in ['fg', 'bg', 'mat']:
        out_dir_html_curr = os.path.join(out_dir_html, dir_type)
        util.mkdir(out_dir_html_curr)
        n_strs = ['n_10', 'n_25', 'n_50', 'n_100']
        folders = [os.path.join(out_dir_viz, n_str, dir_type) for n_str in n_strs]
        for idx_class, vid_names_curr in enumerate(vid_names_per_class):
            class_name = class_names[idx_class]
            img_names = [class_name + '.jpg'] + [vid_name + '.jpg' for vid_name in vid_names_curr]
            out_file_html = os.path.join(out_dir_html_curr, class_name + '.html')
            visualize.writeHTMLForDifferentFolders(out_file_html, folders, n_strs, img_names,
                                                   rel_path_replace=dir_server,
                                                   height=330, width=400)
def script_median_of_all():
    # frame_folder = '../data/intervals_for_extraction_128_128_0.1fps/'
    # out_folder = '../data/median_bg'
    # skip_num = 1
    frame_folder = '../data/pain_no_pain_x2h_intervals_for_extraction_672_380_0.2fps/'
    out_folder = '../data/testing_median'
    skip_num = 100
    util.mkdir(out_folder)

    path_list = get_all_jpg_paths(frame_folder)
    path_list = path_list[::skip_num]

    per_cam_lists = []
    # Make list of empty lists, one per camera
    for i in range(NUM_CAMERAS):
        per_cam_list = []
        per_cam_lists.append(per_cam_list)

    # Read and sort images into the right camera list
    with tqdm(total=len(path_list)) as pbar:
        for idx, path in enumerate(path_list):
            pbar.update(1)
            camera = int(get_camera_from_path(path))
            cam_idx = camera - 1  # Make zero-indexed
            img = imread(path)
            per_cam_lists[cam_idx].append(img)

    # Per camera: convert to array, compute median and save
    with tqdm(total=NUM_CAMERAS) as pbar:
        for i in range(NUM_CAMERAS):
            pbar.update(1)
            ar = np.asarray(per_cam_lists[i])
            med = np.median(ar, axis=0)
            imsave(os.path.join(out_folder, 'median_0.1fps_camera_{}.jpg'.format(i)),
                   med.astype('uint8'))
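# Hedged sketch of the per-pixel median-background idea from script_median_of_all, on
# synthetic data instead of frames read from disk. It only assumes the module-level
# numpy import; demo_median_background is an illustrative name, not a repo function.
def demo_median_background():
    # 20 copies of a flat 'background' frame, with a bright row that moves each frame
    frames = np.tile(np.full((8, 8), 50., dtype=np.float32), (20, 1, 1))
    for idx in range(20):
        frames[idx, idx % 8, :] = 255.
    # the per-pixel median over the time axis suppresses the transient bright rows
    med = np.median(frames, axis=0)
    print med.min(), med.max()  # both 50.0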
def script_make_im_gray():
    dir_meta = '../data/bp4d'
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color_align')
    out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color_align')
    out_dir_files_new = os.path.join(dir_meta, 'train_test_files_110_gray_align')
    out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_align')
    util.mkdir(out_dir_files_new)

    num_folds = 3
    im_size = None  # [96,96]

    all_im = []
    for fold_curr in range(num_folds):
        train_file = os.path.join(out_dir_files, 'train_' + str(fold_curr) + '.txt')
        test_file = os.path.join(out_dir_files, 'test_' + str(fold_curr) + '.txt')
        all_data = util.readLinesFromFile(train_file) + util.readLinesFromFile(test_file)
        all_im = all_im + [line_curr.split(' ')[0] for line_curr in all_data]

    print len(all_im), len(set(all_im))
    all_im = list(set(all_im))

    args = []
    for idx_file_curr, file_curr in enumerate(all_im):
        out_file_curr = file_curr.replace(out_dir_im, out_dir_im_new)
        dir_curr = os.path.split(out_file_curr)[0]
        util.makedirs(dir_curr)
        # print out_file_curr
        # print dir_curr
        if not os.path.exists(out_file_curr):
            args.append((file_curr, out_file_curr, im_size, idx_file_curr))

    print len(args)
    # for arg in args:
    #     print arg
    #     save_color_as_gray(arg)
    #     raw_input()
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(save_color_as_gray, args)
def save_npys():
    meta_dir = '../data/charades'
    out_dir = os.path.join(meta_dir, 'vgg16_rgb_features_npy')
    util.mkdir(out_dir)

    train_file = os.path.join(meta_dir, 'train_test_files', 'vgg_16_rgb_test.txt')
    lines = util.readLinesFromFile(train_file)

    # t = time.time()
    for idx_line_curr, line_curr in enumerate(lines):
        if idx_line_curr % 100 == 0:
            print idx_line_curr, len(lines)
        file_curr = line_curr.split(' ')[0]
        out_file = os.path.join(out_dir, os.path.split(file_curr)[1]).replace('.npz', '.npy')
        if not os.path.exists(out_file):
            feat = np.load(file_curr)['arr_0']
            np.save(out_file, feat)
def save_kp_for_alignment():
    dir_meta = '../data/bp4d'
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_110_color')
    out_dir_files = os.path.join(dir_meta, 'train_test_files_110_color')
    out_dir_kp = os.path.join(dir_meta, 'preprocess_im_110_color_kp')
    util.mkdir(out_dir_kp)

    all_im = []
    for fold_num in range(3):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_files, file_pre + '_' + str(fold_num) + '.txt')
            im_list_curr = [line.split(' ')[0] for line in util.readLinesFromFile(file_curr)]
            all_im.extend(im_list_curr)

    all_im = list(set(all_im))
    print len(all_im)

    batch_size = 128
    batches = [all_im[x:x + batch_size] for x in range(0, len(all_im), batch_size)]
    total = 0
    for b in batches:
        total += len(b)
    assert total == len(all_im)

    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
                                      enable_cuda=True, flip_input=False)

    for idx_im_list, im_list in enumerate(batches):
        print 'batch', idx_im_list, 'of', len(batches)
        preds = fa.get_landmarks_simple(im_list)
        # print preds.shape,len(im_list)
        for idx_im_curr, im_curr in enumerate(im_list):
            pred_curr = preds[idx_im_curr]
            # print pred_curr.shape
            out_file_pred = im_curr.replace(out_dir_im, out_dir_kp).replace('.jpg', '.npy')
            # print out_file_pred,im_curr
            dir_curr = os.path.split(out_file_pred)[0]
            util.makedirs(dir_curr)
            # raw_input()
            np.save(out_file_pred, pred_curr)
def save_flows_gpu((in_dir_meta, out_dir_u, out_dir_v, video_name, sample, idx)):
    print idx
    in_dir_curr = os.path.join(in_dir_meta, video_name)
    in_files = glob.glob(os.path.join(in_dir_curr, '*.jpg'))
    in_files.sort()
    # print len(in_files)
    in_files = in_files[::sample]
    # print len(in_files)

    # optical_flow = cv2.DualTVL1OpticalFlow_create()
    out_dir_u_curr = os.path.join(out_dir_u, video_name)
    out_dir_v_curr = os.path.join(out_dir_v, video_name)
    util.mkdir(out_dir_u_curr)
    util.mkdir(out_dir_v_curr)

    out_file_input = os.path.join(out_dir_u_curr, 'list_input.txt')
    util.writeFile(out_file_input, in_files)

    out_file_final_u = os.path.join(out_dir_u, video_name, os.path.split(in_files[-2])[1])
    out_file_final_v = os.path.join(out_dir_v, video_name, os.path.split(in_files[-2])[1])
    # print out_file_final_u
    if os.path.exists(out_file_final_u) and os.path.exists(out_file_final_v):
        print video_name, 'DONE already'
        return 1
    # return 0

    command = ['./optical_flow_try_2', out_file_input, out_dir_u_curr, out_dir_v_curr]
    command = ' '.join(command)
    # print command
    # t = time.time()
    os.popen(command)
def test_resize_kp():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    out_dir_kp = out_dir_im.replace('_im_', '_kp_')
    out_dir_im_org = os.path.join(dir_meta, 'im')

    im_file_list = out_dir_im + '_list_1.txt'
    all_im = util.readLinesFromFile(im_file_list)

    out_dir_scratch = '../scratch/emotionet_kp'
    util.mkdir(out_dir_scratch)

    for idx_im_curr, im_curr in enumerate(all_im[:100]):
        im_org_file = im_curr.replace(out_dir_im, out_dir_im_org)
        kp_in_file = im_curr.replace(out_dir_im, out_dir_kp).replace('.jpg', '.npy')
        if not os.path.exists(kp_in_file):
            print 'CONTINUING', kp_in_file
            continue

        im_org = scipy.misc.imread(im_org_file)
        im = scipy.misc.imread(im_curr)
        kp = np.load(kp_in_file)

        kp_org = kp / float(im_size[0])
        kp_org[:, 0] = kp_org[:, 0] * im_org.shape[1]
        kp_org[:, 1] = kp_org[:, 1] * im_org.shape[0]

        out_file = os.path.join(out_dir_scratch, str(idx_im_curr) + '.jpg')
        save_im_kp(im, kp, out_file)
        out_file_org = os.path.join(out_dir_scratch, str(idx_im_curr) + '_org.jpg')
        save_im_kp(im_org, kp_org, out_file_org)

    visualize.writeHTMLForFolder(out_dir_scratch)
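# Hedged sketch of the keypoint rescaling done inside test_resize_kp, isolated for
# clarity: keypoints detected on a square resized image are mapped back to original-image
# pixel coordinates. scale_kp_back is a hypothetical helper, not an existing repo function.
def scale_kp_back(kp, im_size, org_h, org_w):
    # kp: (N, 2) array of (x, y) in resized-image pixels, resized image is im_size x im_size
    kp_org = kp / float(im_size)         # normalize to [0, 1]
    kp_org[:, 0] = kp_org[:, 0] * org_w  # x scales with original width
    kp_org[:, 1] = kp_org[:, 1] * org_h  # y scales with original height
    return kp_org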
def script_save_train_test_files():
    meta_dir = '../data/charades'
    train_test_dir = os.path.join(meta_dir, 'train_test_files')
    util.mkdir(train_test_dir)

    anno_files = [os.path.join(meta_dir, 'annos_test.npz'),
                  os.path.join(meta_dir, 'annos_train.npz')]

    # anno_dir = os.path.join(meta_dir, 'vgg16_rgb_features_npy')
    # out_files = [os.path.join(train_test_dir, 'vgg_16_rgb_npy_test.txt'),
    #              os.path.join(train_test_dir, 'vgg_16_rgb_npy_train.txt')]

    # anno_dir = os.path.join(meta_dir, 'i3d_rgb')
    # out_files = [os.path.join(train_test_dir, 'i3d_rgb_npy_test.txt'),
    #              os.path.join(train_test_dir, 'i3d_rgb_npy_train.txt')]

    # anno_dir = os.path.join(meta_dir, 'i3d_flow')
    # out_files = [os.path.join(train_test_dir, 'i3d_flow_npy_test.txt'),
    #              os.path.join(train_test_dir, 'i3d_flow_npy_train.txt')]

    # anno_dir = os.path.join(meta_dir, 'i3d_both')
    # out_files = [os.path.join(train_test_dir, 'i3d_both_test_wmiss.txt'),
    #              os.path.join(train_test_dir, 'i3d_both_train_wmiss.txt')]

    anno_dir = os.path.join(meta_dir, 'i3d_charades_both')
    out_files = [os.path.join(train_test_dir, 'i3d_charades_both_test.txt'),
                 os.path.join(train_test_dir, 'i3d_charades_both_train_wmiss.txt')]

    post_pend = '.npy'
    # out_file =
    for anno_file, out_file in zip(anno_files, out_files):
        make_train_test_files(anno_file, anno_dir, out_file, post_pend)
def script_download_image():
    # idx_url_file,url_file,url_files,str_replace):
    dir_meta = '../data/emotionet'
    out_dir_im = os.path.join(dir_meta, 'im')
    util.mkdir(out_dir_im)

    str_replace = ['http://cbcsnas01.ece.ohio-state.edu/EmotioNet/Images', out_dir_im]
    dir_url_files = os.path.join(dir_meta, 'emotioNet_challenge_files_server')
    url_files = glob.glob(os.path.join(dir_url_files, '*.txt'))
    url_files.sort()

    args = []
    out_files = []
    for idx_url_file, url_file in enumerate(url_files):
        print 'On file %d of %d' % (idx_url_file, len(url_files))
        im = [line_curr.split('\t')[0] for line_curr in util.readLinesFromFile(url_file)]
        # out_files = [im_curr.replace(str_replace[0],str_replace[1]) for im_curr in im]
        for idx_im_curr, im_curr in enumerate(im):
            out_file_curr = im_curr.replace(str_replace[0], str_replace[1])
            if os.path.exists(out_file_curr):
                out_files.append(out_file_curr)
                continue
            out_dir_curr = os.path.split(out_file_curr)[0]
            util.makedirs(out_dir_curr)
            args.append((im_curr, out_file_curr, idx_im_curr))
        print len(args)
        print url_file

    print len(args)
    print len(out_files)
    return out_files
def detect_frames_single_head():
    dir_meta = '../data/mmi'
    dir_server = '/disk3'
    str_replace = ['..', os.path.join(dir_server, 'maheen_data/eccv_18')]
    click_str = 'http://vision3.idav.ucdavis.edu:1000'

    anno_file_new = os.path.join(dir_meta, 'annos_all_oriented.txt')
    im_size = [110, 110]
    savegray = True
    out_dir_im = os.path.join(dir_meta, 'preprocess_im_gray_110')
    util.mkdir(out_dir_im)
    str_replace_dir = [os.path.join(dir_meta, 'Sessions'), out_dir_im]

    annos_all = util.readLinesFromFile(anno_file_new)
    file_pairs = []
    for idx_anno_curr, anno_curr in enumerate(annos_all):
        im_file, view, emo = anno_curr.split(' ')
        out_file = im_file.replace(str_replace_dir[0], str_replace_dir[1])
        if os.path.exists(out_file) and int(view) != 2:
            continue
        out_dir_curr = os.path.split(out_file)[0]
        util.makedirs(out_dir_curr)
        file_pairs.append((im_file, out_file))

    chunk_size = 50
    chunks = [file_pairs[x:x + chunk_size] for x in range(0, len(file_pairs), chunk_size)]
    args = [(chunk_curr, im_size, savegray, idx_im_in, True)
            for idx_im_in, chunk_curr in enumerate(chunks)]
    print len(args)

    for arg in args:
        print arg[0]
        size = preprocess_bp4d.saveCroppedFace_NEW_batch(arg)
def rough_work():
    # input = io.imread('../data/bp4d/BP4D/BP4D-training/F001/T1/2440.jpg')
    dir_im = '../data/bp4d/preprocess_im_110_color/F001/T1'
    im_list = glob.glob(os.path.join(dir_im, '*.jpg'))
    # print im_list
    im_list = im_list[:10]
    preds = fa.get_landmarks_simple(im_list)

    out_dir_check = os.path.join('../scratch/check_kp_fan')
    util.mkdir(out_dir_check)

    for idx_im_curr, im_curr in enumerate(im_list):
        im_curr = scipy.misc.imread(im_curr)
        pts_rel = preds[idx_im_curr]
        for pt_curr in pts_rel:
            cv2.circle(im_curr, (int(pt_curr[0]), int(pt_curr[1])), 2, (255, 255, 255), -1)
        out_file_curr = os.path.join(out_dir_check, str(idx_im_curr) + '.jpg')
        scipy.misc.imsave(out_file_curr, im_curr)

    visualize.writeHTMLForFolder(out_dir_check)
def make_folds():
    dir_meta = '../data/disfa'
    dir_im_meta = os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    dir_anno_meta = os.path.join(dir_meta, 'ActionUnit_Labels')

    # num_folds = 10
    # out_dir_folds = os.path.join(dir_meta,'folds_'+str(num_folds))
    num_folds = 3
    out_dir_folds = os.path.join(dir_meta, 'folds_' + str(num_folds))
    util.mkdir(out_dir_folds)

    list_subs = glob.glob(os.path.join(dir_anno_meta, '*'))
    list_subs = [os.path.split(sub)[1] for sub in list_subs]
    list_subs.sort()

    all_folds = []
    for idx_fold in range(num_folds):
        all_folds.append(list_subs[idx_fold::num_folds])

    for idx_fold in range(num_folds):
        train_file = os.path.join(out_dir_folds, 'train_' + str(idx_fold) + '.txt')
        test_file = os.path.join(out_dir_folds, 'test_' + str(idx_fold) + '.txt')

        train_fold = []
        for i in range(len(all_folds)):
            if i != idx_fold:
                train_fold = train_fold + all_folds[i]
        test_fold = all_folds[idx_fold]

        print idx_fold, len(train_fold), len(test_fold)
        assert len(set(train_fold + test_fold)) == len(train_fold + test_fold) == 27
        util.writeFile(train_file, train_fold)
        util.writeFile(test_file, test_fold)
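# Hedged sketch of the round-robin subject split used in make_folds above (and in
# make_train_test_subs below), on a toy subject list. demo_round_robin_folds is an
# illustrative name only; it does not read or write any repo data.
def demo_round_robin_folds():
    subs = ['s%02d' % i for i in range(9)]
    num_folds = 3
    # fold i takes every num_folds-th subject starting at i
    folds = [subs[i::num_folds] for i in range(num_folds)]
    # every subject lands in exactly one fold
    assert sorted(sum(folds, [])) == sorted(subs)
    for idx_fold in range(num_folds):
        test_fold = folds[idx_fold]
        train_fold = sum([fold for i, fold in enumerate(folds) if i != idx_fold], [])
        print idx_fold, train_fold, test_fold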
def make_train_test_subs():
    dir_meta = '../data/bp4d'
    im_dir_meta = os.path.join(dir_meta, 'BP4D', 'BP4D-training')
    out_dir_subs = os.path.join(dir_meta, 'subs')
    util.mkdir(out_dir_subs)

    subs = [os.path.split(dir_curr)[1] for dir_curr in glob.glob(os.path.join(im_dir_meta, '*'))]
    print subs
    print len(subs)
    subs.sort()
    print subs

    num_splits = 3
    folds = []
    for fold_num in range(num_splits):
        fold_curr = subs[fold_num::num_splits]
        folds.append(fold_curr)

    for fold_num in range(num_splits):
        train_folds = []
        for idx_fold, fold_curr in enumerate(folds):
            if idx_fold != fold_num:
                train_folds = train_folds + fold_curr
        test_folds = folds[fold_num]

        out_file_train = os.path.join(out_dir_subs, 'train_' + str(fold_num) + '.txt')
        out_file_test = os.path.join(out_dir_subs, 'test_' + str(fold_num) + '.txt')

        assert len(train_folds) + len(test_folds) == len(list(set(train_folds + test_folds)))
        print fold_num, len(train_folds), len(test_folds)
        print out_file_train, out_file_test
        util.writeFile(out_file_train, train_folds)
        util.writeFile(out_file_test, test_folds)
def graph_overfitting():
    log_file_us = '../experiments/graph_multi_video_with_L1_retF/graph_multi_video_with_L1_retF_aft_nonlin_RL_L2_non_lin_None_sparsify_0.5_graph_size_2_sigmoid_False_graph_sum_True_deno_8_n_classes_20_in_out_2048_1024_feat_dim_2048_1024_feat_ret_True_method_cos_ucf/all_classes_False_just_primary_False_limit_None_cw_True_MultiCrossEntropyMultiBranchWithL1_CASL_500_step_250_0.1_0.001_0.001_0.001_lw_1.00_1.00_0.50__noLimit/log_det.txt'
    log_file_them = '../experiments/graph_multi_video_with_L1_retW/graph_multi_video_with_L1_retW_aft_nonlin_RL_L2_non_lin_None_sparsify_0.5_graph_size_2_sigmoid_False_graph_sum_True_deno_8_n_classes_20_in_out_2048_1024_feat_dim_2048_1024_feat_ret_True_method_cos_ucf/all_classes_False_just_primary_False_limit_None_cw_True_MultiCrossEntropyMultiBranchWithL1_CASL_250_step_250_0.1_0.001_0.001_0.001_lw_1.00_1.00_0.50__caslexp/log_det.txt'

    log_file_us = '../experiments/graph_multi_video_with_L1_retF_tanh/graph_multi_video_with_L1_retF_tanh_aft_nonlin_RL_L2_non_lin_None_sparsify_percent_0.5_graph_size_1_sigmoid_True_graph_sum_True_deno_8_n_classes_20_in_out_2048_1024_feat_dim_2048_1024_feat_ret_True_method_cos_ucf/all_classes_False_just_primary_False_limit_None_cw_False_MultiCrossEntropyMultiBranchWithL1_CASL_250_step_250_0.1_0.001_0.001_0.001_lw_1.00_1.00_1.00_forplot_0/log_det.txt'
    log_file_them = '../experiments/graph_multi_video_with_L1_retW_new/graph_multi_video_with_L1_retW_new_aft_nonlin_RL_L2_non_lin_None_sparsify_percent_0.5_graph_size_1_sigmoid_True_graph_sum_True_deno_8_n_classes_20_in_out_2048_1024_feat_dim_2048_1024_feat_ret_True_method_cos_ucf/all_classes_False_just_primary_False_limit_None_cw_False_MultiCrossEntropyMultiBranchWithL1_CASL_250_step_250_0.1_0.001_0.001_0.001_lw_1.00_1.00_1.00_actuallytanh_0/log_det.txt'

    files = [log_file_us, log_file_them]
    x_vals = range(0, 251, 10)

    xAndYs = []
    for file_curr in files:
        lines = util.readLinesFromFile(file_curr)
        lines = lines[:26]
        print lines[0]
        det_vals = []
        # for line in lines:
        #     line = [val for val in line.split(' ') if val is not '']
        #     print line
        #     det_vals.append(float(line[-1]))
        #     raw_input()
        det_vals = [float(line.split('\t')[-1]) for line in lines]
        # det_vals = det_vals[::5]
        xAndYs.append((x_vals, det_vals))

    out_file = '../scratch/qualitative_figs_wacv/graph_overfitting.jpg'
    util.mkdir('../scratch/qualitative_figs_wacv')
    legend_entries = ['Ours-MCASL', 'CASL-Graph']
    xlabel = 'Training Epoch'
    ylabel = 'Detection Accuracy'
    title = 'Detection Accuracy at 0.5 Overlap'
    visualize.plotSimple(xAndYs, out_file=out_file, xlabel=xlabel, ylabel=ylabel,
                         legend_entries=legend_entries, title=title)
def main():
    out_dir = '../scratch/check_first_frames_with_cam_on'
    out_dir_txt = '../scratch/check_first_frames_with_cam_on_txt'
    out_file_offsets = '../metadata/fixing_offsets_with_cam_on/video_offsets_manual.csv'
    util.mkdir(out_dir_txt)

    times_file = os.path.join(out_dir, 'times.npy')
    im_files_to_check = util.readLinesFromFile(os.path.join(out_dir, 'manual_check.txt'))

    args = []
    out_files = []
    for im_file in im_files_to_check:
        out_file = os.path.join(out_dir_txt, os.path.split(im_file)[1].replace('.jpg', '.txt'))
        out_files.append(out_file)
        if not os.path.exists(out_file):
            args.append((im_file, out_file))

    print len(args), len(im_files_to_check)
    # record_offsets(args)
    # fix_offset_files(out_files)

    lines = ['im_file,offset']
    for im_file, offset_file in zip(im_files_to_check, out_files):
        offset = util.readLinesFromFile(offset_file)[0]
        # sanity check
        num = int(offset)
        lines.append(im_file + ',' + offset)

    print lines
    print len(lines)
    print out_file_offsets
    util.writeFile(out_file_offsets, lines)
def main(video_file, to_run, fps, smooth, post_dir):
    print video_file
    print 'to_run', to_run
    print 'sec', fps

    # video_file = '../data/Surveillance/ch02_20161212115300.mp4'
    out_dir = video_file[:video_file.rindex('.')] + '_result_files' + post_dir
    data_dir = video_file[:video_file.rindex('.')] + '_frames' + post_dir
    util.mkdir(data_dir)
    util.mkdir(out_dir)

    # fps = 5  # extract one frame every n seconds
    size_output = [416, 416]

    if to_run == 'all' or to_run == 'extract':
        print 'EXTRACTING FRAMES'
        subprocess.call('rm ' + os.path.join(data_dir, '*.jpg'), shell=True)
        extract_frames(video_file, data_dir, fps, size_output)
        visualize.writeHTMLForFolder(data_dir)
        print 'DONE EXTRACTING FRAMES'

    # t = time.time()
    if to_run == 'all' or to_run == 'test':
        print 'TESTING FRAMES'
        test_frames(out_dir, data_dir)
        print 'DONE TESTING'

    if to_run == 'all' or to_run == 'graph':
        print 'PLOTTING DETECTIONS OVER TIME'
        plot_detections_over_time(data_dir, out_dir, fps, smooth)
    # print time.time()-t

    if to_run == 'plot':
        print data_dir
        plot_detections(data_dir, out_dir)
def main():
    fps = 10
    width = 672
    height = 380
    str_dir = '_'.join([str(val) for val in [width, height, fps]])
    out_dir_testing = '../data/pain_no_pain_x2h_intervals_for_extraction_' + str_dir + 'fps'
    out_dir_flow = '../data_other/pain_no_pain_x2h_intervals_for_extraction_' + str_dir + 'fps'
    util.mkdir(out_dir_testing)

    ofe = OpticalFlowExtractor(output_dir=out_dir_testing,
                               num_processes=multiprocessing.cpu_count(),
                               output_flow=out_dir_flow)

    # # extract optical flow
    # ofe.extract_frames(replace = False, subjects_to_extract = None)

    # # add symlinks if storing optical flow in a different drive
    # ofe.add_symlinks(subjects_to_extract = None)

    # # collate magnitudes and file names in text files. need to manually run command files after this step.
    # ofe.collate_magnitude(subjects_to_extract = ['inkasso'])  # ['julia', 'kastanjett', 'naughty_but_nice', 'sir_holger']

    # create csv with thresholded images only
    # ofe.create_thresholded_csv(thresh = 0.7, subjects_to_extract = ['inkasso'])
    ofe.create_thresholded_csv(thresh=0.01, subjects_to_extract=None, percent=True)
def script_viz_k_means():
    out_dir_htmls = '../experiments/figures/primary_caps_viz_pca'.replace(str_replace[0], str_replace[1])
    util.mkdir(out_dir_htmls)
    out_dir_im = os.path.join(out_dir_htmls, 'im')
    util.mkdir(out_dir_im)

    caps, test_file, convnet, imsize = get_caps_compiled()
    num_clusters = 32

    # arr_vals = [(x,y,filter_num) for x in range(6) for y in range(6) for filter_num in range(32)]
    arr_vals = [(x, y, filter_num) for x in [3] for y in [5] for filter_num in [3]]

    test_im = [scipy.misc.imread(line_curr.split(' ')[0])
               for line_curr in util.readLinesFromFile(test_file)]
    print len(test_im)
    print test_im[0].shape

    for x, y, filter_num in arr_vals:
        out_dir_curr = os.path.join(out_dir_im, str(x) + '_' + str(y) + '_' + str(filter_num))
        util.mkdir(out_dir_curr)
        out_file_html = os.path.join(out_dir_htmls, str(x) + '_' + str(y) + '_' + str(filter_num) + '.html')
        # if os.path.exists(out_file_html):
        #     continue
        pca(caps, num_clusters, filter_num, x, y, test_im, out_dir_curr, out_file_html,
            convnet, imsize, rewrite=False)
        # break

    visualize.writeHTMLForFolder(out_dir_im)
def script_viz_mag():
    out_dir_htmls = '../experiments/figures/primary_caps_viz'.replace(str_replace[0], str_replace[1])
    util.mkdir(out_dir_htmls)
    out_dir_im = os.path.join(out_dir_htmls, 'im')
    util.mkdir(out_dir_im)

    caps, test_file, convnet, imsize = get_caps_compiled()
    mags = np.linalg.norm(caps, axis=4)
    print mags.shape
    print np.min(mags), np.max(mags)

    test_im = [scipy.misc.imread(line_curr.split(' ')[0])
               for line_curr in util.readLinesFromFile(test_file)]
    print len(test_im)
    print test_im[0].shape

    for x in range(mags.shape[2]):
        for y in range(mags.shape[3]):
            out_file_html = os.path.join(out_dir_htmls, str(x) + '_' + str(y) + '.html')
            ims_html = []
            captions_html = []
            for filter_num in range(mags.shape[1]):
                out_dir_curr = os.path.join(out_dir_im, str(x) + '_' + str(y) + '_' + str(filter_num))
                util.mkdir(out_dir_curr)
                im_row, caption_row = save_ims(mags, filter_num, x, y, test_im, out_dir_curr,
                                               convnet, imsize)
                im_row = [util.getRelPath(im_curr, dir_server) for im_curr in im_row]
                # caption_row = [os.path.split(im_curr)[1][:-4] for im_curr in im_row]
                ims_html.append(im_row[:10] + im_row[-10:])
                captions_html.append(caption_row[:10] + caption_row[-10:])
            visualize.writeHTML(out_file_html, ims_html, captions_html, 40, 40)
def save_npys():
    out_dir_features = os.path.join(dir_meta, 'i3d')
    util.mkdir(out_dir_features)
    out_dir_train = os.path.join(out_dir_features, 'train_data')
    out_dir_val = os.path.join(out_dir_features, 'val_data')
    util.mkdir(out_dir_train)
    util.mkdir(out_dir_val)
    out_dirs = [out_dir_train, out_dir_val]

    features = get_i3d()
    ids, durations, train_val, labels_all, times_all, labels = get_rel_anno_info()
    assert len(features) == len(ids)

    train_val = np.array(train_val)
    train_val_bool = np.zeros(train_val.shape).astype(int)
    train_val_bool[train_val == 'validation'] = 1

    for idx_id_curr, id_curr in enumerate(ids):
        out_file_curr = os.path.join(out_dirs[train_val_bool[idx_id_curr]], id_curr + '.npy')
        if os.path.exists(out_file_curr):
            continue

        features_curr = features[idx_id_curr]
        duration_curr = durations[idx_id_curr]
        feature_len = features_curr.shape[0]
        pred_len = duration_curr * 25 // 16
        diff = np.abs(pred_len - feature_len)
        # diffs.append(diff)
        if diff > 2:
            # print 'Continuing',diff, feature_len, pred_len, duration_curr, train_val_bool[idx_id_curr]
            print id_curr, train_val_bool[idx_id_curr]
            continue

        # (assumed) save the per-video features to the split-specific npy; the original
        # snippet is cut off at this point, but out_file_curr is otherwise unused.
        np.save(out_file_curr, features_curr)
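# Hedged sketch of the length sanity check used in save_npys above: the code appears to
# assume one I3D feature row per 16-frame chunk of 25 fps video, so the expected row
# count is duration * 25 // 16. expected_i3d_len is an illustrative helper, not a repo function.
def expected_i3d_len(duration_sec, fps=25, chunk=16):
    return int(duration_sec) * fps // chunk

# example: a 64 s video at 25 fps -> 1600 frames -> 100 chunks of 16 frames
# assert expected_i3d_len(64) == 100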
def main():
    # out_dir_meta = '../experiments/bl_khorrami_ck_96_nobn_pixel_augment_255_range'
    # range_splits = [0,1,2,3,4,5]
    out_dir_meta = '../experiments/bl_khorrami_ck_96_nobn_pixel_augment_255_range_trans_fix'
    range_splits = range(6, 10)
    print range_splits  # range(10)
    util.mkdir(out_dir_meta)

    all_accuracy = []
    for split_num in range_splits:
        train_file = '../data/ck_96/train_test_files/train_' + str(split_num) + '.txt'
        test_file = '../data/ck_96/train_test_files/test_' + str(split_num) + '.txt'
        mean_file = '../data/ck_96/train_test_files/train_' + str(split_num) + '_mean.png'
        std_file = '../data/ck_96/train_test_files/train_' + str(split_num) + '_std.png'

        list_of_to_dos = ['flip', 'rotate', 'scale_translate', 'pixel_augment']
        mean_im = scipy.misc.imread(mean_file).astype(np.float32)
        std_im = scipy.misc.imread(std_file).astype(np.float32)

        batch_size = 128
        batch_size_val = None
        num_epochs = 500
        save_after = 100
        disp_after = 1
        plot_after = 10
        test_after = 1
        lr = [0.001, 0.001]
        # lr = [0.0001,0.0001]
        dec_after = 300
        model_name = 'khorrami_ck_96'
        criterion = nn.CrossEntropyLoss()
        gpu_id = 0
        num_workers = 2
        model_num = num_epochs - 1

        # model_file = None
        # epoch_start = 0
        # lr_dir_train = lr
        lr_dir_train = [0.01, 0.01]
        # strs_append = '_'.join([str(val) for val in [num_epochs,dec_after,lr_dir_train[0],lr_dir_train[1],'100_dec']])
        strs_append = '_'.join([str(val) for val in
                                [num_epochs, dec_after, lr_dir_train[0], lr_dir_train[1]]])
        out_dir_train = os.path.join(out_dir_meta, 'split_' + str(split_num) + '_' + strs_append)
        print out_dir_train

        epoch_start = 401
        strs_append = '_'.join([str(val) for val in [400, 300, lr_dir_train[0], lr_dir_train[1]]])
        out_dir_res = os.path.join(out_dir_meta, 'split_' + str(split_num) + '_' + strs_append)
        # strs_append = '_'.join([str(val) for val in [250,200,lr_dir_train[0],lr_dir_train[1]]])
        # model_file = os.path.join(out_dir_meta,'split_'+str(split_num)+'_'+strs_append,'model_200.pt')
        model_file = os.path.join(out_dir_res, 'model_399.pt')
        # raw_input()

        util.mkdir(out_dir_train)

        data_transforms = {}
        data_transforms['train'] = transforms.Compose([
            lambda x: augment_image(x, list_of_to_dos, mean_im=mean_im, std_im=std_im),
            transforms.ToTensor(),
            lambda x: x * 255.
        ])
        data_transforms['val'] = transforms.Compose([transforms.ToTensor(), lambda x: x * 255.])

        train_data = dataset.CK_96_Dataset(train_file, mean_file, std_file, data_transforms['train'])
        test_data = dataset.CK_96_Dataset(test_file, mean_file, std_file, data_transforms['val'])

        train_model(out_dir_train,
                    train_data,
                    test_data,
                    batch_size=batch_size,
                    batch_size_val=batch_size_val,
                    num_epochs=num_epochs,
                    save_after=save_after,
                    disp_after=disp_after,
                    plot_after=plot_after,
                    test_after=test_after,
                    lr=lr,
                    dec_after=dec_after,
                    model_name=model_name,
                    criterion=criterion,
                    gpu_id=gpu_id,
                    num_workers=num_workers,
                    model_file=model_file,
                    epoch_start=epoch_start)

        test_model(out_dir_train, model_num, train_data, test_data,
                   model_name=model_name, batch_size_val=batch_size_val, criterion=criterion)

        res_dir = os.path.join(out_dir_train, 'results_model_' + str(model_num))
        log_file = os.path.join(res_dir, 'log.txt')
        accuracy = util.readLinesFromFile(log_file)[-1]
        accuracy = float(accuracy.split(' ')[1])
        all_accuracy.append(accuracy)

    print all_accuracy, np.mean(all_accuracy), np.std(all_accuracy)
def save_class_vary_attr(out_dir_train, model_num, train_data, test_data, gpu_id=0,
                         model_name='alexnet', batch_size_val=None,
                         criterion=nn.CrossEntropyLoss(), margin_params=None,
                         network_params=None, barebones=True, class_rel=0, au=False):
    mag_range = np.arange(-0.5, 0.6, 0.1)
    out_dir_results = os.path.join(out_dir_train,
                                   'save_class_vary_attr_single_batch_' + str(model_num) + '_' + str(class_rel))
    util.makedirs(out_dir_results)
    out_dir_im = os.path.join(out_dir_results, 'im_save_' + str(class_rel))
    util.mkdir(out_dir_im)
    print out_dir_results

    model_file = os.path.join(out_dir_train, 'model_' + str(model_num) + '.pt')
    log_arr = []
    # network = models.get(model_name,network_params)

    if batch_size_val is None:
        batch_size_val = len(test_data)

    test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=batch_size_val,
                                                  shuffle=False, num_workers=1)

    torch.cuda.device(0)
    iter_begin = 0
    model = torch.load(model_file)
    model.cuda()
    model.eval()

    predictions = []
    labels_all = []
    out_all = []
    caps_all = []
    recons_all = {}

    mean_im = test_data.mean
    mean_im = mean_im[np.newaxis, :, :]
    std_im = test_data.std[np.newaxis, :, :]

    ims_all = []
    for num_iter, batch in enumerate(test_dataloader):
        print 'NUM_ITER', num_iter
        labels = batch['label'].cpu().numpy()
        data = batch['image'].cpu().numpy()

        if au:
            print labels.shape
            rel_vals = labels[:, class_rel] > 0
            print np.sum(rel_vals)
            labels = np.zeros((np.sum(rel_vals), labels.shape[1]))
            labels[:, class_rel] = 1
            print labels.shape
            print rel_vals.shape
        else:
            rel_vals = labels == class_rel
            labels = labels[rel_vals]

        data = data[rel_vals]
        batch['image'] = torch.Tensor(data)
        batch['label'] = torch.LongTensor(labels)
        print labels.shape
        print data.shape
        if data.shape[0] == 1:
            continue
        # raw_input()

        # batch = test_dataloader.next()
        if criterion == 'marginmulti':
            labels = Variable(batch['label'].float().cuda())
        else:
            labels = Variable(torch.LongTensor(batch['label']).cuda())
        # labels_all.append(batch['label'].numpy())
        data = Variable(batch['image'].cuda())
        # recons_all[(0,0)] = data.data.cpu().numpy()
        # labels = Variable(torch.LongTensor(batch['label']).cuda())

        # x = model.features(data)
        # _,routes = model.caps.forward_intrusive(x)
        # routes = [np.squeeze(routes_curr) for routes_curr in routes]
        # print len(routes), routes[0].shape

        # output, caps =
        classes, reconstructions_gt, _, caps = model(data, labels, return_caps=True)

        caps_mag = (caps**2).sum(dim=-1)**0.5
        caps_unit = caps / caps_mag.view(caps_mag.size(0), caps_mag.size(1), 1)

        # recons_all_all = []
        recons_all = []
        for attr_num in range(32):
            for mag_curr in mag_range:
                caps_curr = caps_unit.clone()
                caps_curr[:, :, attr_num] = mag_curr
                caps_mag_curr = (caps_curr**2).sum(dim=-1)**0.5
                caps_curr = caps_curr / caps_mag_curr.view(caps_mag_curr.size(0), caps_mag_curr.size(1), 1)
                caps_curr = caps_curr * caps_mag.view(caps_mag.size(0), caps_mag.size(1), 1)
                recon_curr = model.just_reconstruct(caps_curr, labels)
                recons_all.append(recon_curr)
        # recons_all_all.append(recons_all)

        # print caps.size()
        # raw_input()

        classes, reconstructions_gt, caps = [val.data.cpu().numpy()
                                             for val in [classes, reconstructions_gt, caps]]
        recons_all = [val.data.cpu().numpy() for val in recons_all]
        labels = labels.data.cpu().numpy()
        preds = np.argmax(classes, 1)
        # print preds.shape, labels.shape
        # print np.sum(preds==labels)/float(labels.size)

        batch_size = data.shape[0]
        data = data.data.cpu().numpy()

        # ims_all = []
        # for label_curr in range(8):
        #     rel_idx = np.where(labels==label_curr)
        #     print rel_idx
        # ims_all_all = [[] for i in range(8)]
        # ims_all = []
        for im_num in range(batch_size):
            for attr_num in range(32):
                post_pend = []
                im_out = []
                gt_label = labels[im_num]
                pred_label = preds[im_num]
                # ims_all = ims_all_all[gt_label]
                # print 'gt',gt_label,'pred',pred_label

                im_in = data[im_num]  # [0]
                # print im_in.shape
                # raw_input()
                im_in = (im_in * std_im) + mean_im
                # print im_in.shape
                im_out.append(im_in)
                post_pend.append(['org'])

                # recon_gt = reconstructions_gt[im_num]  # [0]
                # recon_gt = (recon_gt*std_im)+mean_im
                # # print recon_gt.shape
                # im_out.append(recon_gt)
                # post_pend.append(['recon_gt'])

                # routes_im = [np.sum(route[:,im_num,:,:],2) for route in routes]
                # # for val in im_out:
                # #     print val.shape
                # for label_curr in range(len(recons_all)):

                im_out.append(im_in)
                post_pend.append(['org'])
                for idx_mag_curr, mag_curr in enumerate(mag_range):
                    idx_curr = attr_num * len(mag_range) + idx_mag_curr
                    # print idx_curr,len(recons_all)
                    recon_rel = np.array(recons_all[idx_curr][im_num])
                    recon_rel = (recon_rel * std_im) + mean_im
                    # recon_rel = recon_rel+np.min(recon_rel)
                    # recon_rel = recon_rel/np.max(recon_rel)
                    im_out.append(recon_rel)
                    post_pend.append([attr_num, idx_mag_curr])

                # pre_vals = [im_num,gt_label,pred_label]
                pre_vals = [num_iter, im_num]
                ims_row = save_all_im(out_dir_im, pre_vals, im_out, post_pend)
                ims_all.append(ims_row)
                # print ims_all
                # print len(ims_all)
                # print len(ims_all[0])
                # raw_input()

        break

    # mats_to_save = []
    # mats_to_save = [labels,preds,routes[0],routes[1]]
    # mats_names = ['labels','preds','routes_0','routes_1']
    # for mat_curr, file_curr in zip(mats_to_save,mats_names):
    #     out_file_curr = os.path.join(out_dir_results,file_curr+'.npy')
    #     np.save(out_file_curr,mat_curr)

    np.save(os.path.join(out_dir_results, 'ims_all.npy'), np.array(ims_all))