import glob
import math
import multiprocessing
import os
import random
import shutil
import subprocess

import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
from PIL import Image

import util  # local helper module (readLinesFromFile, writeFile, mkdir, makedirs, ...)
import ca  # assumed local module providing collate_files (used in get_correct_for_au)


def make_test_files_manual_anno():
    # hold out 20 shuffled frames as a test set, move their images and
    # annotations into a test dir, and write the test file list
    data_dir = '../../data/frames'
    files = [
        os.path.join(data_dir, file_name) for file_name in [
            'ch06_20161212115301_frames_class_ims.txt',
            'ch02_20161212115300_frames_class_ims.txt'
        ]
    ]
    all_files = util.readLinesFromFile(files[0]) + util.readLinesFromFile(files[1])
    print len(all_files)

    random.shuffle(all_files)
    train_files = all_files[:-20]
    test_files = all_files[-20:]
    print len(train_files), len(test_files), len(train_files) + len(test_files)

    out_dir = os.path.join(data_dir, 'test_frames')
    util.mkdir(out_dir)
    files = []
    out_file_list = os.path.join(data_dir, 'test_frames.txt')
    for file_curr in test_files:
        # move the test image and its matching .txt annotation
        file_curr = file_curr.split(' ')[0]
        out_file = os.path.join(out_dir, os.path.split(file_curr)[1])
        shutil.move(file_curr, out_file)
        file_curr = file_curr.replace('.jpg', '.txt')
        out_file = out_file.replace('.jpg', '.txt')
        shutil.move(file_curr, out_file)
        files.append(out_file)
    util.writeFile(out_file_list, files)
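
# NOTE: 'util' above is the repo's local helper module and is not shown here.
# The functions below are a minimal, hypothetical sketch of the four helpers
# these scripts rely on, inferred purely from their call sites; the real
# module may differ.
def _readLinesFromFile_sketch(file_name):
    # read a text file into a list of lines without trailing newlines
    with open(file_name, 'r') as f:
        return [line.rstrip('\n') for line in f]


def _writeFile_sketch(file_name, lines):
    # write a list of strings as newline-separated lines
    with open(file_name, 'w') as f:
        f.write('\n'.join(lines))


def _mkdir_sketch(dir_name):
    # create a single directory if it does not exist
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)


def _makedirs_sketch(dir_name):
    # create a directory tree if it does not exist
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)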
def fix_offset_files(offset_files):
    # collect files whose content is not a single integer offset
    problem_files = []
    for offset_file in offset_files:
        lines = util.readLinesFromFile(offset_file)
        assert len(lines) > 0
        if len(lines) == 1:
            try:
                int(lines[0])
                continue
            except ValueError:
                problem_files.append(offset_file)
        else:
            problem_files.append(offset_file)

    # fix each problem file by typing the correct lines; 'q' ends input
    for problem_file in problem_files:
        print problem_file
        lines = util.readLinesFromFile(problem_file)
        print 'PROBLEM'
        print lines
        to_write = []
        while True:
            s = raw_input()
            if s == 'q':
                break
            to_write.append(s)
        print problem_file, to_write
        util.writeFile(problem_file, to_write)
def write_train_test_files():
    # rewrite a train file so feature paths point at the new npy directory,
    # dropping lines whose npy file is missing
    post_pend = '_untf.txt'
    data_dir = '../data/ucf101/train_test_files'
    new_data_path = '../data/untf/npy'
    old_data_path = '../data/i3d_features/Thumos14-I3D-JOINTFeatures_val'
    train_file = os.path.join(data_dir, 'train.txt')
    # alternative: the test split
    # old_data_path = '../data/i3d_features/Thumos14-I3D-JOINTFeatures_test'
    # train_file = os.path.join(data_dir, 'test.txt')
    replace_str = [old_data_path, new_data_path]
    out_file = train_file[:train_file.rindex('.')] + post_pend
    lines = util.readLinesFromFile(train_file)
    new_lines = []
    for idx_line, line in enumerate(lines):
        line = line.replace(replace_str[0], replace_str[1])
        npy_file = line.split(' ')[0]
        if not os.path.exists(npy_file):
            print 'skipping', idx_line, npy_file
            continue
        new_lines.append(line)
    print out_file, len(new_lines), len(lines)
    util.writeFile(out_file, new_lines)
def write_cooc_train_test_files(num_neighbors=100):
    # insert the per-video co-occurrence npy path as the second column of the
    # train/test files
    dir_train_test = '../data/ucf101/train_test_files'
    in_files = ['train.txt', 'test.txt']
    out_post = '_cooc_' + str(num_neighbors)
    cooc_dir = '../data/ucf101/i3d_dists_just_train/arr_coocs_' + str(num_neighbors)
    for in_file in in_files:
        just_name = in_file[:in_file.rindex('.')]
        out_file = os.path.join(dir_train_test, just_name + out_post + '.txt')
        in_file = os.path.join(dir_train_test, in_file)
        lines = util.readLinesFromFile(in_file)
        new_lines = []
        for line_curr in lines:
            line_split = line_curr.split(' ')
            just_vid_name = os.path.split(line_split[0])[1]
            just_vid_name = just_vid_name[:just_vid_name.rindex('.')]
            cooc_file = os.path.join(cooc_dir, just_vid_name + '.npy')
            assert os.path.exists(cooc_file)
            line_new = [line_split[0], cooc_file] + line_split[1:]
            new_lines.append(' '.join(line_new))
        print out_file, len(new_lines), new_lines[0]
        raw_input()  # pause for a manual sanity check before writing
        util.writeFile(out_file, new_lines)
def write_cooc_per_class_train_test_files():
    # insert one per-class co-occurrence npz path per class into the
    # train/test files (class_names is assumed to be defined at module level)
    dir_train_test = '../data/ucf101/train_test_files'
    in_files = ['train.txt', 'test.txt']
    out_post = '_cooc_per_class'
    cooc_dir = '../data/ucf101/i3d_dists_just_train/arr_coocs_per_class'
    for in_file in in_files:
        just_name = in_file[:in_file.rindex('.')]
        out_file = os.path.join(dir_train_test, just_name + out_post + '.txt')
        in_file = os.path.join(dir_train_test, in_file)
        lines = util.readLinesFromFile(in_file)
        new_lines = []
        for line_curr in lines:
            line_split = line_curr.split(' ')
            just_vid_name = os.path.split(line_split[0])[1]
            just_vid_name = just_vid_name[:just_vid_name.rindex('.')]
            line_new = [line_split[0]]
            for class_name in class_names:
                cooc_file = os.path.join(cooc_dir, class_name, just_vid_name + '.npz')
                assert os.path.exists(cooc_file)
                line_new.append(cooc_file)
            line_new += line_split[1:]
            new_lines.append(' '.join(line_new))
        print out_file, len(new_lines), new_lines[0]
        raw_input()  # pause for a manual sanity check before writing
        util.writeFile(out_file, new_lines)
def save_test_pair_file():
    # pair every test line with num_to_pick random train lines, written as
    # alternating test/train lines
    dir_train_test_files = '../data/ucf101/train_test_files'
    num_to_pick = 20
    train_file = os.path.join(dir_train_test_files, 'train_ultra_correct.txt')
    test_file = os.path.join(dir_train_test_files, 'test_ultra_correct.txt')
    train_data = util.readLinesFromFile(train_file)
    test_data = util.readLinesFromFile(test_file)
    test_new_data = []
    for line_curr in test_data:
        rand_idx_all = random.sample(xrange(len(train_data)), num_to_pick)
        for rand_idx in rand_idx_all:
            test_new_data.append(line_curr)
            test_new_data.append(train_data[rand_idx])
    print len(test_new_data)
    print len(test_data)
    print len(train_data)
    print len(test_data) * num_to_pick * 2  # expected size of test_new_data
    out_file = os.path.join(dir_train_test_files,
                            'test_pair_rand' + str(num_to_pick) + '_ultra_correct.txt')
    print out_file
    util.writeFile(out_file, test_new_data)
def write_train_file(out_file_train, out_dir_annos, out_dir_im, train_folds, replace_str):
    # gather annotation lines for the train folds, fix the image paths, and
    # drop lines whose image is missing
    all_anno_files = []
    for sub_curr in train_folds:
        all_anno_files += glob.glob(os.path.join(out_dir_annos, sub_curr + '*.txt'))
    all_lines = []
    for anno_file in all_anno_files:
        all_lines += util.readLinesFromFile(anno_file)
    out_lines = []
    total_missing = 0
    for line_curr in all_lines:
        out_line = line_curr.replace(replace_str, out_dir_im)
        im_out = out_line.split(' ')[0]
        if os.path.exists(im_out):
            out_lines.append(out_line)
        else:
            total_missing += 1
    print total_missing
    print len(out_lines)
    print out_lines[0]
    random.shuffle(out_lines)
    util.writeFile(out_file_train, out_lines)
def get_manual_anno_classes():
    # show each labeled frame and record a manually typed class for it
    frame_dir = '../../data/frames/ch06_20161212115301_frames'
    # frame_dir = '../../data/frames/ch02_20161212115300_frames'
    label_dir = os.path.join(frame_dir, 'labels')
    out_dir = os.path.join(frame_dir, 'labels_with_class')
    util.mkdir(out_dir)
    im_files = glob.glob(os.path.join(frame_dir, '*.jpg'))
    im_files.sort()
    print len(im_files)
    plt.ion()
    for im_num, im_file in enumerate(im_files):
        print im_file, im_num, len(im_files)
        label_file = im_file.replace(frame_dir, label_dir).replace('.jpg', '.txt')
        out_file = im_file.replace(frame_dir, out_dir).replace('.jpg', '.txt')
        assert os.path.exists(label_file)
        label_info = util.readLinesFromFile(label_file)
        num_boxes = int(label_info[0])
        if num_boxes > 0:
            assert num_boxes == 1
            box = [int(val) for val in label_info[1].split(' ')]
            im = scipy.misc.imread(im_file)
            # im = cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), 5)
            plt.figure()
            plt.imshow(im)
            val = raw_input("what's the class? ")
            label_info.append(val)
            util.writeFile(out_file, label_info)
            plt.close()
def get_correct_for_au():
    # for each AU, save the list of test images the model got right as a
    # positive (prediction and ground truth both 1)
    model_name = 'vgg_capsule_7_33/bp4d_256_train_test_files_256_color_align_0_reconstruct_True_True_all_aug_marginmulti_False_wdecay_0_1_exp_0.96_350_1e-06_0.0001_0.001_0.001_lossweights_1.0_0.1_True'
    model_file_name = 'model_0.pt'
    results_dir = os.path.join('../../eccv_18/experiments', model_name,
                               'results_' + model_file_name[:model_file_name.rindex('.')])
    assert os.path.exists(results_dir)
    out_dir = os.path.join('../experiments', model_name, 'au_info')
    util.makedirs(out_dir)
    test_file = '../data/bp4d/train_test_files_256_color_align/test_0.txt'
    test_im = [line_curr.split(' ')[0]
               for line_curr in util.readLinesFromFile(test_file)]
    test_im = np.array(test_im)
    labels, preds = ca.collate_files([results_dir])
    # binarize predictions at 0.5
    preds[preds <= 0.5] = 0
    preds[preds > 0.5] = 1
    for au_num in range(labels.shape[1]):
        bin_keep = np.logical_and(labels[:, au_num] == preds[:, au_num],
                                  labels[:, au_num] == 1)
        im_keep = test_im[bin_keep]
        out_file = os.path.join(out_dir, 'correct_' + str(au_num) + '.txt')
        util.writeFile(out_file, im_keep)
        print out_file
        print au_num
        print bin_keep.shape, np.sum(bin_keep), im_keep.shape
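
# 'ca.collate_files' above is defined elsewhere in the repo. From its use it
# returns (labels, preds) arrays of shape (num_ims, num_aus) gathered from a
# list of results directories. The sketch below is purely illustrative; the
# file names and on-disk layout ('labels_*.npy' / 'preds_*.npy') are guesses,
# not the repo's actual format.
def _collate_files_sketch(results_dirs):
    labels_all = []
    preds_all = []
    for results_dir in results_dirs:
        # assume each results dir holds matching per-batch label/pred arrays
        for label_file in sorted(glob.glob(os.path.join(results_dir, 'labels_*.npy'))):
            pred_file = label_file.replace('labels_', 'preds_')
            labels_all.append(np.load(label_file))
            preds_all.append(np.load(pred_file))
    return np.concatenate(labels_all, 0), np.concatenate(preds_all, 0)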
def check_done(in_dir_meta, out_dir_u, out_dir_v, video_name, sample, idx):
    # write the sampled frame list for a video and return 1 if its optical
    # flow output already exists, 0 otherwise
    in_dir_curr = os.path.join(in_dir_meta, video_name)
    in_files = glob.glob(os.path.join(in_dir_curr, '*.jpg'))
    in_files.sort()
    in_files = in_files[::sample]
    out_dir_u_curr = os.path.join(out_dir_u, video_name)
    out_dir_v_curr = os.path.join(out_dir_v, video_name)
    util.mkdir(out_dir_u_curr)
    util.mkdir(out_dir_v_curr)
    out_file_input = os.path.join(out_dir_u_curr, 'list_input.txt')
    util.writeFile(out_file_input, in_files)
    # flow for the second-to-last frame is the last output written
    out_file_final_u = os.path.join(out_dir_u, video_name, os.path.split(in_files[-2])[1])
    out_file_final_v = os.path.join(out_dir_v, video_name, os.path.split(in_files[-2])[1])
    if os.path.exists(out_file_final_u) and os.path.exists(out_file_final_v):
        print video_name, 'DONE already'
        return 1
    return 0
def scratch_1():
    # write the image lists for the yolo-format horse face train and test dirs
    dir_server = '/disk3'
    horse_dir = os.path.join(dir_server, 'maheen_data', 'eccv_18',
                             'data/horse_data_cleaned')
    train_file = os.path.join(horse_dir, 'trainImageList.txt')
    test_file = os.path.join(horse_dir, 'valImageList.txt')
    out_dirs = ['../../data/horse_data_cleaned_yolo/train',
                '../../data/horse_data_cleaned_yolo/test']
    # for file_curr, out_dir in zip([train_file, test_file], out_dirs):
    #     util.makedirs(out_dir)
    #     save_im_anno(file_curr, out_dir)
    to_replace = ['../../', '../']
    for out_dir in out_dirs:
        out_file = out_dir + '_all_face.txt'
        ims = glob.glob(os.path.join(out_dir, '*.jpg'))
        print len(ims), ims[0]
        ims = [im_curr.replace(to_replace[0], to_replace[1]) for im_curr in ims]
        print ims[0]
        print out_file
        util.writeFile(out_file, ims)
def looking_at_gt_pain():
    # average the AFF, VAS, and SEN pain labels per sequence after rescaling
    # each to a common 0-10 range, and save the result
    dir_meta = '../data/pain'
    gt_pain_anno_dir = os.path.join(dir_meta, 'Sequence_Labels')
    out_dir_meta = os.path.join(gt_pain_anno_dir, 'gt_avg')
    util.mkdir(out_dir_meta)
    gt_pain_anno_dirs = [os.path.join(gt_pain_anno_dir, dir_curr)
                         for dir_curr in ['AFF', 'VAS', 'SEN']]
    min_maxs = [[1, 14], [0, 10], [1, 14]]
    out_range = [0, 10]
    sequence_names = [dir_curr.replace(gt_pain_anno_dirs[0] + '/', '')
                      for dir_curr in glob.glob(os.path.join(gt_pain_anno_dirs[0], '*', '*.txt'))]
    for sequence_name in sequence_names:
        pain_levels = []
        for gt_pain_anno_dir_curr, min_max in zip(gt_pain_anno_dirs, min_maxs):
            file_curr = os.path.join(gt_pain_anno_dir_curr, sequence_name)
            pain_val = int(float(util.readLinesFromFile(file_curr)[0]))
            # rescale from [min, max] to out_range
            pain_val = ((pain_val - min_max[0]) / float(min_max[1] - min_max[0])
                        * (out_range[1] - out_range[0]) + out_range[0])
            pain_levels.append(pain_val)
        avg_pain = np.mean(pain_levels)
        out_file_curr = os.path.join(out_dir_meta, sequence_name)
        util.makedirs(os.path.split(out_file_curr)[0])
        util.writeFile(out_file_curr, [str(avg_pain)])
def just_replace_strings_train_test():
    # point the 256 align train/test files at the 110 align image directory
    dir_meta = '../data/disfa'
    old_out_dir_train_test = os.path.join(dir_meta, 'train_test_8_au_all_method_256_color_align')
    new_out_dir_train_test = os.path.join(dir_meta, 'train_test_8_au_all_method_110_color_align')
    file_names = [file_pre + '_' + str(file_post) + '.txt'
                  for file_pre in ['train', 'test'] for file_post in range(3)]
    old_im_dir = os.path.join(dir_meta, 'preprocess_im_256_color_align')
    new_im_dir = os.path.join(dir_meta, 'preprocess_im_110_color_align')
    util.mkdir(new_out_dir_train_test)
    for file_name in file_names:
        in_file = os.path.join(old_out_dir_train_test, file_name)
        out_file = os.path.join(new_out_dir_train_test, file_name)
        lines = util.readLinesFromFile(in_file)
        lines = [line.replace(old_im_dir, new_im_dir) for line in lines]
        for line in lines:
            im = line.split(' ')[0]
            assert os.path.exists(im)
        print out_file, os.path.exists(out_file), lines[0], len(lines)
        util.writeFile(out_file, lines)
def record_offsets(args):
    # show the timestamp strip of each frame and record typed offset lines;
    # 'q' ends input for the current image
    plt.ion()
    plt.figure()
    for idx_im, (im_file, out_file) in enumerate(args):
        print idx_im, 'of', len(args)
        im = Image.open(im_file).crop((0, 0, 1000, 200))
        im_time = os.path.split(im_file)[1][-10:-4]
        plt.imshow(im)
        title = 'If frame more than vid then NEGATIVE\n' + im_time
        plt.title(title)
        plt.show()
        to_write = []
        while True:
            s = raw_input()
            if s == 'q':
                break
            to_write.append(s)
        print out_file, to_write
        util.writeFile(out_file, to_write)
        raw_input()  # pause before moving to the next image
    plt.close()
def create_sess_facs_anno(out_file_curr, dir_curr, facs_to_keep):
    # write one line per FACS file: '<file> <intensity per kept AU>', with 0
    # for AUs that are absent
    files_all = glob.glob(os.path.join(dir_curr, '*.txt'))
    strs_all = []
    for file_curr in files_all:
        anno = parse_facs_file(file_curr)
        if anno.size > 0 and np.sum(np.isin(anno[:, 0], facs_to_keep)) > 0:
            str_curr = []
            for fac_curr in facs_to_keep:
                anno_rel = anno[anno[:, 0] == fac_curr, :2]
                if anno_rel.size > 0:
                    assert anno_rel.shape[0] == 1
                    str_curr.append(anno_rel[0, 1])
                else:
                    str_curr.append(0)
        else:
            str_curr = [0] * len(facs_to_keep)
        str_curr = ' '.join([str(val) for val in [file_curr] + str_curr])
        strs_all.append(str_curr)
    util.writeFile(out_file_curr, strs_all)
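
# 'parse_facs_file' above is defined elsewhere in the repo. From its use it
# returns an (n, 2) array of [AU number, intensity] rows for one FACS
# annotation file. A minimal sketch, assuming one whitespace-separated
# '<au> <intensity>' pair per line (the CK+ FACS text layout); the real
# parser may differ.
def _parse_facs_file_sketch(file_curr):
    rows = []
    for line in util.readLinesFromFile(file_curr):
        vals = [float(v) for v in line.split()]
        if len(vals) >= 2:
            rows.append(vals[:2])
    # an empty array (size 0) signals a file with no parseable annotations
    return np.array(rows)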
def sort_auto_and_manual_checks(im_files, times_arr, out_file_auto, out_file_manual_check):
    # split images into ones whose offset can be set automatically and ones
    # that need a manual check
    times_arr_idx = list(times_arr[:, 0])
    im_check = []
    im_auto = ['im_file,offset']  # csv header
    for idx_im_file, im_file in enumerate(im_files):
        if idx_im_file in times_arr_idx:
            idx = times_arr_idx.index(idx_im_file)
            row = times_arr[idx]
            assert row[0] == idx_im_file
            diff_val = check_diff_reasonable(row[1:], 3)
            if diff_val is not None:
                im_auto.append(im_file + ',' + str(diff_val))
            else:
                im_check.append(im_file)
        else:
            im_check.append(im_file)
    print(im_check)
    print(im_auto)
    print(len(im_check))
    print(len(im_auto))
    # -1 accounts for the csv header line in im_auto
    assert (len(im_check) + len(im_auto) - 1 == len(im_files))
    util.writeFile(out_file_auto, im_auto)
    util.writeFile(out_file_manual_check, im_check)
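
# 'check_diff_reasonable' above is defined elsewhere in the repo. From its use
# it takes the time values of one row plus a tolerance, and returns a single
# offset when the values agree, or None when the image needs a manual check.
# The agreement rule below is a purely hypothetical sketch; the repo's actual
# logic may differ.
def _check_diff_reasonable_sketch(time_vals, tolerance):
    time_vals = np.array(time_vals, dtype=float)
    # accept the row only if all redundant estimates fall within tolerance
    if np.max(time_vals) - np.min(time_vals) <= tolerance:
        return int(np.round(np.median(time_vals)))
    return None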
def make_train_test_subs():
    # split the BP4D subjects into 3 folds and write train/test subject lists
    dir_meta = '../data/bp4d'
    im_dir_meta = os.path.join(dir_meta, 'BP4D', 'BP4D-training')
    out_dir_subs = os.path.join(dir_meta, 'subs')
    util.mkdir(out_dir_subs)
    subs = [os.path.split(dir_curr)[1]
            for dir_curr in glob.glob(os.path.join(im_dir_meta, '*'))]
    print subs
    print len(subs)
    subs.sort()
    print subs
    num_splits = 3
    folds = [subs[fold_num::num_splits] for fold_num in range(num_splits)]
    for fold_num in range(num_splits):
        train_folds = []
        for idx_fold, fold_curr in enumerate(folds):
            if idx_fold != fold_num:
                train_folds += fold_curr
        test_folds = folds[fold_num]
        out_file_train = os.path.join(out_dir_subs, 'train_' + str(fold_num) + '.txt')
        out_file_test = os.path.join(out_dir_subs, 'test_' + str(fold_num) + '.txt')
        # no subject may appear in both train and test
        assert len(train_folds) + len(test_folds) == len(set(train_folds + test_folds))
        print fold_num, len(train_folds), len(test_folds)
        print out_file_train, out_file_test
        util.writeFile(out_file_train, train_folds)
        util.writeFile(out_file_test, test_folds)
def test_frames(out_dir, data_dir):
    # fill in the darknet .data template and run detection on all frames
    in_data_file = 'horse_face/side_horse_face_video_template.data'
    config_file = 'horse_face/yolo_two_class.cfg'
    model_file = '../experiments/yolo_side_horse/yolo_two_class_1000.weights'
    test_images = glob.glob(os.path.join(data_dir, '*.jpg'))
    test_images.sort()
    test_file = os.path.join(out_dir, 'test_images.txt')
    util.writeFile(test_file, test_images)
    out_data_file = os.path.join(out_dir, 'side_horse_face.data')
    with open(in_data_file, 'rb') as f:
        lines = f.read()
    lines = lines.replace('$RESULT$', out_dir)
    lines = lines.replace('$DATA$', data_dir)
    with open(out_data_file, 'wb') as f:
        f.write(lines)
    # feed the image list to darknet on stdin
    command = ['./darknet', 'detector', 'test_bbox',
               out_data_file, config_file, model_file,
               '<', test_file, '-thresh', '0.2']
    command = ' '.join(command)
    print command
    subprocess.call(command, shell=True)
def script_change_train_test():
    # point the 10-fold train/test files at the 110 gray align images
    dir_meta = '../data/disfa'
    out_dir_files = os.path.join(dir_meta, 'train_test_10_6_method')
    out_dir_files_new = os.path.join(dir_meta, 'train_test_10_6_method_110_gray_align')
    util.mkdir(out_dir_files_new)
    out_dir_im = os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    out_dir_im_new = os.path.join(dir_meta, 'preprocess_im_110_gray_align')
    num_folds = 10
    for fold_curr in range(num_folds):
        for file_pre in ['train', 'test']:
            file_curr = os.path.join(out_dir_files, file_pre + '_' + str(fold_curr) + '.txt')
            lines = util.readLinesFromFile(file_curr)
            out_file = os.path.join(out_dir_files_new, file_pre + '_' + str(fold_curr) + '.txt')
            out_lines = []
            for line_curr in lines:
                out_line = line_curr.replace(out_dir_im, out_dir_im_new)
                # check the replaced image path, not the original one
                im_curr = out_line.split(' ')[0]
                assert os.path.exists(im_curr)
                out_lines.append(out_line)
            print len(out_lines)
            print out_file
            util.writeFile(out_file, out_lines)
def merge_emo_facs(emo_file, facs_file, out_file, list_au_keep, idx_map):
    # append a binary FACS vector (plus a has-facs indicator column) to every
    # emotion line; facs lines are '<im> <au> <intensity> <au> <intensity> ...'
    assert os.path.exists(emo_file)
    assert os.path.exists(facs_file)
    emo_lines = util.readLinesFromFile(emo_file)
    facs_lines = util.readLinesFromFile(facs_file)
    au_bin = np.zeros((len(emo_lines), np.max(idx_map) + 1))
    print 'au_bin.shape', au_bin.shape
    emo_ims = [line.split(' ')[0] for line in emo_lines]
    facs_ims = [line.split(' ')[0] for line in facs_lines]
    for idx_facs, facs_im in enumerate(facs_ims):
        idx_emo = emo_ims.index(facs_im)
        facs = facs_lines[idx_facs]
        facs = [int(val) for val in facs.split(' ')[1:]]
        facs = facs[::2]  # keep AU numbers, drop intensities
        found = 0
        for facs_curr in facs:
            if facs_curr in list_au_keep:
                found = 1
                idx_au = list_au_keep.index(facs_curr)
                au_bin[idx_emo, idx_map[idx_au]] = 1
        if not found:
            print facs_lines[idx_facs]
            print emo_lines[idx_emo]
            raw_input()
    # first column marks images that have any kept AU
    facs_bin = np.sum(au_bin, axis=1, keepdims=True)
    print facs_bin.shape, np.min(facs_bin), np.max(facs_bin), np.sum(facs_bin > 0)
    facs_bin[facs_bin > 0] = 1
    print np.sum(facs_bin), len(facs_lines), np.sum(facs_bin) == len(facs_lines)
    print np.sum(au_bin, 0)
    out_mat = np.concatenate((facs_bin, au_bin), 1)
    print out_mat.shape
    assert out_mat.shape[0] == len(emo_lines)
    assert out_mat.shape[1] == np.max(idx_map) + 2
    if os.path.split(out_file)[1].startswith('train'):
        # every AU must occur at least once in a train file
        assert np.all(np.sum(au_bin, 0) > 0)
    out_lines = []
    for idx_emo_line, emo_line in enumerate(emo_lines):
        facs_arr_str = [str(int(val)) for val in list(out_mat[idx_emo_line])]
        out_lines.append(emo_line + ' ' + ' '.join(facs_arr_str))
    print out_file
    util.writeFile(out_file, out_lines)
def make_train_test_splits():
    # make 5 train/test splits of the horse pain images, keeping only images
    # where the two expert scores agree and stratifying by label
    data_dir_meta = '../data/horse_51'
    anno_file = os.path.join(data_dir_meta, 'Painscore_expert_51images.csv')
    im_dir = data_dir_meta
    out_dir_split = os.path.join(data_dir_meta, 'train_test_split')
    util.mkdir(out_dir_split)
    anno = util.readLinesFromFile(anno_file)
    anno = anno[1:]  # drop csv header
    anno = [[int(anno_curr.split(',')[val]) for val in [0, -2, -1]]
            for anno_curr in anno]
    anno = np.array(anno)
    # exclude images where the two expert scores disagree
    exclude_idx = anno[:, 1] != anno[:, 2]
    print np.sum(exclude_idx)
    kept_files = anno[~exclude_idx, :]
    pos_idx = np.where(kept_files[:, 1] == 1)[0]
    neg_idx = np.where(kept_files[:, 1] == 0)[0]
    num_splits = 5
    np.random.shuffle(pos_idx)
    np.random.shuffle(neg_idx)
    pos_idx_split = np.array_split(pos_idx, num_splits)
    neg_idx_split = np.array_split(neg_idx, num_splits)
    for split_num in range(num_splits):
        arr_idx = [idx for idx in range(num_splits) if idx != split_num]
        idx_train = np.concatenate([pos_idx_split[val] for val in arr_idx]
                                   + [neg_idx_split[val] for val in arr_idx])
        idx_test = np.array(list(pos_idx_split[split_num]) + list(neg_idx_split[split_num]))
        idx_all = [idx_train, idx_test]
        out_files = [os.path.join(out_dir_split, 'train_' + str(split_num) + '.txt'),
                     os.path.join(out_dir_split, 'test_' + str(split_num) + '.txt')]
        for idx_curr, out_file_curr in zip(idx_all, out_files):
            annos = kept_files[idx_curr, :]
            assert np.all(annos[:, 1] == annos[:, 2])
            lines_all = [os.path.join(im_dir, str(anno_curr[0]) + '.jpg')
                         + ' ' + str(anno_curr[1]) for anno_curr in annos]
            print len(lines_all), out_file_curr, lines_all[0]
            util.writeFile(out_file_curr, lines_all)
def make_disfa_10_16_anno():
    # keep DISFA frames whose summed intensity over the 10 kept AUs exceeds 6,
    # and write '<im> <intensities>' lines per subject
    dir_meta = '../data/disfa'
    dir_im_meta = os.path.join(dir_meta, 'Videos_LeftCamera_frames_200')
    pre_im_str = 'LeftVideo'
    post_im_str = '_comp'
    dir_anno_meta = os.path.join(dir_meta, 'ActionUnit_Labels')
    out_dir_volume = os.path.join(dir_meta, 'anno_volume')
    out_dir_annos = os.path.join(dir_meta, 'sub_annos_10_6_method')
    util.mkdir(out_dir_annos)
    list_subs = glob.glob(os.path.join(dir_anno_meta, '*'))
    list_subs = [os.path.split(sub)[1] for sub in list_subs]
    aus_keep = [1, 2, 4, 6, 9, 12, 15, 17, 25, 26]
    aus_keep.sort()
    list_aus = [1, 12, 15, 17, 2, 20, 25, 26, 4, 5, 6, 9]
    list_aus.sort()
    idx_keep = [1 if au in aus_keep else 0 for au in list_aus]
    total_reasonable = 0
    for sub in list_subs:
        im_dir = os.path.join(dir_im_meta, pre_im_str + sub + post_im_str)
        im_files = glob.glob(os.path.join(im_dir, '*.jpg'))
        im_files.sort()
        volume_file = os.path.join(out_dir_volume, sub + '.npy')
        volume = np.load(volume_file)
        assert volume.shape[0] == len(im_files)
        volume = volume[:, np.array(idx_keep) > 0]
        volume_sum = np.sum(volume, 1)
        to_keep = volume_sum > 6
        print to_keep.shape, np.sum(to_keep)
        total_reasonable += np.sum(to_keep)
        im_files = np.array(im_files)
        im_files_to_keep = im_files[to_keep]
        volume_to_keep = volume[to_keep, :]
        out_file_sub = os.path.join(out_dir_annos, sub + '.txt')
        lines = []
        for idx in range(im_files_to_keep.shape[0]):
            im_file = im_files_to_keep[idx]
            volume_curr = volume_to_keep[idx, :]
            anno = [im_file] + [str(int(val)) for val in volume_curr]
            lines.append(' '.join(anno))
        print len(lines), out_file_sub
        util.writeFile(out_file_sub, lines)
    print total_reasonable
def back_bone_commands():
    # write per-gpu run files, one leave-one-horse-out training command per
    # test subject
    train_horses = ['aslan', 'brava', 'herrera', 'inkasso', 'julia',
                    'kastanjett', 'naughty_but_nice', 'sir_holger']
    test_horses_all = ['aslan', 'brava', 'herrera', 'inkasso', 'julia',
                       'kastanjett', 'naughty_but_nice', 'sir_holger']
    config_file = 'configs/config_rotcrop_debug.py'
    data_path = '../data/pain_no_pain_x2h_intervals_for_extraction_672_380_0.2fps_crop/'
    job_name = 'withRotCropDebug'
    # alternative setup:
    # config_file = 'configs/config_train_rotation_newCal.py'
    # data_path = '../data/pain_no_pain_x2h_intervals_for_extraction_128_128_2fps/'
    # job_name = 'withRotNewCal'
    util.mkdir('to_runs')
    num_gpus = 2
    num_per_gpu = 4
    for idx in range(num_gpus):
        test_horses = test_horses_all[num_per_gpu * idx:num_per_gpu * (idx + 1)]
        out_file = os.path.join('to_runs', 'to_run_' + job_name + '_' + str(idx))
        print (out_file)
        commands = []
        for test_subject in test_horses:
            train_subjects = [x for x in train_horses if x != test_subject]
            str_com = ['python', 'train_encode_decode.py',
                       '--config_file', config_file,
                       '--dataset_path', data_path,
                       '--train_subjects', '/'.join(train_subjects),
                       '--test_subjects', test_subject,
                       '--job_identifier', job_name]
            str_com = ' '.join(str_com)
            commands.append(str_com)
            print (str_com)
        util.writeFile(out_file, commands)
def script_get_video_durations():
    # get every test video's duration in parallel and save '<video> <seconds>'
    # lines (dir_meta is assumed to be defined at module level)
    dir_data = os.path.join(dir_meta, 'test_data', 'TH14_test_set_mp4')
    out_file_durations = os.path.join(dir_meta, 'test_data', 'durations.txt')
    video_list = glob.glob(os.path.join(dir_data, '*.mp4'))
    video_list.sort()
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    durations = pool.map(get_video_duration, video_list)
    lines = [video_list[idx_curr] + ' ' + str(duration_curr)
             for idx_curr, duration_curr in enumerate(durations)]
    util.writeFile(out_file_durations, lines)
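
# 'get_video_duration' above is defined elsewhere in the repo. Below is a
# minimal sketch of one common way to get a duration in seconds, shelling out
# to ffprobe (assumes ffprobe is installed; the repo's actual helper may
# differ, e.g. it could use OpenCV's frame count instead).
def _get_video_duration_sketch(video_file):
    command = ['ffprobe', '-v', 'error',
               '-show_entries', 'format=duration',
               '-of', 'default=noprint_wrappers=1:nokey=1',
               video_file]
    out = subprocess.check_output(command)
    return float(out.strip())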
def get_non_peak_im_list():
    # for every CK sequence used in the split files, keep three frames
    # starting a third of the way into the sequence as non-peak images
    dir_meta_96 = '../data/ck_96'
    dir_meta = '../data/ck_original/cohn-kanade-images'
    out_file = os.path.join(dir_meta, 'non_peak_one_third.txt')
    str_replace = [os.path.join(dir_meta_96, 'im'), dir_meta]
    ims = glob.glob(os.path.join(dir_meta, '*', '*', '*.png'))
    print len(ims)
    dirs_all = list(set([os.path.split(im_curr)[0] for im_curr in ims]))
    print len(dirs_all)
    train_file = os.path.join(dir_meta_96, 'train_test_files', 'train_0.txt')
    test_file = os.path.join(dir_meta_96, 'train_test_files', 'test_0.txt')
    all_im = []
    for file_curr in [train_file, test_file]:
        lines = util.readLinesFromFile(file_curr)
        all_im += [line_curr.split(' ')[0] for line_curr in lines]
    print len(all_im), len(list(set(all_im)))
    just_dirs = list(set([os.path.split(im_curr)[0] for im_curr in all_im]))
    print len(just_dirs)
    print just_dirs[0]
    just_dirs = [dir_curr.replace(str_replace[0], str_replace[1])
                 for dir_curr in just_dirs]
    ims_all = []
    diffs = []
    for dir_curr in just_dirs:
        ims_curr = glob.glob(os.path.join(dir_curr, '*.png'))
        ims_curr.sort()
        total_ims = len(ims_curr)
        start_select = total_ims // 3
        end_select = start_select + 3
        ims_select = ims_curr[start_select:end_select]
        assert len(ims_select) == 3
        diffs.append(start_select + 3 - len(ims_curr))
        ims_all.extend(ims_select)
    diffs = np.array(diffs)
    print np.min(diffs), np.max(diffs)
    print len(ims_all)
    print ims_all[0]
    util.writeFile(out_file, ims_all)
def save_flipped_images():
    # rotate mis-oriented mmi images into *_oriented dirs and write a
    # corrected anno file covering every image
    dir_meta = '../data/mmi'
    anno_file = os.path.join(dir_meta, 'annos_all.txt')
    anno_file_new = os.path.join(dir_meta, 'annos_all_oriented.txt')
    no_flip = ['S054', 'S053']
    opp_flip = ['S021']
    annos = util.readLinesFromFile(anno_file)
    annos_new = []
    for line_curr in annos:
        im_file, view, emo = line_curr.split(' ')
        if int(view) == 2:
            # already upright
            out_im_file = im_file
        elif int(view) == 1:
            assert opp_flip[0] in im_file
            out_dir_curr = os.path.split(im_file)[0] + '_oriented'
            util.mkdir(out_dir_curr)
            out_im_file = os.path.join(out_dir_curr, os.path.split(im_file)[1])
            if not os.path.exists(out_im_file):
                # rotate: transpose then reverse rows
                im = scipy.misc.imread(im_file)
                im = np.transpose(im, (1, 0, 2))
                im = im[::-1, :, :]
                scipy.misc.imsave(out_im_file, im)
        else:
            if no_flip[0] in im_file or no_flip[1] in im_file:
                out_im_file = im_file
            else:
                out_dir_curr = os.path.split(im_file)[0] + '_oriented'
                util.mkdir(out_dir_curr)
                out_im_file = os.path.join(out_dir_curr, os.path.split(im_file)[1])
                if not os.path.exists(out_im_file):
                    # rotate the other way: transpose then reverse columns
                    im = scipy.misc.imread(im_file)
                    im = np.transpose(im, (1, 0, 2))
                    im = im[:, ::-1, :]
                    scipy.misc.imsave(out_im_file, im)
        annos_new.append(' '.join([out_im_file, view, emo]))
    util.writeFile(anno_file_new, annos_new)
def some_tf_script():
    # write a shell script that evaluates every saved checkpoint
    out_file_sh = 'capsules/test_em_all.sh'
    all_commands = []
    for model_num in range(100, 1100, 100):
        # capsule variant:
        # command_curr = ('python experiment.py '
        #                 '--data_dir=../../data/ck_96/train_test_files_tfrecords/test_0.tfrecords '
        #                 '--train=false '
        #                 '--summary_dir=../../experiments/sabour_mnist/ck_attempt_0/results_%s '
        #                 '--checkpoint=../../experiments/sabour_mnist/ck_attempt_0/train/model.ckpt-%s '
        #                 '--hparams_override remake=0 --dataset ck') % (str(model_num), str(model_num))
        command_curr = ('python experiment.py '
                        '--data_dir=../../data/ck_96/train_test_files_tfrecords/test_0.tfrecords '
                        '--train=false '
                        '--summary_dir=../../experiments/sabour_mnist/ck_attempt_0_baseline/results_%s '
                        '--checkpoint=../../experiments/sabour_mnist/ck_attempt_0_baseline/train/model.ckpt-%s '
                        '--hparams_override remake=0 --dataset ck '
                        '--model=baseline --validate=true') % (str(model_num), str(model_num))
        all_commands.append(command_curr)
    util.writeFile(out_file_sh, all_commands)
def make_im_list():
    dir_meta = '../data/emotionet'
    im_size = [256, 256]
    out_dir_meta = os.path.join(dir_meta,
                                'preprocess_im_' + str(im_size[0]) + '_color_nodetect')
    im_file_list = out_dir_meta + '_list_1.txt'
    im_list = glob.glob(os.path.join(out_dir_meta, '*', '*.jpg'))
    print len(im_list)
    im_list.sort()
    util.writeFile(im_file_list, im_list)
def hack():
    # collect the last log line of every intermediate results dir into one log
    dir_curr = '../experiments/khorrami_capsule_7_3_bigclass3/ck_96_train_test_files_0_reconstruct_True_True_all_aug_margin_False_wdecay_0_600_exp_0.96_350_1e-06_0.001_0.001_0.001'
    out_file = os.path.join(dir_curr, 'log.txt')
    val_lines = []
    for model_to_test in range(0, 600, 30) + [599]:
        val_line = util.readLinesFromFile(
            os.path.join(dir_curr, 'results_model_' + str(model_to_test), 'log.txt'))[-1]
        val_lines.append(val_line)
    util.writeFile(out_file, val_lines)
def write_train_test_files(train_file, post_pend, out_dir_labels):
    # NOTE: this shadows the zero-argument write_train_test_files above if
    # both stay in the same module.
    # insert the per-feature label file as the second column of a train/test
    # file
    out_file = train_file[:train_file.rindex('.')] + '_' + post_pend + '.txt'
    npy_files, anno_all = readTrainTestFile(train_file)
    out_lines = []
    for npy_file, anno_curr in zip(npy_files, anno_all):
        label_file = os.path.join(out_dir_labels, os.path.split(npy_file)[1])
        assert os.path.exists(label_file)
        line_curr = ' '.join([str(val) for val in [npy_file, label_file] + anno_curr])
        out_lines.append(line_curr)
    util.writeFile(out_file, out_lines)
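
# 'readTrainTestFile' above is defined elsewhere in the repo. From its use,
# each line of a train/test file is '<npy_path> <anno values...>', and the
# helper splits the path from the annotation values. A minimal sketch under
# that assumption; the real helper may differ.
def _readTrainTestFile_sketch(file_name):
    npy_files = []
    anno_all = []
    for line in util.readLinesFromFile(file_name):
        parts = line.split(' ')
        npy_files.append(parts[0])
        anno_all.append(parts[1:])
    return npy_files, anno_all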