def saveFlowImFromFloDir(flo_dir,match_info_file,out_dir_flo_im):
    h5_files,img_files,img_sizes=po.parseInfoFile(match_info_file,lim=None)
    print len(img_files)
    flo_files=[os.path.join(flo_dir,file_name+'.flo') for file_name in util.getFileNames(img_files,ext=False)]
    out_files=[os.path.join(out_dir_flo_im,file_name+'.png') for file_name in util.getFileNames(img_files,ext=False)]
    args=[]
    for idx,(flo_file,out_file) in enumerate(zip(flo_files,out_files)):
        args.append((flo_file,out_file,idx))
    p=multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(saveFlowImage,args)
def script_writeFloVizHTML(out_file_html,out_dir_viz,flo_files,im_files,tif_files,clusters,tifAsPng=False):
    # tifAsPng is currently unused
    img_paths=[]
    captions=[]
    for flo_file,im_file,tif_file in zip(flo_files,im_files,tif_files):
        assert os.path.exists(tif_file)
        assert os.path.exists(im_file)
        file_name=util.getFileNames([flo_file],ext=False)[0]
        out_file_pre=os.path.join(out_dir_viz,file_name)
        out_file_flo_viz=out_file_pre+'_flo.png'
        out_files_tif=[out_file_pre+'_tifim_x.png',out_file_pre+'_tifim_y.png',out_file_pre+'_tifflo.png']
        if not os.path.exists(out_file_flo_viz):
            po.saveFloFileViz(flo_file,out_file_flo_viz)
        tif=scipy.misc.imread(tif_file)[:,:,:2]
        for idx,out_file_tif_viz in enumerate(out_files_tif):
            if os.path.exists(out_file_tif_viz):
                continue
            if idx==0:
                tif_flo=replaceClusterIdWithFlow(tif,clusters)
                po.saveMatFloViz(tif_flo,out_file_tif_viz)
            elif idx==1:
                tif_x=np.array(tif[:,:,0]*(255.0/clusters.shape[0]),dtype=int)
                tif_x=np.dstack((tif_x,tif_x,tif_x))
                scipy.misc.imsave(out_file_tif_viz,tif_x)
            elif idx==2:
                tif_x=np.array(tif[:,:,1]*(255.0/clusters.shape[0]),dtype=int)
                tif_x=np.dstack((tif_x,tif_x,tif_x))
                scipy.misc.imsave(out_file_tif_viz,tif_x)
        img_paths_curr=[im_file,out_file_flo_viz]+out_files_tif
        im_name=util.getFileNames([im_file],ext=False)[0]
        captions_curr=[im_name,'flo_viz']+['tif_flo_viz']*len(out_files_tif)
        img_paths_curr=[util.getRelPath(file_curr) for file_curr in img_paths_curr]
        img_paths.append(img_paths_curr)
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html,img_paths,captions)
def script_testOnYoutube():
    val_file='/disk2/mayExperiments/finetuning_youtube_hmdb_llr/val_eq.txt'
    out_dir='/disk2/mayExperiments/eval_ucf_finetune'
    clusters_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat'
    gpu=0
    util.mkdir(out_dir)
    # original model:
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel'
    # out_dir_model=os.path.join(out_dir,'original_model')
    model_file='/disk2/mayExperiments/ft_youtube_hmdb_ucfClusters/OptFlow_youtube_hmdb__iter_55000.caffemodel'
    out_dir_model=os.path.join(out_dir,'ft_ucf_model')
    util.mkdir(out_dir_model)
    out_dir_flo=os.path.join(out_dir_model,'flo')
    out_dir_flo_viz=os.path.join(out_dir_model,'flo_viz')
    util.mkdir(out_dir_flo)
    util.mkdir(out_dir_flo_viz)
    num_to_pick=20
    img_paths=util.readLinesFromFile(val_file)
    img_paths=[img_path[:img_path.index(' ')] for img_path in img_paths]
    class_names=[file_curr[:file_curr.index('_')] for file_curr in util.getFileNames(img_paths)]
    classes=list(set(class_names))
    class_names=np.array(class_names)
    img_paths_test=[]
    for class_curr in classes:
        idx_rel=np.where(class_names==class_curr)[0]
        idx_rel=idx_rel[:num_to_pick]
        img_paths_test.extend([img_paths[idx_curr] for idx_curr in idx_rel])
    # step that generates the flo visualizations, disabled after the first run:
    # po.script_saveFlosAndViz(img_paths_test,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file)
    out_file_html=os.path.join(out_dir,'model_comparison.html')
    out_dirs_flo_viz=[os.path.join(out_dir,'original_model','flo_viz'),os.path.join(out_dir,'ft_ucf_model','flo_viz')]
    out_dirs_flo_viz_captions=['original_model','ft_ucf_model']
    img_paths_html=[]
    captions_html=[]
    img_names=util.getFileNames(img_paths_test,ext=False)
    for img_path_test,img_name in zip(img_paths_test,img_names):
        row_curr=[]
        row_curr.append(util.getRelPath(img_path_test))
        for out_dir_curr in out_dirs_flo_viz:
            file_curr=os.path.join(out_dir_curr,img_name+'.png')
            row_curr.append(util.getRelPath(file_curr))
        captions_curr=[img_name]+out_dirs_flo_viz_captions
        img_paths_html.append(row_curr)
        captions_html.append(captions_curr)
    visualize.writeHTML(out_file_html,img_paths_html,captions_html)
def insertMsg(f9Dir, cwd, keywords=["msg"]):
    fileNames = util.getFileNames(f9Dir, cwd, keywords)
    for fileName in fileNames:
        # get the msg file
        msgFile = fileName.replace(".j", "")
        msgFile += "_msg.txt"
        # read in the contents of the quest file
        # and clean out any previous messages
        w = open(fileName, 'r')
        t = w.read()
        t = getMsg.sub("", t)
        w.close()
        # open the message file up
        w = open(msgFile, 'r')
        msg = w.read()
        w.close()
        # clean up its newlines
        # and add our own, notated as \l
        msg = msg.replace("\n", "")
        msg = msg.replace("\\l", "\n")
        # create our new quest file
        t = t.replace("SetDescription(q, )", 'SetDescription(q, "' + msg.encode("string-escape") + '")', 1)
        # open it up and replace its contents with the new msg
        w = open(fileName, 'w')
        print >> w, t
        w.close()
def saveSlidingWindows((im_path,stride,w,out_dir,idx)):
    print idx
    im=scipy.misc.imread(im_path)
    r_idx_all=range(0,im.shape[0],stride)
    c_idx_all=range(0,im.shape[1],stride)
    im_name=util.getFileNames([im_path],ext=False)[0]
    ext=im_path[im_path.rindex('.'):]
    out_files=[]
    for idx_r_idx,r_idx in enumerate(r_idx_all):
        end_r=r_idx+w
        if end_r>im.shape[0]:
            continue
        for idx_c_idx,c_idx in enumerate(c_idx_all):
            out_file=os.path.join(out_dir,im_name+'_'+str(idx_r_idx)+'_'+str(idx_c_idx)+ext)
            end_c=c_idx+w
            if end_c>im.shape[1]:
                continue
            if len(im.shape)>2:
                im_curr=im[r_idx:end_r,c_idx:end_c,:]
            else:
                im_curr=im[r_idx:end_r,c_idx:end_c]
            out_files.append(out_file)
            scipy.misc.imsave(out_file,im_curr)
    return out_files
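# Hedged usage sketch for saveSlidingWindows: the single tuple argument makes it
# multiprocessing.Pool.map()-friendly. The image path and output directory below
# are illustrative placeholders, not values from the original experiments; the
# stride/window sizes mirror the w=160, stride=16 used elsewhere in this file.
def _example_saveSlidingWindows():
    im_path = '/tmp/example_im.jpg'    # hypothetical input image
    out_dir = '/tmp/example_windows'   # hypothetical output directory
    util.mkdir(out_dir)
    stride, w = 16, 160
    crop_files = saveSlidingWindows((im_path, stride, w, out_dir, 0))
    print len(crop_files)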
def makeImTifViz(img_paths_all,tif_paths_all,out_file_html,out_dir_tif,num_clusters=40,disk_path='/disk2'):
    # ext=False (boolean), not the string 'False', to strip extensions
    out_files_tif_x=[os.path.join(out_dir_tif,img_name+'_x.png') for img_name in util.getFileNames(tif_paths_all,ext=False)]
    out_files_tif_y=[os.path.join(out_dir_tif,img_name+'_y.png') for img_name in util.getFileNames(tif_paths_all,ext=False)]
    for tif_path,out_file_x,out_file_y in zip(tif_paths_all,out_files_tif_x,out_files_tif_y):
        tif=scipy.misc.imread(tif_path)
        assert np.min(tif[:,:,:2])>0 and np.max(tif[:,:,:2])<num_clusters+1
        saveTifGray(tif,out_file_x,out_file_y,num_clusters)
    img_paths_html=[[util.getRelPath(img_curr,disk_path) for img_curr in img_list] for img_list in zip(img_paths_all,out_files_tif_x,out_files_tif_y)]
    captions_html=[['Image','Tif_x','Tif_y']]*len(img_paths_html)
    visualize.writeHTML(out_file_html,img_paths_html,captions_html)
def constants(constantsDir, constantsFile, cwd, keywords=[], rawCode="", suffix="", alt=""):
    fileNames = util.getFileNames(constantsDir, cwd, keywords)
    varNames = [util.getFileName.findall(x)[0] for x in fileNames]
    varNames = [x.replace("_hero", "") for x in varNames]
    varNames = [x.replace("_vendor", "") for x in varNames]
    path = os.path.join(CONSTANTS_DIR, constantsFile)
    w = open(path, 'w')
    header = "library " + (constantsDir + alt).title() + "Constants\n"
    header = header.replace("_", "")
    header += "globals\n"
    i = 0
    objID = rawCode + "000"
    if rawCode == "N":
        i = 1
    elif rawCode == "w":
        i = "vZZZ"
    elif rawCode == "g":
        objID = rawCode + "001"
    for var in varNames:
        value = i
        if rawCode != "" and rawCode != "w" and rawCode != "g":
            value = int2Rawcode(value, rawCode)
        elif rawCode == "w" or rawCode == "g":
            value = "'" + objID + "'"
            objID = util.add(objID)
        header += "\tconstant integer " + var.upper() + suffix + " = " + str(value) + "\n"
        if rawCode != "w" and rawCode != "g":
            i += 1
    header += "endglobals\nendlibrary"
    print >> w, header
    w.close()
def stitchH5s(img_name,h5_files,img_files,C):
    if isinstance(C,str):
        C=readClustersFile(C)
    num_parts_r=int(img_files[0].split('_')[-4])
    num_parts_c=int(img_files[0].split('_')[-3])
    img_files_names=util.getFileNames(img_files)
    file_pre=img_name+'_'+str(num_parts_r)+'_'+str(num_parts_c)
    for r_idx in range(num_parts_r):
        row_curr=[]
        for c_idx in range(num_parts_c):
            file_start=file_pre+'_'+str(r_idx)+'_'+str(c_idx)
            idx=[idx for idx,file_curr in enumerate(img_files_names) if file_curr.startswith(file_start)]
            assert len(idx)==1
            idx=idx[0]
            np_data_curr=readH5(h5_files[idx])
            np_data_curr=np_data_curr[0]
            # (channels, h, w) -> (h, w, channels)
            np_data_curr=np.transpose(np_data_curr,(1,2,0))
            row_curr.append(np_data_curr)
        row_data=np.hstack(tuple(row_curr))
        if r_idx==0:
            data_block=row_data
        else:
            data_block=np.vstack((data_block,row_data))
    return data_block
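# Hedged usage sketch for stitchH5s: it reassembles per-crop network outputs,
# named img_name_<num_r>_<num_c>_<r>_<c>, into one (H, W, C) block. The
# match-info path and image name below are placeholders; the helper
# getRelevantFilesFromMatchFile is defined later in this file.
# h5_files, img_files, img_sizes = getRelevantFilesFromMatchFile('/tmp/match_info.txt', 'example_im')
# data_block = stitchH5s('example_im', h5_files, img_files, '/tmp/clusters.mat')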
def script_reSaveMatOverlap():
    dir_old='/disk3/maheen_data/pedro_val/mat_overlap'
    dir_new='/disk3/maheen_data/pedro_val/mat_overlap_check'
    util.mkdir(dir_new)
    path_to_im='/disk2/ms_coco/val2014'
    path_to_gt='/disk2/mayExperiments/validation_anno'
    mat_overlap_files=util.getFilesInFolder(dir_old,ext='.npz')
    im_names=util.getFileNames(mat_overlap_files,ext=False)
    args=[]
    for idx,(mat_overlap_file,im_name) in enumerate(zip(mat_overlap_files,im_names)):
        gt_file=os.path.join(path_to_gt,im_name+'.npy')
        im_file=os.path.join(path_to_im,im_name+'.jpg')
        out_file=os.path.join(dir_new,im_name+'.npz')
        args.append((mat_overlap_file,gt_file,im_file,out_file,idx))
    p=multiprocessing.Pool(32)
    p.map(fixMatOverlap,args)
def rescaleImAndSaveMeta(img_paths,meta_dir,power_scale_range=(-2,1),step_size=0.5):
    img_names=util.getFileNames(img_paths)
    power_range=np.arange(power_scale_range[0],power_scale_range[1]+1,step_size)
    scales=[2**val for val in power_range]
    scale_infos=[]
    for idx,scale in enumerate(scales):
        out_dir_curr=os.path.join(meta_dir,str(idx))
        util.mkdir(out_dir_curr)
        scale_infos.append((out_dir_curr,scale))
    args=[]
    idx=0
    for idx_img,img_path in enumerate(img_paths):
        for out_dir_curr,scale in scale_infos:
            out_file=os.path.join(out_dir_curr,img_names[idx_img])
            if os.path.exists(out_file):
                continue
            args.append((img_path,out_file,scale,idx))
            idx=idx+1
    p=multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(rescaleImAndSave,args)
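# Illustrative check of the scale ladder rescaleImAndSaveMeta builds with its
# defaults (power_scale_range=(-2,1), step_size=0.5). Note that arange's stop is
# power_scale_range[1]+1, so the ladder actually runs up to 2**1.5; each scale
# gets one numbered subdirectory of meta_dir, indexed from 0.
def _example_scaleLadder():
    power_range = np.arange(-2, 1 + 1, 0.5)  # [-2, -1.5, ..., 1.0, 1.5]
    print [2**val for val in power_range]    # 0.25 up to ~2.83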
def script_saveHumanOnlyNeg():
    out_file='/disk2/aprilExperiments/positives_160_human.txt'
    out_dir='/disk2/aprilExperiments/negatives_npy_onlyHuman'
    util.mkdir(out_dir)
    im_pre='COCO_train2014_'
    lines=util.readLinesFromFile(out_file)
    img_files=[line[:line.index(' ')] for line in lines]
    img_names=util.getFileNames(img_files,ext=False)
    print img_names[0]
    idx_all=[int(img_name.split('_')[-1]) for img_name in img_names]
    print len(img_names),len(idx_all),idx_all[0]
    cat_id=1
    path_to_anno='/disk2/ms_coco/annotations'
    anno_file='instances_train2014.json'
    anno=json.load(open(os.path.join(path_to_anno,anno_file),'rb'))['annotations']
    script_saveBboxFiles(anno,out_dir,im_pre,idx_all,cat_id)
def main():
    out_file_html='/disk3/maheen_data/headC_160_withFlow_justHuman/overlay_flo_50_viz.html'
    folders=['/disk3/maheen_data/headC_160_noFlow_justHuman/gt_overlap_50_viz',
             '/disk3/maheen_data/headC_160_withFlow_justHuman/gt_overlap_50_viz',
             '/disk3/maheen_data/headC_160_withFlow_justHuman/overlay_flo_50_viz']
    img_names=util.getFileNames(util.getFilesInFolder(folders[0],ext='.png'),ext=True)
    height=500
    width=500
    captions=['no flo','with flo','boxes with flo']
    visualize.writeHTMLForDifferentFolders(out_file_html,folders,captions,img_names,height=height,width=width)
    print out_file_html
    return
    # NOTE: the early return above disables the older experiment below.
    params_dict={}
    params_dict['mat_overlap_dir']='/disk3/maheen_data/headC_160_withFlow_justHuman/mat_overlaps_1000'
    params_dict['gt_dir']='/disk3/maheen_data/val_anno_human_only'
    params_dict['thresh_overlap']=0.5
    params_dict['flo_viz_dir']='/disk3/maheen_data/headC_160_withFlow_justHuman/im_with_padding/flo'
    params_dict['results_dir_meta']='/disk3/maheen_data/headC_160_withFlow_justHuman/results'
    params_dict['path_to_im_canon']='/disk3/maheen_data/headC_160_withFlow_justHuman/im/4'
    params_dict['out_dir_overlay_flo']='/disk3/maheen_data/headC_160_withFlow_justHuman/overlay_flo_50_viz'
    params_dict['gt_color']=(255,0,0)
    params_dict['pred_color']=(255,255,255)
    params_dict['alpha_overlay']=0.4
    params_dict['w']=160
    params_dict['stride']=16
    params_dict['power_scale_range']=(-2,1)
    params_dict['power_step_size']=0.5
    params_dict['num_threads']=multiprocessing.cpu_count()
    params=createParams('saveFloVizAll')
    params=params(**params_dict)
    script_saveFloVizAll(params)
    pickle.dump(params._asdict(),open(os.path.join(params.out_dir_overlay_flo,'params_saveFloVizAll.p'),'wb'))
def writeTrainFilesWithFlow(old_train_file,dir_flo_im,new_train_file,ext='.png'):
    lines=util.readLinesFromFile(old_train_file)
    img_files=[line[:line.index(' ')] for line in lines]
    file_names=util.getFileNames(img_files,ext=False)
    flo_im_files=[os.path.join(dir_flo_im,file_name+ext) for file_name in file_names]
    for flo_im_file in flo_im_files:
        assert os.path.exists(flo_im_file)
    lines_new=[line+' '+flo_im_curr for line,flo_im_curr in zip(lines,flo_im_files)]
    util.writeFile(new_train_file,lines_new)
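# Hypothetical usage of writeTrainFilesWithFlow: appends the matching flow-image
# path to each '<img> ...' line of a caffe-style train file. The paths below are
# placeholders, not values from the original experiments.
# writeTrainFilesWithFlow('/tmp/train.txt', '/tmp/flo_im', '/tmp/train_with_flow.txt')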
def script_saveSegSavingInfoFiles():
    dir_overlaps='/disk3/maheen_data/headC_160_noFlow_bbox/mat_overlaps_no_neg_1000'
    out_dir='/disk3/maheen_data/debugging_score_and_scale'
    img_dir_meta='/disk2/mayExperiments/validation/rescaled_images'
    out_dir_npy=os.path.join(out_dir,'npy_for_idx')
    out_file_test_pre=os.path.join(out_dir,'test_with_seg')
    util.mkdir(out_dir_npy)
    num_to_pick=10
    mat_overlaps=util.getFilesInFolder(dir_overlaps,'.npz')
    args=[]
    for idx_mat_overlap_file,mat_overlap_file in enumerate(mat_overlaps):
        args.append((mat_overlap_file,num_to_pick,idx_mat_overlap_file))
    p=multiprocessing.Pool(multiprocessing.cpu_count())
    pred_scores_all=p.map(loadAndPickN,args)
    print len(args)
    lines_to_write={}
    img_names=util.getFileNames(mat_overlaps,ext=False)
    for img_name,pred_scores in zip(img_names,pred_scores_all):
        img_num_uni=np.unique(pred_scores[:,1])
        for img_num in img_num_uni:
            img_num=int(img_num)
            curr_im=os.path.join(img_dir_meta,str(img_num),img_name+'.jpg')
            assert os.path.exists(curr_im)
            out_dir_npy_curr=os.path.join(out_dir_npy,str(img_num))
            util.mkdir(out_dir_npy_curr)
            out_file=os.path.join(out_dir_npy_curr,img_name+'.npy')
            pred_scores_rel=pred_scores[pred_scores[:,1]==img_num,:]
            np.save(out_file,pred_scores_rel)
            if img_num in lines_to_write:
                lines_to_write[img_num].append(curr_im+' '+out_file)
            else:
                lines_to_write[img_num]=[curr_im+' '+out_file]
    for img_num in lines_to_write.keys():
        out_file_test=out_file_test_pre+'_'+str(img_num)+'.txt'
        print out_file_test,len(lines_to_write[img_num])
        util.writeFile(out_file_test,lines_to_write[img_num])
def getRelevantFilesFromMatchFile(out_file_info,img_name):
    h5_files,img_files,img_sizes=parseInfoFile(out_file_info)
    img_names=util.getFileNames(img_files,ext=False)
    idx_rel=[idx for idx,file_curr in enumerate(img_names) if file_curr.startswith(img_name)]
    h5_files=[h5_files[idx] for idx in idx_rel]
    img_files=[img_files[idx] for idx in idx_rel]
    img_sizes=[img_sizes[idx] for idx in idx_rel]
    return h5_files,img_files,img_sizes
def script_saveFlowIm():
    pos_file='/disk2/aprilExperiments/positives_160_human.txt'
    # out_dir='/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos'
    out_dir='/disk3/maheen_data/headC_160/neg_flos'
    out_dir_flo_im=os.path.join(out_dir,'flo_im')
    util.mkdir(out_dir_flo_im)
    match_info_file=os.path.join(out_dir,'flo','match_info.txt')
    flo_dir=os.path.join(out_dir,'flo','flo_files')
    # body was identical to saveFlowImFromFloDir above, so delegate to it
    saveFlowImFromFloDir(flo_dir,match_info_file,out_dir_flo_im)
def script_sanityCheckEquivalenceClusters():
    old_train_txt='/disk2/mayExperiments/ft_youtube_hmdb_newClusters/train.txt'
    new_train_txt='/disk3/maheen_data/ft_youtube_40/train.txt'
    tif_list_old=getTifsFromTrain(old_train_txt)
    tif_list_new=getTifsFromTrain(new_train_txt)
    print len(tif_list_old),len(tif_list_new)
    dir_meta_old=getDirMetaFromTifPath(tif_list_old[0])
    dir_meta_new=getDirMetaFromTifPath(tif_list_new[0])
    image_dir_old=getImageDirFromTifPath(tif_list_old[0])
    image_dir_new=getImageDirFromTifPath(tif_list_new[0])
    print dir_meta_old,dir_meta_new,image_dir_old,image_dir_new
    im_names_old=util.getFileNames(tif_list_old)
    im_names_new=util.getFileNames(tif_list_new)
    tif_list_both=list(set(im_names_old).intersection(set(im_names_new)))
    print len(tif_list_both)
    num_to_pick=100
    random.shuffle(tif_list_both)
    tif_list_both=tif_list_both[:num_to_pick]
    for tif_name in tif_list_both:
        video_name=tif_name[:tif_name.index('.')]
        old_tif_path=os.path.join(dir_meta_old,video_name,image_dir_old,tif_name)
        new_tif_path=os.path.join(dir_meta_new,video_name,image_dir_new,tif_name)
        tif_new=scipy.misc.imread(new_tif_path)
        tif_old=scipy.misc.imread(old_tif_path)
        assert np.array_equal(tif_new,tif_old)
def makeFloVizHTML(out_file_html,img_paths,dir_flo_viz):
    img_paths_html=[]
    captions_html=[]
    for img_path,img_file_name in zip(img_paths,util.getFileNames(img_paths,ext=False)):
        out_file_flo_viz=os.path.join(dir_flo_viz,img_file_name+'.png')
        if img_path.startswith('/disk2'):
            img_path='/disk3'+img_path
        img_paths_curr=[util.getRelPath(img_path,'/disk3'),util.getRelPath(out_file_flo_viz,'/disk3')]
        img_paths_html.append(img_paths_curr)
        captions_html.append([img_file_name,'flo'])
    visualize.writeHTML(out_file_html,img_paths_html,captions_html)
def insert(insertFile, insertDir, newFile, cwd, header, keywords=[]):
    fileNames = util.getFileNames(insertDir, cwd, keywords)
    insertFile = util.find(insertFile, util.find(cwd, util.root))
    insertions = ""
    for fileName in fileNames:
        w = open(fileName, 'r')
        insertions += w.read() + "\n"
        w.close()
    w = open(insertFile, 'r')
    t = w.read().replace(header, insertions)
    w.close()
    insertFile = insertFile.replace(getInsertFileDir.findall(insertFile)[-1], '')
    w = open(os.path.join(insertFile, newFile), 'w')
    print >> w, t
    w.close()
def script_checkSuppressFlowMatlabCode():
    dir_meta='/disk2/temp/aeroplane_10_3'
    dir_bef=os.path.join(dir_meta,'noThresh')
    dir_aft=os.path.join(dir_meta,'withThresh')
    out_dir=os.path.join(dir_meta,'viz')
    util.mkdir(out_dir)
    tif_files=util.getFilesInFolder(dir_bef,'.tif')
    tif_files=util.getFileNames(tif_files)
    for file_curr in tif_files:
        file_bef=os.path.join(dir_bef,file_curr)
        file_aft=os.path.join(dir_aft,file_curr)
        tif_bef_one=scipy.misc.imread(file_bef)[:,:,0]
        tif_aft_one=scipy.misc.imread(file_aft)[:,:,0]
        mat_info_bef=scipy.io.loadmat(os.path.join(dir_bef,file_curr[:file_curr.rindex('.')]+'.mat'))
        R=mat_info_bef['R']
        L=mat_info_bef['L']
        mag_bef=np.power(np.power(R,2)+np.power(L,2),0.5)
        idx=np.where(mag_bef<1.0)
        print 'BEFORE'
        print np.unique(R[idx])
        print np.unique(tif_bef_one[idx])
        print 'AFTER'
        mat_info_aft=scipy.io.loadmat(os.path.join(dir_aft,file_curr[:file_curr.rindex('.')]+'.mat'))
        print np.unique(mat_info_aft['R'][idx])
        print np.unique(tif_aft_one[idx])
        assert np.unique(tif_aft_one[idx])[0]==40
def dialog(dialogDir, cwd, keywords=["msg"]):
    fileNames = util.getFileNames(dialogDir, cwd, keywords)
    for fileName in fileNames:
        textFile = fileName.replace(".j", "")
        textFile += "_msg.txt"
        msgs = process(textFile)
        w = open(fileName, 'r')
        t = w.read()
        t = getMsg.sub("", t)
        w.close()
        for msg in msgs:
            t = t.replace("StoryMsg()", 'StoryMsg("' + msg.encode("string-escape") + '")', 1)
        w = open(fileName, 'w')
        print >> w, t
        w.close()
def averageMagnitudes((img_name,img_size_org,filter_size,step_size,img_files,h5_files,img_sizes,C,out_dir,idx_img_name)):
    print idx_img_name
    if isinstance(C,str):
        C=readClustersFile(C)
    img_files_names=util.getFileNames(img_files,ext=False)
    r_pad=getPadTuple(img_size_org[0],filter_size,step_size)
    c_pad=getPadTuple(img_size_org[1],filter_size,step_size)
    new_shape=(img_size_org[0]+r_pad[0]+r_pad[1],img_size_org[1]+c_pad[0]+c_pad[1])
    assert (new_shape[0]-filter_size)%step_size==0
    num_parts_r=(new_shape[0]-filter_size)/step_size+1
    assert (new_shape[1]-filter_size)%step_size==0
    num_parts_c=(new_shape[1]-filter_size)/step_size+1
    total_arr=np.zeros(new_shape)
    count_arr=np.zeros(new_shape)
    for r_idx_curr in range(num_parts_r):
        for c_idx_curr in range(num_parts_c):
            file_rel_start=img_name+'_'+str(r_idx_curr)+'_'+str(c_idx_curr)
            h5_file=h5_files[img_files_names.index(file_rel_start)]
            img_size=img_sizes[img_files_names.index(file_rel_start)]
            im_curr=getMatFromH5(h5_file,img_size,C)
            mag=getFlowMag(im_curr)
            start_r=r_idx_curr*step_size
            start_c=c_idx_curr*step_size
            end_r=start_r+filter_size
            end_c=start_c+filter_size
            assert end_r-start_r==mag.shape[0]
            assert end_c-start_c==mag.shape[1]
            total_arr[start_r:end_r,start_c:end_c]+=mag
            count_arr[start_r:end_r,start_c:end_c]+=1
    avg_arr=total_arr/count_arr
    out_file_name=os.path.join(out_dir,img_name+'.npy')
    np.save(out_file_name,avg_arr)
    return out_file_name
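# Worked example of the grid arithmetic above, assuming filter_size=160 and
# step_size=16 (the window/stride values used elsewhere in this file): a padded
# height of 320 gives (320-160)/16+1 = 11 overlapping window rows, and each
# pixel's flow magnitude is averaged over every window that covers it.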
def constantsStr(constantsDir, constantsFile, cwd, keywords=[], suffix="_STR"):
    fileNames = util.getFileNames(constantsDir, cwd, keywords)
    varNames = [util.getFileName.findall(x)[0] for x in fileNames]
    path = os.path.join(CONSTANTS_DIR, constantsFile)
    w = open(path, 'w')
    header = "library " + constantsDir.title() + "StringConstants\n"
    header = header.replace("_", "")
    header += "globals\n"
    for x in range(0, len(fileNames)):
        u = open(fileNames[x], 'r')
        value = getObjectName.findall(u.read())[0]
        u.close()
        header += "\tconstant string " + varNames[x].upper() + suffix + " = \"" + value + "\"\n"
    header += "endglobals\nendlibrary"
    print >> w, header
    w.close()
def insert(insertFile, insertDir, newFile, cwd, header, keywords=[], sort=False):
    fileNames = util.getFileNames(insertDir, cwd, keywords)
    if sort:
        fileNames = sorted(fileNames, key=lambda filename: os.stat(filename).st_ctime)
    insertFile = util.find(insertFile, util.find(cwd, util.root))
    insertions = ""
    for fileName in fileNames:
        w = open(fileName, 'r')
        insertions += w.read() + "\n"
        w.close()
    w = open(insertFile, 'r')
    t = w.read().replace(header, insertions)
    w.close()
    insertFile = insertFile.replace(getInsertFileDir.findall(insertFile)[-1], '')
    w = open(os.path.join(insertFile, newFile), 'w')
    print >> w, t
    w.close()
def constantsLib(constantsDir, constantsFile, cwd, keywords=[]):
    path = os.path.join(CONSTANTS_DIR, constantsFile)
    fileNames = util.getFileNames(constantsDir, cwd, keywords)
    requiresStr = " requires "
    p = re.compile(r', $')
    for fileName in fileNames:
        w = open(fileName, 'r')
        t = w.read()
        w.close()
        libName = getLibName.findall(t)
        if libName != []:
            requiresStr += libName[0] + ", "
    requiresStr = p.sub("", requiresStr)
    header = "library Constants" + requiresStr + "\nglobals\nendglobals\nendlibrary"
    w = open(path, 'w')
    print >> w, header
    w.close()
def script_saveFlosAndViz(img_paths,dir_test,flo_viz_dir,gpu,model_file,clusters_file,train_val_file=None,overwrite=False):
    script_saveFlos(img_paths,dir_test,gpu,model_file,clusters_file,train_val_file=train_val_file,overwrite=overwrite)
    flo_dir=os.path.join(dir_test,'flo_files')
    flo_files=[os.path.join(flo_dir,file_curr) for file_curr in util.getFilesInFolder(flo_dir,'.flo')]
    flo_files_names=util.getFileNames(flo_files,ext=False)
    flo_files_viz=[os.path.join(flo_viz_dir,file_curr+'.png') for file_curr in flo_files_names]
    out_file_sh=flo_viz_dir+'.sh'
    writeScriptToGetFloViz(flo_files,flo_files_viz,out_file_sh)
    subprocess.call('sh '+out_file_sh,shell=True)
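# Hedged end-to-end sketch: script_saveFlosAndViz first runs the network via
# script_saveFlos (defined below), then shells out to a generated viz script.
# All arguments here are placeholders, not values from the original experiments.
# script_saveFlosAndViz(['/tmp/im1.jpg'], '/tmp/test_run', '/tmp/test_run/flo_viz',
#                       gpu=0, model_file='/tmp/model.caffemodel',
#                       clusters_file='/tmp/clusters.mat')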
def questMsg(questDir, cwd, keywords=["msg"]):
    fileNames = util.getFileNames(questDir, cwd, keywords)
    for fileName in fileNames:
        textFile = fileName.replace(".j", "")
        textFile += "_msg.txt"
        w = open(fileName, 'r')
        t = w.read()
        t = getMsg.sub("", t)
        w.close()
        w = open(textFile, 'r')
        msg = w.read()
        w.close()
        msg = msg.replace("\n", "")
        msg = msg.replace("\\l", "\n")
        t = t.replace("setDescription()", 'setDescription("' + msg.encode("string-escape") + '")', 1)
        w = open(fileName, 'w')
        print >> w, t
        w.close()
def script_saveFlos(img_paths,dir_test,gpu,model_file,clusters_file,overwrite=False,train_val_file=None):
    test_file=os.path.join(dir_test,'test.txt')
    out_file_info=os.path.join(dir_test,'match_info.txt')
    out_dir_flo=os.path.join(dir_test,'flo_files')
    util.mkdir(out_dir_flo)
    C=readClustersFile(clusters_file)
    if (not os.path.exists(test_file)) or overwrite:
        makeTestFile(img_paths,test_file)
        # call the network
        command=stj.getCommandForTest(test_file,model_file,gpu,train_val_file=train_val_file)
        subprocess.call(command,shell=True)
    # get the h5 and img file correspondences
    if (not os.path.exists(out_file_info)) or overwrite:
        saveOutputInfoFileMP(os.path.join(dir_test,'results'),out_file_info,img_paths)
    h5_files,img_files,img_sizes=parseInfoFile(out_file_info)
    print len(h5_files)
    out_files_flo=[os.path.join(out_dir_flo,file_curr+'.flo') for file_curr in util.getFileNames(img_files,ext=False)]
    args=[]
    for idx in range(len(h5_files)):
        if not overwrite and os.path.exists(out_files_flo[idx]):
            continue
        args.append((h5_files[idx],img_sizes[idx],C,out_files_flo[idx],idx))
    print len(args)
    p=multiprocessing.Pool(NUM_THREADS)
    p.map(saveH5AsFloMP,args)
def script_seeMultipleClusters(dir_clusters=None,out_dir_plot=None):
    if dir_clusters is None:
        dir_clusters='/disk3/maheen_data/debug_networks/clusters_youtube_multiple'
    if out_dir_plot is None:
        out_dir_plot='/disk2/temp/cluster_plots'
    util.mkdir(out_dir_plot)
    clusters_all=util.getFilesInFolder(dir_clusters,'.npy')
    print len(clusters_all)
    for idx_cluster_file,cluster_file in enumerate(clusters_all):
        print idx_cluster_file
        cluster_name=util.getFileNames([cluster_file],ext=False)[0]
        out_file=os.path.join(out_dir_plot,cluster_name+'.png')
        cluster_curr=np.load(cluster_file)
        visualize.plotScatter([(cluster_curr[:,0],cluster_curr[:,1])],out_file,color='r')
    visualize.writeHTMLForFolder(out_dir_plot,ext='.png',height=300,width=300)
def script_doEverything(path_to_npy,path_to_im,ext,lim,out_file,out_dir_scratch,window,step_size,thresh,scale_info,scale_all,
                        scale_images,lim_cases,gpu,model_file,clusters_file,train_val_file=None,overwrite=False):
    for scale in scale_all:
        # script_saveImCrops(path_to_npy,path_to_im,ext,lim,out_file,out_dir_scratch,window,step_size,thresh,scale_info,scale,scale_images,lim_cases)
        for scale_image in scale_images:
            dir_scale=os.path.join(out_dir_scratch,scale+'_'+str(scale_image))
            scale_info=pickle.load(open(out_file,'rb'))
            img_dirs=[os.path.join(dir_scale,im_curr_info[0]) for im_curr_info in scale_info[scale][:lim_cases]]
            for img_dir in img_dirs:
                img_paths=util.getFilesInFolder(img_dir,ext='.png')
                if len(img_paths)==0:
                    print 'CONTINUING'
                    continue
                img_paths=[img_path for img_path in img_paths if not img_path.endswith('onImg.png')]
                out_dir_flo=img_dir+'_pred_flo'
                out_dir_flo_viz=img_dir+'_pred_flo_viz'
                util.mkdir(out_dir_flo)
                util.mkdir(out_dir_flo_viz)
                po.script_saveFlosAndViz(img_paths,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file,train_val_file=train_val_file,overwrite=overwrite)
                img_names=util.getFileNames(img_paths,ext=False)
                out_file_html=img_dir+'.html'
                img_paths_html=[]
                captions_all=[]
                for img_name in img_names:
                    row_curr=[]
                    row_curr.append(util.getRelPath(os.path.join(img_dir,img_name+'_onImg.png')))
                    row_curr.append(util.getRelPath(os.path.join(img_dir,img_name+'.png')))
                    row_curr.append(util.getRelPath(os.path.join(out_dir_flo_viz,img_name+'.png')))
                    img_paths_html.append(row_curr)
                    captions_all.append(['','',''])
                visualize.writeHTML(out_file_html,img_paths_html,captions_all)
def script_saveMatOverlapInfo():
    val_file='/disk2/januaryExperiments/pedro_data/coco-proposals/val.npy'
    val_data=np.load(val_file)
    print val_data.shape
    print val_data[101,:]
    path_to_im='/disk2/ms_coco/val2014'
    path_to_gt='/disk2/mayExperiments/validation_anno'
    path_to_pred_us='/disk3/maheen_data/headC_160_noFlow_bbox/mat_overlaps_1000'
    im_pre='COCO_val2014_'
    im_ids=val_data[:,0]
    im_ids=np.unique(im_ids)
    im_ids=np.sort(im_ids)
    print im_ids
    im_files=[dp.addLeadingZeros(int(im_id),os.path.join(path_to_im,im_pre),'.jpg') for im_id in im_ids]
    im_names=util.getFileNames(im_files,ext=False)
    print im_names[0]
    out_dir='/disk3/maheen_data/pedro_val/mat_overlap'
    idx_count=0
    problem=0
    for im_id,im_name,im_file in zip(im_ids,im_names,im_files):
        print idx_count
        rel_rows=val_data[val_data[:,0]==im_id,:]
        assert np.unique(rel_rows[:,0]).size==1
        out_file=os.path.join(out_dir,im_name+'.npz')
        gt_file=os.path.join(path_to_gt,im_name+'.npy')
        comparison_file=os.path.join(path_to_pred_us,im_name+'.npz')
        if os.path.exists(gt_file) and os.path.exists(comparison_file):
            idx_count=idx_count+1
            saveMatOverlapInfo(out_file,gt_file,rel_rows)
        else:
            problem=problem+1
        if idx_count==5000:
            break
def script_saveNonHumanOnlyNeg():
    neg_file='/disk2/marchExperiments/deep_proposals/negatives.txt'
    out_dir='/disk2/aprilExperiments/negatives_npy_onlyHuman'
    lines=util.readLinesFromFile(neg_file)
    npy_files=[line[line.index(' ')+1:] for line in lines]
    npy_file_names=util.getFileNames(npy_files)
    exists=0
    for idx_npy_file_name,npy_file_name in enumerate(npy_file_names):
        if idx_npy_file_name%100==0:
            print idx_npy_file_name
        file_curr=os.path.join(out_dir,npy_file_name)
        if os.path.exists(file_curr):
            exists+=1
        else:
            zeros=np.zeros((0,4))
            np.save(file_curr,zeros)
    print exists,len(npy_file_names)
def reshapeFloFiles(flo_files,tif_files,out_dir_new_flos):
    for flo_file,tif_file in zip(flo_files,tif_files):
        flo=util.readFlowFile(flo_file)
        tif=scipy.misc.imread(tif_file)
        flo_rs=cv2.resize(flo,(tif.shape[1],tif.shape[0]))
        flo_rs[:,:,0]=flo_rs[:,:,0]*(tif.shape[0]/float(flo.shape[0]))
        flo_rs[:,:,1]=flo_rs[:,:,1]*(tif.shape[1]/float(flo.shape[1]))
        flo_rs=flo_rs*5
        flo_name=util.getFileNames([flo_file],ext=True)[0]
        out_file_curr=os.path.join(out_dir_new_flos,flo_name)
        util.writeFlowFile(flo_rs,out_file_curr)
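# Note on reshapeFloFiles: resizing a flow field changes the pixel grid, so the
# stored displacement vectors must be scaled by the same spatial ratio. A tiny
# self-contained check of that idea (illustrative only; the extra *5 above is a
# repo-specific magnitude boost):
def _example_flowResize():
    flo = np.zeros((10, 10, 2), dtype=np.float32)
    flo[:, :, 0] = 1.0                  # 1-pixel horizontal motion everywhere
    flo_rs = cv2.resize(flo, (20, 20))  # double the spatial resolution
    flo_rs = flo_rs * 2.0               # displacements double too
    print flo_rs[0, 0, 0]               # prints 2.0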
def saveBBoxImage(out_file, path_to_anno, out_dir_im, class_name='horse'):
    files = util.readLinesFromFile(out_file)
    just_names = util.getFileNames(files, ext=False)
    annotations = [os.path.join(path_to_anno, just_name + '.xml') for just_name in just_names]
    print len(annotations)
    for im_file, anno, just_name in zip(files, annotations, just_names):
        out_file_pre = os.path.join(out_dir_im, just_name + '_')
        obj = untangle.parse(anno)
        for idx_object_curr, object_curr in enumerate(obj.annotation.object):
            if object_curr.name.cdata == class_name:
                out_file = out_file_pre + str(idx_object_curr) + '.jpg'
                bnd_box = [object_curr.bndbox.xmin.cdata,
                           object_curr.bndbox.ymin.cdata,
                           object_curr.bndbox.xmax.cdata,
                           object_curr.bndbox.ymax.cdata]
                bnd_box = [int(coord) for coord in bnd_box]
                im = scipy.misc.imread(im_file)
                if len(im.shape) < 3:
                    crop = im[bnd_box[1]:bnd_box[3], bnd_box[0]:bnd_box[2]]
                else:
                    crop = im[bnd_box[1]:bnd_box[3], bnd_box[0]:bnd_box[2], :]
                print out_file
                scipy.misc.imsave(out_file, crop)
def main():
    train_val_txt = '/Users/maheenrashid/Dropbox (Personal)/Davis_docs/Research/VOCdevkit 2/VOC2012/ImageSets/Main/horse_trainval.txt'
    path_to_im = '/Users/maheenrashid/Dropbox (Personal)/Davis_docs/Research/VOCdevkit 2/VOC2012/JPEGImages'
    path_to_anno = '/Users/maheenrashid/Dropbox (Personal)/Davis_docs/Research/VOCdevkit 2/VOC2012/Annotations'
    out_dir = '../pascal'
    util.mkdir(out_dir)
    out_file = os.path.join(out_dir, 'horse.txt')
    out_dir_im = '../pascal/just_horse_im'
    util.mkdir(out_dir_im)
    # crop-extraction step, disabled after the first run:
    # saveBBoxImage(out_file, path_to_anno, out_dir_im)
    im_files = util.getFilesInFolder(out_dir_im, ext='.jpg')
    file_names = util.getFileNames(im_files, ext=True)
    batch_size = 20
    batch_idx = util.getIdxRange(len(file_names), batch_size)
    print len(batch_idx)
    args = []
    counter = 0
    for idx_batch_start, batch_start in enumerate(batch_idx[:-1]):
        batch_end = batch_idx[idx_batch_start + 1]
        im_files_rel = im_files[batch_start:batch_end]
        file_names_rel = file_names[batch_start:batch_end]
        out_dir_curr = os.path.join(out_dir_im, str(idx_batch_start))
        util.mkdir(out_dir_curr)
        for file_name, im_file_curr in zip(file_names_rel, im_files_rel):
            out_file = os.path.join(out_dir_curr, file_name)
            if not os.path.exists(out_file):
                args.append((im_file_curr, out_file, counter))
                counter += 1
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    print len(args)
    p.map(copyfile_wrapper, args)
def writeImport(importDir, cwd, fileName, keywords=[]):
    fileNames = util.getFileNames(importDir, cwd, keywords)
    w = open(os.path.join(SCRIPTS_DIR, fileName), 'w')
    # loop variable renamed so it no longer shadows the fileName parameter
    for fileNameCurr in fileNames:
        print >> w, IMPORT_HEADER + '"' + fileNameCurr + '"'
    w.close()