# stdlib/third-party imports used in this section; util, visualize, po, and
# svl are project-local modules assumed to be imported elsewhere in this file.
from collections import OrderedDict
import os
import pickle
import subprocess
import multiprocessing

import numpy as np
import scipy.misc
import cv2


def makeFloHtml(out_file_html, img_files, flo_files, height=200, width=200):
    # pair every image with its .flo file and write one HTML row per pair.
    img_paths = []
    captions = []
    for img_file, flo_file in zip(img_files, flo_files):
        img_path = []
        img_path.append(util.getRelPath(img_file, '/disk2'))
        img_path.append(util.getRelPath(flo_file, '/disk2'))
        img_paths.append(img_path)
        captions.append(['img', 'flo'])
    visualize.writeHTML(out_file_html, img_paths, captions, height, width)
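# A minimal usage sketch for makeFloHtml. The paths below are hypothetical
# placeholders; util.getRelPath and visualize.writeHTML are assumed to behave
# as they are used above.
def _demoMakeFloHtml():
    img_files = ['/disk2/demo/frame_0.png', '/disk2/demo/frame_1.png']
    flo_files = ['/disk2/demo/frame_0.flo', '/disk2/demo/frame_1.flo']
    makeFloHtml('/disk2/demo/flo_check.html', img_files, flo_files)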
def makeFloVizHTML(out_file_html, img_paths, dir_flo_viz):
    # out_file_html=os.path.join(out_dir,'flo_viz.html');
    img_paths_html = []
    captions_html = []
    for img_path, img_file_name in zip(img_paths, util.getFileNames(img_paths, ext=False)):
        out_file_flo_viz = os.path.join(dir_flo_viz, img_file_name + '.png')
        if img_path.startswith('/disk2'):
            img_path = '/disk3' + img_path
        img_paths_curr = [util.getRelPath(img_path, '/disk3'),
                          util.getRelPath(out_file_flo_viz, '/disk3')]
        img_paths_html.append(img_paths_curr)
        captions_html.append([img_file_name, 'flo'])
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
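# Hypothetical usage sketch for makeFloVizHTML; it assumes each image has a
# matching <name>.png flow visualization in dir_flo_viz (the function itself
# remaps /disk2 inputs to their /disk3 mirror).
def _demoMakeFloVizHTML():
    img_paths = ['/disk2/demo/clip_a_frame0.png', '/disk2/demo/clip_b_frame0.png']
    makeFloVizHTML('/disk3/demo/flo_viz.html', img_paths, '/disk3/demo/flo_viz')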
def script_testOnYoutube():
    val_file = '/disk2/mayExperiments/finetuning_youtube_hmdb_llr/val_eq.txt'
    out_dir = '/disk2/mayExperiments/eval_ucf_finetune'
    clusters_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat'
    gpu = 0
    util.mkdir(out_dir)
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel';
    # out_dir_model=os.path.join(out_dir,'original_model');
    model_file = '/disk2/mayExperiments/ft_youtube_hmdb_ucfClusters/OptFlow_youtube_hmdb__iter_55000.caffemodel'
    out_dir_model = os.path.join(out_dir, 'ft_ucf_model')
    util.mkdir(out_dir_model)
    out_dir_flo = os.path.join(out_dir_model, 'flo')
    out_dir_flo_viz = os.path.join(out_dir_model, 'flo_viz')
    util.mkdir(out_dir_flo)
    util.mkdir(out_dir_flo_viz)

    num_to_pick = 20
    img_paths = util.readLinesFromFile(val_file)
    img_paths = [img_path[:img_path.index(' ')] for img_path in img_paths]
    class_names = [file_curr[:file_curr.index('_')] for file_curr in util.getFileNames(img_paths)]
    classes = list(set(class_names))
    class_names = np.array(class_names)

    # keep the first num_to_pick images of every class for the comparison page.
    img_paths_test = []
    for class_curr in classes:
        idx_rel = np.where(class_names == class_curr)[0]
        idx_rel = idx_rel[:num_to_pick]
        img_paths_test.extend([img_paths[idx_curr] for idx_curr in idx_rel])

    # po.script_saveFlosAndViz(img_paths_test,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file);

    out_file_html = os.path.join(out_dir, 'model_comparison.html')
    out_dirs_flo_viz = [os.path.join(out_dir, 'original_model', 'flo_viz'),
                        os.path.join(out_dir, 'ft_ucf_model', 'flo_viz')]
    out_dirs_flo_viz_captions = ['original_model', 'ft_ucf_model']
    img_paths_html = []
    captions_html = []
    img_names = util.getFileNames(img_paths_test, ext=False)
    for img_path_test, img_name in zip(img_paths_test, img_names):
        row_curr = [util.getRelPath(img_path_test)]
        for out_dir_curr in out_dirs_flo_viz:
            file_curr = os.path.join(out_dir_curr, img_name + '.png')
            row_curr.append(util.getRelPath(file_curr))
        captions_curr = [img_name] + out_dirs_flo_viz_captions
        img_paths_html.append(row_curr)
        captions_html.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
def writeHTMLForDifferentFolders(out_file_html, folders, captions, img_names,
                                 rel_path_replace=None, height=200, width=200):
    if rel_path_replace is None:
        string_curr = folders[0]
        rel_path_replace = string_curr[:string_curr[1:].index('/') + 1]
    # print rel_path_replace
    img_paths = []
    captions_html = []
    for img_name in img_names:
        captions_row = []
        img_paths_row = []
        for caption_curr, folder in zip(captions, folders):
            img_paths_row.append(util.getRelPath(os.path.join(folder, img_name), rel_path_replace))
            captions_row.append(caption_curr)
        img_paths.append(img_paths_row)
        captions_html.append(captions_row)
    writeHTML(out_file_html, img_paths, captions_html, height=height, width=width)
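# Sketch of writeHTMLForDifferentFolders comparing two result folders that
# share image file names; the paths are hypothetical placeholders. With
# rel_path_replace left as None it defaults to '/disk2' here.
def _demoWriteHTMLForDifferentFolders():
    folders = ['/disk2/demo/original_model/flo_viz', '/disk2/demo/ft_ucf_model/flo_viz']
    captions = ['original_model', 'ft_ucf_model']
    img_names = ['clip_00.png', 'clip_01.png']
    writeHTMLForDifferentFolders('/disk2/demo/model_comparison.html', folders, captions, img_names)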
def script_writeHTMLStitchedFlos_wDirs(img_paths, out_file_html, viz_dirs):
    img_paths_html = []
    captions = []
    for img_path in img_paths:
        img_name = img_path[img_path.rindex('/') + 1:img_path.rindex('.')]
        img_paths_html_curr = [util.getRelPath(img_path)]
        captions_curr = ['im']
        for viz_dir in viz_dirs:
            print viz_dir, img_path
            # img_path_curr=[os.path.join(viz_dir,file_curr) for file_curr in os.listdir(viz_dir) if file_curr.startswith(img_name)][0];
            img_path_curr = os.path.join(viz_dir, img_name + '.png')
            img_paths_html_curr.append(util.getRelPath(img_path_curr))
            captions_curr.append(viz_dir[viz_dir.rindex('/') + 1:])
        img_paths_html.append(img_paths_html_curr)
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths_html, captions)
def script_writeHTMLStitchedFlos(out_file_html, out_file, out_dir, grid_sizes=[1, 2, 4, 8], grid_dir_pre='grid_flo_viz_'):
    img_paths = util.readLinesFromFile(out_file)
    viz_dirs = [os.path.join(out_dir, grid_dir_pre + str(num)) for num in grid_sizes]
    img_paths_html = []
    captions = []
    for img_path in img_paths:
        img_name = img_path[img_path.rindex('/') + 1:img_path.rindex('.')]
        img_paths_html_curr = [util.getRelPath(img_path)]
        captions_curr = ['im']
        for viz_dir in viz_dirs:
            print viz_dir, img_path
            img_path_curr = [os.path.join(viz_dir, file_curr) for file_curr in os.listdir(viz_dir)
                             if file_curr.startswith(img_name)][0]
            img_paths_html_curr.append(util.getRelPath(img_path_curr))
            captions_curr.append(viz_dir[viz_dir.rindex('/') + 1:])
        img_paths_html.append(img_paths_html_curr)
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths_html, captions)
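# Sketch of script_writeHTMLStitchedFlos under hypothetical paths: out_file
# lists one image path per line, and out_dir is expected to hold existing
# grid_flo_viz_<n> folders for each entry of grid_sizes.
def _demoScriptWriteHTMLStitchedFlos():
    script_writeHTMLStitchedFlos('/disk2/demo/stitched.html',
                                 '/disk2/demo/im_list.txt',
                                 '/disk2/demo', grid_sizes=[1, 2, 4])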
def script_doEverything(path_to_npy, path_to_im, ext, lim, out_file, out_dir_scratch, window, step_size,
                        thresh, scale_info, scale_all, scale_images, lim_cases, gpu, model_file,
                        clusters_file, train_val_file=None, overwrite=False):
    for scale in scale_all:
        # script_saveImCrops(path_to_npy,path_to_im,ext,lim,out_file,out_dir_scratch,window,step_size,thresh,scale_info,scale,scale_images,lim_cases)
        for scale_image in scale_images:
            dir_scale = os.path.join(out_dir_scratch, scale + '_' + str(scale_image))
            scale_info = pickle.load(open(out_file, 'rb'))
            img_dirs = [os.path.join(dir_scale, im_curr_info[0]) for im_curr_info in scale_info[scale][:lim_cases]]
            for img_dir in img_dirs:
                img_paths = util.getFilesInFolder(img_dir, ext='.png')
                if len(img_paths) == 0:
                    print 'CONTINUING'
                    continue
                img_paths = [img_path for img_path in img_paths if not img_path.endswith('onImg.png')]
                out_dir_flo = img_dir + '_pred_flo'
                out_dir_flo_viz = img_dir + '_pred_flo_viz'
                util.mkdir(out_dir_flo)
                util.mkdir(out_dir_flo_viz)
                po.script_saveFlosAndViz(img_paths, out_dir_flo, out_dir_flo_viz, gpu, model_file,
                                         clusters_file, train_val_file=train_val_file, overwrite=overwrite)
                img_names = util.getFileNames(img_paths, ext=False)
                out_file_html = img_dir + '.html'
                img_paths_html = []
                captions_all = []
                for img_name in img_names:
                    row_curr = [util.getRelPath(os.path.join(img_dir, img_name + '_onImg.png')),
                                util.getRelPath(os.path.join(img_dir, img_name + '.png')),
                                util.getRelPath(os.path.join(out_dir_flo_viz, img_name + '.png'))]
                    captions = ['', '', '']
                    img_paths_html.append(row_curr)
                    captions_all.append(captions)
                visualize.writeHTML(out_file_html, img_paths_html, captions_all)
def script_writeFloVizHTML(out_file_html, out_dir_viz, flo_files, im_files, tif_files, clusters, tifAsPng=False):
    img_paths = []
    captions = []
    for flo_file, im_file, tif_file in zip(flo_files, im_files, tif_files):
        assert os.path.exists(tif_file)
        assert os.path.exists(im_file)
        file_name = util.getFileNames([flo_file], ext=False)[0]
        out_file_pre = os.path.join(out_dir_viz, file_name)
        out_file_flo_viz = out_file_pre + '_flo.png'
        # one output per rendering: cluster-ids-as-flow, then the x and y
        # cluster-id channels as grayscale.
        out_files_tif = [out_file_pre + '_tifflo.png', out_file_pre + '_tifim_x.png', out_file_pre + '_tifim_y.png']
        if not os.path.exists(out_file_flo_viz):
            po.saveFloFileViz(flo_file, out_file_flo_viz)
        for idx, out_file_tif_viz in enumerate(out_files_tif):
            if os.path.exists(out_file_tif_viz):
                continue
            tif = scipy.misc.imread(tif_file)[:, :, :2]
            if idx == 0:
                tif_flo = replaceClusterIdWithFlow(tif, clusters)
                po.saveMatFloViz(tif_flo, out_file_tif_viz)
            elif idx == 1:
                tif_x = np.array(tif[:, :, 0] * (255.0 / clusters.shape[0]), dtype=int)
                tif_x = np.dstack((tif_x, tif_x, tif_x))
                scipy.misc.imsave(out_file_tif_viz, tif_x)
            else:
                tif_y = np.array(tif[:, :, 1] * (255.0 / clusters.shape[0]), dtype=int)
                tif_y = np.dstack((tif_y, tif_y, tif_y))
                scipy.misc.imsave(out_file_tif_viz, tif_y)
        img_paths_curr = [im_file, out_file_flo_viz] + out_files_tif
        im_name = util.getFileNames([im_file], ext=False)[0]
        captions_curr = [im_name, 'flo_viz'] + ['tif_flo_viz'] * len(out_files_tif)
        # if tifAsPng:
        #     img_paths_curr.append(out_file_tif_viz.replace('_x.png','_y.png'));
        #     captions_curr.append('tif_flo_viz');
        img_paths_curr = [util.getRelPath(file_curr) for file_curr in img_paths_curr]
        img_paths.append(img_paths_curr)
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths, captions)
def comparativeLossViz(img_dirs, file_post, loss_post, range_batches, range_images,
                       out_file_html, dir_server, img_caption_pre=None):
    img_files_all = []
    captions_all = []
    if img_caption_pre is not None:
        assert len(img_caption_pre) == len(img_dirs)
    for batch_num in range_batches:  # range(1,num_batches+1):
        for im_num in range_images:
            for idx_img_dir, img_dir in enumerate(img_dirs):
                loss_all = np.load(os.path.join(img_dir, str(batch_num) + loss_post))
                if im_num > loss_all.shape[0]:
                    continue
                loss_curr = loss_all[im_num - 1, 0]
                loss_str = "{:10.4f}".format(loss_curr)
                files_curr = [os.path.join(img_dir, str(batch_num) + '_' + str(im_num) + file_post_curr)
                              for file_post_curr in file_post]
                files_curr = [util.getRelPath(file_curr, dir_server) for file_curr in files_curr]
                captions_curr = [os.path.split(file_curr)[1] + ' ' + loss_str for file_curr in files_curr]
                if img_caption_pre is not None:
                    captions_curr = [img_caption_pre[idx_img_dir] + ' ' + caption_curr
                                     for caption_curr in captions_curr]
                img_files_all.append(files_curr)
                captions_all.append(captions_curr)
    visualize.writeHTML(out_file_html, img_files_all, captions_all, 224, 224)
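# Sketch of comparativeLossViz over two hypothetical result dirs; each dir is
# assumed to hold <batch>_<im><post> images and <batch>_loss.npy arrays, as
# the function above expects.
def _demoComparativeLossViz():
    img_dirs = ['/home/SSD3/maheen-data/demo/run_a', '/home/SSD3/maheen-data/demo/run_b']
    comparativeLossViz(img_dirs,
                       ['_horse.jpg', '_human.jpg', '_gtwarp.jpg', '_predwarp.jpg'],
                       '_loss.npy',
                       range(1, 3), range(1, 61, 5),
                       '/home/SSD3/maheen-data/demo/comparison.html',
                       '/home/SSD3/maheen-data',
                       img_caption_pre=['run_a', 'run_b'])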
def makeImTifViz(img_paths_all, tif_paths_all, out_file_html, out_dir_tif, num_clusters=40, disk_path='/disk2'):
    out_files_tif_x = [os.path.join(out_dir_tif, img_name + '_x.png')
                       for img_name in util.getFileNames(tif_paths_all, ext=False)]
    out_files_tif_y = [os.path.join(out_dir_tif, img_name + '_y.png')
                       for img_name in util.getFileNames(tif_paths_all, ext=False)]
    for tif_path, out_file_x, out_file_y in zip(tif_paths_all, out_files_tif_x, out_files_tif_y):
        tif = scipy.misc.imread(tif_path)
        # print np.min(tif[:,:,:2]),np.max(tif[:,:,:2])
        # cluster ids are expected to lie in [1, num_clusters].
        assert np.min(tif[:, :, :2]) > 0 and np.max(tif[:, :, :2]) < num_clusters + 1
        saveTifGray(tif, out_file_x, out_file_y, num_clusters)
    # out_file_html=out_dir_tif+'.html';
    img_paths_html = [[util.getRelPath(img_curr, disk_path) for img_curr in img_list]
                      for img_list in zip(img_paths_all, out_files_tif_x, out_files_tif_y)]
    captions_html = [['Image', 'Tif_x', 'Tif_y']] * len(img_paths_html)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
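# Sketch of makeImTifViz on a hypothetical image/tif pair; saveTifGray is the
# helper used above and is assumed to be defined elsewhere in this file.
def _demoMakeImTifViz():
    img_paths_all = ['/disk2/demo/im_0.jpg']
    tif_paths_all = ['/disk2/demo/im_0.tif']
    makeImTifViz(img_paths_all, tif_paths_all,
                 '/disk2/demo/tif_viz.html', '/disk2/demo/tif_viz',
                 num_clusters=40)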
def makeClusterHTML(out_file_html, labels, num_cols, size_im, dir_server):
    ims = []
    captions = []
    start_idx = 0
    while start_idx < len(labels):
        row_curr = []
        caption_curr = []
        if start_idx + num_cols > len(labels):
            num_cols_real = len(labels) - start_idx
        else:
            num_cols_real = num_cols
        for col_no in range(num_cols_real):
            idx_curr = start_idx + col_no
            label_curr = labels[idx_curr]
            row_curr.append(util.getRelPath(label_curr, dir_server))
            caption_curr.append('')
        ims.append(row_curr)
        captions.append(caption_curr)
        start_idx = start_idx + num_cols_real
    visualize.writeHTML(out_file_html, ims, captions, size_im, size_im)
    print out_file_html.replace(dir_server, 'http://vision1.idav.ucdavis.edu:1000')
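# Sketch of makeClusterHTML laying out cluster-member images in rows of
# num_cols; the image paths are hypothetical placeholders under dir_server.
def _demoMakeClusterHTML():
    labels = ['/disk3/demo/cluster_0/im_%d.png' % i for i in range(10)]
    makeClusterHTML('/disk3/demo/cluster_0.html', labels, num_cols=4, size_im=100, dir_server='/disk3')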
def compareMagInfoLayerTime(dirs, out_dirs, model_num, num_iters, file_pre_grad, file_pre_weight, out_file_html):
    img_paths = []
    captions = []
    for dir_curr, out_dir_curr in zip(dirs, out_dirs):
        # build the dict as an OrderedDict directly so the plot order is fixed.
        dict_for_plotting = OrderedDict([('grads_mag', OrderedDict()),
                                         ('weights_mag', OrderedDict()),
                                         ('ratios', OrderedDict())])
        for model_num_curr in model_num:
            print model_num_curr
            file_pairs = [(os.path.join(dir_curr, model_num_curr, file_pre_grad + str(iter_curr) + '.npy'),
                           os.path.join(dir_curr, model_num_curr, file_pre_weight + str(iter_curr) + '.npy'))
                          for iter_curr in num_iters]
            means, _ = getMagInfo(file_pairs, alt=True)
            dict_for_plotting['grads_mag'][model_num_curr] = means[0]
            dict_for_plotting['weights_mag'][model_num_curr] = means[1]
            dict_for_plotting['ratios'][model_num_curr] = means[2]
        img_paths_curr = []
        captions_curr = []
        for key_curr in dict_for_plotting.keys():
            out_file_curr = os.path.join(out_dir_curr, key_curr + '.png')
            data = dict_for_plotting[key_curr]
            xAndYs = data.values()
            legend_entries = data.keys()
            xAndYs = [(range(len(x_curr)), x_curr) for x_curr in xAndYs]
            visualize.plotSimple(xAndYs, out_file_curr, title=key_curr, xlabel='layer', ylabel='magnitude',
                                 legend_entries=legend_entries, outside=True)
            print out_file_curr.replace('/disk3', 'vision3.cs.ucdavis.edu:1001')
            img_paths_curr.append(util.getRelPath(out_file_curr, '/disk3'))
            captions_curr.append(dir_curr.split('/')[-2] + ' ' + dir_curr.split('/')[-1] + ' ' + key_curr)
        img_paths.append(img_paths_curr)
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths, captions, height=500, width=500)
    print out_file_html.replace('/disk3', 'vision3.cs.ucdavis.edu:1001')
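# Sketch of compareMagInfoLayerTime for one hypothetical experiment dir with
# per-snapshot grad/weight magnitude .npy dumps; getMagInfo is assumed to be
# the helper used above.
def _demoCompareMagInfoLayerTime():
    dirs = ['/disk3/demo/experiment/correct']
    out_dirs = [os.path.join(dirs[0], 'plots')]
    model_num = [str(n) for n in range(5000, 25000, 5000)]
    compareMagInfoLayerTime(dirs, out_dirs, model_num, range(1, 5),
                            'grad_mag_', 'weight_mag_',
                            os.path.join(dirs[0], 'mag_comparison.html'))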
def script_visualizeLossesFromExperiment():
    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    out_dir = '/disk3/maheen_data/debug_networks/noFixCopyByLayerAlexNet'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel'
    layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6']  # ,'fc7'];
    # build the cumulative fixed-layer strings: conv1, conv1_conv2, ...
    layers_str = []
    for idx in range(len(layers)):
        fix_layers = layers[:idx + 1]
        layer_str = '_'.join(fix_layers)
        layers_str.append(layer_str)
    log_files = [os.path.join(out_dir, 'log_' + layer_str + '.log') for layer_str in layers_str]
    str_match = ' solver.cpp:209] Iteration '
    out_files = []
    for layer_str, log_file in zip(layers_str, log_files):
        xAndY = svl.getIterationsAndLosses(log_file, str_match)
        print xAndY
        out_file = os.path.join(out_dir, 'loss_' + layer_str + '.png')
        visualize.plotSimple([xAndY], out_file, title=layer_str)
        out_files.append(out_file)
    out_file_html = os.path.join(out_dir, 'losses_all.html')
    img_paths = [[util.getRelPath(out_file, '/disk3')] for out_file in out_files]
    captions = [['']] * len(out_files)
    print img_paths
    print captions
    visualize.writeHTML(out_file_html, img_paths, captions, height=300, width=300)
def script_writeHTMLForOverlap(path_to_npy, path_to_im, ext, lim, out_file, out_dir_scratch, window,
                               step_size, thresh, scale_info, scale_all, scale_images, lim_cases,
                               gpu, model_file, clusters_file):
    img_dirs_all = []
    for scale in scale_all:
        for scale_image in scale_images:
            dir_scale = os.path.join(out_dir_scratch, scale + '_' + str(scale_image))
            scale_info = pickle.load(open(out_file, 'rb'))
            img_dirs = [os.path.join(dir_scale, im_curr_info[0]) for im_curr_info in scale_info[scale][:lim_cases]]
            img_dirs_all = img_dirs_all + img_dirs

    # gather, per image, the crop/pred/overlap files listed in the .p records.
    img_files_record = {}
    for img_dir in img_dirs_all:
        record_files = [os.path.join(img_dir, file_curr) for file_curr in util.getFilesInFolder(img_dir, '.p')]
        for record_file in record_files:
            record = pickle.load(open(record_file, 'rb'))
            if len(record[0]) == 0:
                continue
            img_name_ac = record_file[record_file.rindex('/') + 1:record_file.rindex('_')]
            rel_files = []
            for idx_curr in range(len(record[0])):
                rel_file_curr = [os.path.join(img_dir, img_name_ac + '_' + str(idx_curr) + '_onImg.png'),
                                 os.path.join(img_dir, img_name_ac + '_' + str(idx_curr) + '.png'),
                                 os.path.join(img_dir + '_pred_flo_viz', img_name_ac + '_' + str(idx_curr) + '.png'),
                                 record[1][idx_curr]]
                rel_files.append(rel_file_curr)
            if img_name_ac in img_files_record:
                img_files_record[img_name_ac].extend(rel_files)
            else:
                img_files_record[img_name_ac] = rel_files
    print len(img_files_record)

    out_file_html = os.path.join(out_dir_scratch, 'visualize.html')
    img_paths_html = []
    captions_html = []
    for img_name in img_files_record.keys():
        img_paths_row = []
        captions_row = []
        rec = img_files_record[img_name]
        rec_np = np.array(rec)
        print rec_np.shape
        # sort by overlap score (last column), highest first, sample every fifth.
        sort_idx = np.argsort(rec_np[:, -1])[::-1]
        for idx_curr in sort_idx[::5]:
            img_paths_row.extend(rec[idx_curr][:-1])
            captions_row.extend([str(rec[idx_curr][-1])] * 3)
        img_paths_row = [util.getRelPath(path_curr) for path_curr in img_paths_row]
        img_paths_html.append(img_paths_row)
        captions_html.append(captions_row)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
def main():
    # dirs=['/disk3/maheen_data/headC_160_withFlow_human_xavier_unit_floStumpPretrained_fullTraining/gradient_checks'];
    # # dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct_16']];
    # range_flow=range(1);
    # out_dirs=[os.path.join(dir_curr,'plots') for dir_curr in dirs];
    # [util.mkdir(out_dir_curr) for out_dir_curr in out_dirs];
    # model_num=range(5000,45000,5000);
    # model_num.append(45000);
    # print model_num
    # model_num=[str(model_num_curr) for model_num_curr in model_num]
    # num_iters=range(1,5);
    # # num_iters=range(2,3);
    # file_pre_weight='weight_mag_n_';
    # file_pre_grad='grad_mag_n_';
    # out_file_html=os.path.join(dirs[0],'comparison_grads_weights_ratios_n.html');
    # compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)
    # out_file_html=os.path.join(dirs[0],'comparison_grads_seg_no_seg_16.html');
    # layer_range=[26,27,28,31];
    # num_iters=range(1,5,2);
    # img_paths_seg_flow=compareGradInfoLayer([dirs[i] for i in range_flow],[out_dirs[i] for i in range_flow],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
    # layer_range=range(26,32);
    # num_iters=range(2,5,2);
    # img_paths_score_flow=compareGradInfoLayer([dirs[i] for i in range_flow],[out_dirs[i] for i in range_flow],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')
    # img_paths=[img_paths_seg_flow,img_paths_score_flow];
    # img_paths=[[util.getRelPath(path_curr,'/disk3') for path_curr in list_curr] for list_curr in img_paths];
    # captions=[];
    # for list_curr in img_paths:
    #     captions_curr=[];
    #     for path in list_curr:
    #         path_split=path.split('/');
    #         caption=path_split[-4]+' '+path_split[-3];
    #         captions_curr.append(caption);
    #         print caption
    #     captions.append(captions_curr);
    # visualize.writeHTML(out_file_html,img_paths,captions,height=500,width=500);
    # print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');
    # return

    # model_dir='/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_floStumpPretrained_fullTraining/intermediate'
    # out_dir_meta='/disk3/maheen_data/headC_160_withFlow_human_xavier_unit_floStumpPretrained_fullTraining'
    # out_dir=os.path.join(out_dir_meta,'gradient_checks')
    # util.mkdir(out_dir);
    # params=[(model_dir,out_dir,'40')];
    # out_file_commands_pre=os.path.join(out_dir_meta,'debug_commands_');
    # path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_debug.th'
    # model_num=range(5000,45000,5000);
    # model_num.append(45000);
    # print model_num
    # # return
    # writeCommandsForTrainDebug(params,path_to_train_file,out_file_commands_pre,model_num)
    # return

    dirs = ['/disk3/maheen_data/headC_160/withFlow_human_debug']
    dirs = [os.path.join(dir_curr, dir_in) for dir_curr in dirs for dir_in in ['correct']]
    range_flow = range(1)
    # range_noflow=range(2,len(dirs));
    # dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug','/disk3/maheen_data/headC_160/noFlow_human_debug'];
    # dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct_16','incorrect']];
    # range_flow=range(2);
    # range_noflow=range(2,len(dirs));
    out_dirs = [os.path.join(dir_curr, 'plots') for dir_curr in dirs]
    [util.mkdir(out_dir_curr) for out_dir_curr in out_dirs]
    model_num = range(5000, 100000, 20000)
    model_num.append(100000)
    # model_num=range(2000,32000,6000);
    # model_num.append(32000);
    model_num = [str(model_num_curr) for model_num_curr in model_num]
    print model_num
    # num_iters=range(1,21);
    num_iters = range(2, 3)
    file_pre_weight = 'weight_mag_n_'
    file_pre_grad = 'grad_mag_n_'
    out_file_html = os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug', 'comparison_grads_weights_ratios.html')
    compareMagInfoLayerTime(dirs, out_dirs, model_num, num_iters, file_pre_grad, file_pre_weight, out_file_html)
    # compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)

    out_file_html = os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug', 'comparison_grads_seg_no_seg.html')
    layer_range = [26, 27, 28, 31]
    # layer_range=[27,28];
    num_iters = range(3, 21, 2)
    img_paths_seg_flow = compareGradInfoLayer([dirs[i] for i in range_flow], [out_dirs[i] for i in range_flow],
                                              model_num, num_iters, file_pre_grad, layer_range, 'seg_grad_mag')
    layer_range = range(26, 32)
    num_iters = range(2, 21, 2)
    img_paths_score_flow = compareGradInfoLayer([dirs[i] for i in range_flow], [out_dirs[i] for i in range_flow],
                                                model_num, num_iters, file_pre_grad, layer_range, 'score_grad_mag')
    # layer_range=[13,14,17]
    # num_iters=range(1,21,2);
    # img_paths_seg_noflow=compareGradInfoLayer([dirs[i] for i in range_noflow],[out_dirs[i] for i in range_noflow],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
    # layer_range=range(13,18);
    # num_iters=range(2,21,2);
    # img_paths_score_noflow=compareGradInfoLayer([dirs[i] for i in range_noflow],[out_dirs[i] for i in range_noflow],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')
    # layer_range=[26,27,28,31];
    # num_iters=range(1,21,2);
    # img_paths_seg_flow=compareGradInfoLayer(dirs[1:],out_dirs[1:],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
    # layer_range=range(26,32);
    # num_iters=range(2,21,2);
    # num_iters=range(2,7,2);
    # img_paths_score_flow=compareGradInfoLayer(dirs[1:],out_dirs[1:],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')
    # img_paths=[img_paths_seg_flow,img_paths_score_flow,img_paths_seg_noflow,img_paths_score_noflow];
    img_paths = [img_paths_seg_flow, img_paths_score_flow]
    img_paths = [[util.getRelPath(path_curr, '/disk3') for path_curr in list_curr] for list_curr in img_paths]
    captions = []
    for list_curr in img_paths:
        captions_curr = []
        for path in list_curr:
            path_split = path.split('/')
            caption = path_split[-4] + ' ' + path_split[-3]
            captions_curr.append(caption)
            print caption
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths, captions, height=500, width=500)
    print out_file_html.replace('/disk3', 'vision3.cs.ucdavis.edu:1001')
    return

    params = [('/disk3/maheen_data/headC_160/withFlow_xavier_16_score/intermediate',
               '/disk3/maheen_data/headC_160/withFlow_human_debug/correct_16', '40')]
    out_file_commands_pre = '/disk3/maheen_data/headC_160/withFlow_human_debug/debug_commands_16_'
    path_to_train_file = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_debug.th'
    # params=[('/disk3/maheen_data/headC_160/noFlow_gaussian_human_softmax/intermediate_res','/disk3/maheen_data/headC_160/noFlow_human_debug/correct','40'),
    #         ('/disk3/maheen_data/headC_160/noFlow_gaussian_human/intermediate','/disk3/maheen_data/headC_160/noFlow_human_debug/incorrect','56')];
    # [util.mkdir(params_curr[1]) for params_curr in params];
    # out_file_commands_pre='/disk3/maheen_data/headC_160/noFlow_human_debug/debug_commands_';
    # path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_debug.th'
    model_num = range(2000, 32000, 6000)
    model_num.append(32000)
    print model_num
    return
    writeCommandsForTrainDebug(params, path_to_train_file, out_file_commands_pre, model_num)
    # maheen_data/headC_160/withFlow_xavier_16_score/intermediate/
    return

    dirs = ['/disk3/maheen_data/headC_160/withFlow_human_debug', '/disk3/maheen_data/headC_160/noFlow_human_debug']
    # out_file_html=os.path.join(dirs[0],'comparison_dloss_seg_score.html');
    dirs = [os.path.join(dir_curr, dir_in) for dir_curr in dirs for dir_in in ['correct']]
    out_dirs = [os.path.join(dir_curr, 'plots') for dir_curr in dirs]
    file_seg = 'loss_seg.npy'
    file_score = 'loss_score.npy'
    num_iters = range(2, 21)
    model_num = range(5000, 100000, 20000)
    model_num.append(100000)
    model_num = [str(model_num_curr) for model_num_curr in model_num]
    img_paths = []
    captions = []
    for dir_curr, out_dir_curr in zip(dirs, out_dirs):
        dict_for_plotting = {'loss_seg_all': OrderedDict(), 'loss_score_all': OrderedDict(),
                             'loss_ratio_all': OrderedDict()}
        for model_num_curr in model_num:
            file_curr_seg = os.path.join(dir_curr, model_num_curr, file_seg)
            file_curr_score = os.path.join(dir_curr, model_num_curr, file_score)
            score_all = np.load(file_curr_score)
            score_all = score_all[[0] + range(1, len(score_all), 2)]
            score_all = score_all * 32
            seg_all = np.load(file_curr_seg)
            seg_all = seg_all[range(0, len(seg_all), 2)]
            ratios = seg_all / score_all
            print dir_curr, model_num_curr
            print np.mean(score_all), np.mean(seg_all), np.mean(ratios)
            # break;
        # break;
        # if num_iter_curr==2:
        #     score_all.append(np.load(file_curr_score));
        #     seg_all.append(np.load(file_curr_seg));
        # elif num_iter_curr%2==0:
        #     score_all.append(np.load(file_curr_score));
        # else:
        #     seg_all.append(np.load(file_curr_seg));
        # seg_curr=np.load(file_curr_seg);
        # seg_curr=np.unique(np.ravel(seg_curr));
        # score_curr=list(np.load(file_curr_score));
        # score_curr=list(np.unique(np.ravel(score_curr)));
        # seg_all.extend(seg_curr);
        # score_all.extend(score_curr);
        # seg_all=list(set(seg_all));
        # score_all=list(set(score_all));
        # seg_all.sort();
        # score_all.sort();
        # dict_for_plotting['seg_all'][model_num_curr]=seg_all;
        # dict_for_plotting['score_all'][model_num_curr]=score_all;
        # img_paths_curr=[];
        # captions_curr=[];
        # for key_curr in dict_for_plotting.keys():
        #     out_file_curr=os.path.join(out_dir_curr,key_curr+'.png');
        #     data=dict_for_plotting[key_curr];
        #     xAndYs=data.values();
        #     legend_entries=data.keys();
        #     xAndYs=[(range(len(x_curr)),x_curr) for x_curr in xAndYs];
        #     visualize.plotSimple(xAndYs,out_file_curr,title=key_curr,xlabel='sorted idx',ylabel='values',legend_entries=legend_entries,outside=True);
        #     print out_file_curr.replace('/disk3','vision3.cs.ucdavis.edu:1001');
        #     img_paths_curr.append(util.getRelPath(out_file_curr,'/disk3'));
        #     captions_curr.append(dir_curr.split('/')[-2]+' '+dir_curr.split('/')[-1]+' '+key_curr);
        # img_paths.append(img_paths_curr);
        # captions.append(captions_curr);
    # visualize.writeHTML(out_file_html,img_paths,captions,height=200,width=200);
    # print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');
    return

    dirs = ['/disk3/maheen_data/headC_160/withFlow_human_debug', '/disk3/maheen_data/headC_160/noFlow_human_debug']
    out_file_html = os.path.join(dirs[0], 'comparison_dloss_seg_score.html')
    dirs = [os.path.join(dir_curr, dir_in) for dir_curr in dirs for dir_in in ['correct']]
    out_dirs = [os.path.join(dir_curr, 'plots') for dir_curr in dirs]
    file_seg_pre = 'dloss_seg_'
    file_score_pre = 'dloss_score_'
    num_iters = range(2, 21)
    model_num = range(25000, 100000, 20000)
    model_num.append(100000)
    model_num = [str(model_num_curr) for model_num_curr in model_num]
    img_paths = []
    captions = []
    for dir_curr, out_dir_curr in zip(dirs, out_dirs):
        dict_for_plotting = {'seg_all': OrderedDict(), 'score_all': OrderedDict()}
        for model_num_curr in model_num:
            seg_all = []
            score_all = []
            for num_iter_curr in num_iters:
                file_curr_seg = os.path.join(dir_curr, model_num_curr, file_seg_pre + str(num_iter_curr) + '.npy')
                file_curr_score = os.path.join(dir_curr, model_num_curr, file_score_pre + str(num_iter_curr) + '.npy')
                seg_curr = np.load(file_curr_seg)
                seg_curr = np.unique(np.ravel(seg_curr))
                score_curr = list(np.load(file_curr_score))
                score_curr = list(np.unique(np.ravel(score_curr)))
                seg_all.extend(seg_curr)
                score_all.extend(score_curr)
            seg_all = list(set(seg_all))
            score_all = list(set(score_all))
            seg_all.sort()
            score_all.sort()
            dict_for_plotting['seg_all'][model_num_curr] = seg_all
            dict_for_plotting['score_all'][model_num_curr] = score_all
        img_paths_curr = []
        captions_curr = []
        for key_curr in dict_for_plotting.keys():
            out_file_curr = os.path.join(out_dir_curr, key_curr + '.png')
            data = dict_for_plotting[key_curr]
            xAndYs = data.values()
            legend_entries = data.keys()
            xAndYs = [(range(len(x_curr)), x_curr) for x_curr in xAndYs]
            visualize.plotSimple(xAndYs, out_file_curr, title=key_curr, xlabel='sorted idx', ylabel='values',
                                 legend_entries=legend_entries, outside=True)
            print out_file_curr.replace('/disk3', 'vision3.cs.ucdavis.edu:1001')
            img_paths_curr.append(util.getRelPath(out_file_curr, '/disk3'))
            captions_curr.append(dir_curr.split('/')[-2] + ' ' + dir_curr.split('/')[-1] + ' ' + key_curr)
        img_paths.append(img_paths_curr)
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths, captions, height=200, width=200)
    print out_file_html.replace('/disk3', 'vision3.cs.ucdavis.edu:1001')
    # dloss_seg=np.load(file_curr_seg);
    # dloss_seg=np.mean(dloss_seg,axis=0);
    # print dloss_seg.shape;
    # dloss_seg=dloss_seg[0];
    # print dloss_seg.shape;
    # dloss_score=np.load(file_curr_score);
    # print dloss_score.shape;
    # print np.min(dloss_score);
    # print np.max(dloss_score);
    # print np.min(dloss_seg);
    # print np.max(dloss_seg);
    # # print dloss_seg[0],np.min(dloss_seg),np.max(dloss_seg);
    # print file_curr;
    return

    dirs = ['/disk3/maheen_data/headC_160/withFlow_human_debug', '/disk3/maheen_data/headC_160/noFlow_human_debug']
    out_file_html = os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug', 'comparison_grads_weights_ratios.html')
    # dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug'];
    out_file_html = os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug', 'comparison_grads_seg_no_seg.html')
    dirs = [os.path.join(dir_curr, dir_in) for dir_curr in dirs for dir_in in ['correct', 'incorrect']]
    out_dirs = [os.path.join(dir_curr, 'plots') for dir_curr in dirs]
    [util.mkdir(out_dir_curr) for out_dir_curr in out_dirs]
    model_num = range(5000, 100000, 20000)
    model_num.append(100000)
    model_num = [str(model_num_curr) for model_num_curr in model_num]
    num_iters = range(1, 21)
    file_pre_weight = 'weight_mag_'
    file_pre_grad = 'grad_mag_'
    # file_curr=os.path.join(dirs[0],model_num[-1],file_pre_grad+'1.npy');
    # grads=np.load(file_curr);
    # print grads.shape;
    # grads=grads[::2];
    # print grads.shape;
    # print grads[26:]
    # compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)
    # layer_range=range(26,32);
    layer_range = [26, 27, 28, 31]
    num_iters = range(1, 21, 2)
    img_paths_seg_flow = compareGradInfoLayer(dirs[:2], out_dirs[:2], model_num, num_iters, file_pre_grad,
                                              layer_range, 'seg_grad_mag')
    layer_range = range(26, 32)
    num_iters = range(2, 21, 2)
    img_paths_score_flow = compareGradInfoLayer(dirs[:2], out_dirs[:2], model_num, num_iters, file_pre_grad,
                                                layer_range, 'score_grad_mag')
    # layer_range=range(13,18)
    layer_range = [13, 14, 17]
    num_iters = range(1, 21, 2)
    img_paths_seg_noflow = compareGradInfoLayer(dirs[2:], out_dirs[2:], model_num, num_iters, file_pre_grad,
                                                layer_range, 'seg_grad_mag')
    layer_range = range(13, 18)
    num_iters = range(2, 21, 2)
    img_paths_score_noflow = compareGradInfoLayer(dirs[2:], out_dirs[2:], model_num, num_iters, file_pre_grad,
                                                  layer_range, 'score_grad_mag')
    img_paths = [img_paths_seg_flow, img_paths_score_flow, img_paths_seg_noflow, img_paths_score_noflow]
    img_paths = [[util.getRelPath(path_curr, '/disk3') for path_curr in list_curr] for list_curr in img_paths]
    # print img_paths
    captions = []
    # path='../../../../../../..//maheen_data/headC_160/noFlow_human_debug/incorrect/plots/score_grad_mag.png'
    for list_curr in img_paths:
        captions_curr = []
        for path in list_curr:
            path_split = path.split('/')
            caption = path_split[-4] + ' ' + path_split[-3]
            captions_curr.append(caption)
            print caption
        captions.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths, captions, height=300, width=300)
    print out_file_html.replace('/disk3', 'vision3.cs.ucdavis.edu:1001')
    # out_files=compareGradInfoLayer(dirs,out_dirs,model_num,num_iters,file_pre_grad,layer_range)
    # break;
    return

    params = [('/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax/intermediate',
               '/disk3/maheen_data/headC_160/withFlow_human_debug/correct', '40'),
              ('/disk3/maheen_data/headC_160/withFlow_gaussian_human/intermediate',
               '/disk3/maheen_data/headC_160/withFlow_human_debug/incorrect', '56')]
    out_file_commands_pre = '/disk3/maheen_data/headC_160/withFlow_human_debug/debug_commands_'
    path_to_train_file = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_debug.th'
    # params=[('/disk3/maheen_data/headC_160/noFlow_gaussian_human_softmax/intermediate_res','/disk3/maheen_data/headC_160/noFlow_human_debug/correct','40'),
    #         ('/disk3/maheen_data/headC_160/noFlow_gaussian_human/intermediate','/disk3/maheen_data/headC_160/noFlow_human_debug/incorrect','56')];
    # [util.mkdir(params_curr[1]) for params_curr in params];
    # out_file_commands_pre='/disk3/maheen_data/headC_160/noFlow_human_debug/debug_commands_';
    # path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_debug.th'
    model_num = range(5000, 100000, 20000)
    model_num.append(100000)
    writeCommandsForTrainDebug(params, path_to_train_file, out_file_commands_pre, model_num)
    return

    print 'hello'
    dir_debug_with_flow = '/disk3/maheen_data/headC_160/withFlow_human_debug'
    dir_debug_no_flow = '/disk3/maheen_data/headC_160/noFlow_human_debug'
    score_dir = 'score_gradient_start'
    seg_dir = 'seg_gradient_start'
    dirs = [os.path.join(dir_debug_no_flow, score_dir), os.path.join(dir_debug_with_flow, score_dir),
            os.path.join(dir_debug_no_flow, seg_dir), os.path.join(dir_debug_with_flow, seg_dir)]
    for dir_curr in dirs:
        np_files = util.getFilesInFolder(dir_curr, '.npy')
        np_nums = [int(file_curr[file_curr.rindex('_') + 1:file_curr.rindex('.')]) for file_curr in np_files]
        sort_idx = np.argsort(np_nums)
        np_files = np.array(np_files)[sort_idx]
        gradients = getGradientMags(np_files)
        print dir_curr
        print len(gradients), np.mean(gradients), min(gradients), max(gradients)
def main():
    out_dir = '/disk2/mayExperiments/eval_nC_zS_youtube'
    model_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/opt_noFix_conv1_conv2_conv3_conv4_conv5_llr__iter_50000.caffemodel'
    clusters_file = '/disk3/maheen_data/youtube_train_40/clusters_100000.mat'
    flo_dir_meta = os.path.join(out_dir, 'ft_youtube_model')
    flo_dir = os.path.join(flo_dir_meta, 'flo')
    match_info_file = os.path.join(flo_dir, 'match_info.txt')
    train_val_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/train_val_conv1_conv2_conv3_conv4_conv5.prototxt'
    out_dir_flo_viz_org = os.path.join(flo_dir_meta, 'flo_viz')
    gpu = 0

    h5_files, img_files, img_sizes = po.parseInfoFile(match_info_file)
    file_names = util.getFileNames(img_files, ext=False)
    out_dirs_flo_viz = [out_dir_flo_viz_org]
    n_range = [5, 10]
    n_range_str = '_'.join([str(n) for n in n_range])
    out_file_html = os.path.join(out_dir, 'flo_n_' + n_range_str + '.html')

    # for each n, write .flo files built from the top-n cluster predictions
    # and render them with the flo visualization script.
    for n in n_range:
        out_dir_flo = os.path.join(flo_dir_meta, 'flo_n_' + str(n))
        out_dir_flo_viz = os.path.join(flo_dir_meta, 'flo_n_' + str(n) + '_viz')
        out_file_sh = out_dir_flo_viz + '.sh'
        util.mkdir(out_dir_flo)
        util.mkdir(out_dir_flo_viz)
        out_dirs_flo_viz.append(out_dir_flo_viz)
        flo_files = [os.path.join(out_dir_flo, file_name + '.flo') for file_name in file_names]
        for h5_file, img_size, flo_file in zip(h5_files, img_sizes, flo_files):
            flow_resize = getMatFromH5TopN(h5_file, img_size, clusters_file, n)
            util.writeFlowFile(flow_resize, flo_file)
        out_files_viz = [os.path.join(out_dir_flo_viz, file_name + '.png') for file_name in file_names]
        po.writeScriptToGetFloViz(flo_files, out_files_viz, out_file_sh)
        subprocess.call('sh ' + out_file_sh, shell=True)

    img_paths_html = []
    captions_html = []
    for img_file, file_name in zip(img_files, file_names):
        row_curr = [img_file]
        caption_curr = [file_name]
        for out_dir_flo_curr in out_dirs_flo_viz:
            file_curr = os.path.join(out_dir_flo_curr, file_name + '.png')
            row_curr.append(file_curr)
            caption_curr.append(util.getFileNames([out_dir_flo_curr])[0])
        row_curr = [util.getRelPath(f) for f in row_curr]
        img_paths_html.append(row_curr)
        captions_html.append(caption_curr)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
    print out_file_html.replace('/disk2', 'vision3.cs.ucdavis.edu:1000/')
    print 'hello'
def main():
    out_dir_training_files = '/disk3/maheen_data/flo_only_training_files'
    # model='/disk2/aprilExperiments/headC_160/withFlow_gaussian_xavier_fixed_unitCombo_floStumpPretrained.dat'
    # out_dir='/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_floStumpPretrained_fullTraining'
    model = '/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_floStumpPretrained_fullTraining/intermediate/model_all_45000.dat'
    out_dir = '/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_floStumpPretrained_fullTraining_res'
    util.mkdir(out_dir)
    out_file_pos_train = os.path.join(out_dir_training_files, 'pos_human_flo_train.txt')
    out_file_neg_train = os.path.join(out_dir_training_files, 'neg_human_flo_train.txt')
    out_file_pos_val = os.path.join(out_dir_training_files, 'pos_human_flo_val.txt')
    out_file_neg_val = os.path.join(out_dir_training_files, 'neg_human_flo_val.txt')
    out_file_neg = '/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow.txt'
    out_file_pos = '/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow.txt'
    # shortenTrainingData(out_file_pos,out_file_pos_train,0.9,val_txt_new=out_file_pos_val)
    # shortenTrainingData(out_file_neg,out_file_neg_train,0.9,val_txt_new=out_file_neg_val)
    learningRate = 0.00001
    path_to_th = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_cl.th'
    iterations = 55000
    out_log_curr = os.path.join(out_dir, 'log.txt')
    out_im_pre = os.path.join(out_dir, 'loss')
    command_curr = ('python script_visualizeLoss.py -log_file '
                    + '/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_floStumpPretrained_fullTraining/log.txt'
                    + ' ' + out_log_curr + ' -out_file_pre ' + out_im_pre + ' -val ')
    print command_curr
    os.system(command_curr)
    return
    command = printCommandToTrain(path_to_th, model, out_dir, iterations=iterations, learningRate=learningRate,
                                  pos_file=out_file_pos_train, pos_val_file=out_file_pos_val,
                                  neg_file=out_file_neg_train, neg_val_file=out_file_neg_val, testAfter=40)
    # util.writeFile(os.path.join(out_dir,'train_command.sh'),[command]);
    return

    out_dir_training_files = '/disk3/maheen_data/flo_only_training_files'
    num = 64
    out_file_neg = '/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow.txt'
    out_file_pos = '/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow.txt'
    out_file_pos_mini = os.path.join(out_dir_training_files, 'pos_human_withflo_' + str(num) + '.txt')
    out_file_neg_mini = os.path.join(out_dir_training_files, 'neg_human_withflo_' + str(num) + '.txt')
    # shortenTrainingData(out_file_pos,out_file_pos_mini,num);
    # shortenTrainingData(out_file_neg,out_file_neg_mini,num);
    path_to_th = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_cl.th'
    model = '/disk2/aprilExperiments/headC_160/withFlow_gaussian_xavier_fixed.dat'
    out_dir = '/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_lr_search'
    out_file_html = os.path.join(out_dir, 'comparison.html')
    out_file_sh = os.path.join(out_dir, 'lr_search.sh')
    util.mkdir(out_dir)
    iterations = 1000
    commands = []
    img_paths = []
    captions = []
    for learningRate in [0.001, 0.0001, 0.00001, 0.000001, 0.0000001]:
        out_dir_curr = os.path.join(out_dir, str(learningRate))
        util.mkdir(out_dir_curr)
        out_log_curr = os.path.join(out_dir_curr, 'log.txt')
        out_im_pre = os.path.join(out_dir_curr, 'loss')
        command_curr = 'python script_visualizeLoss.py -log_file ' + out_log_curr + ' -out_file_pre ' + out_im_pre
        print command_curr
        os.system(command_curr)
        img_paths_curr = [util.getRelPath(out_im_pre + post, '/disk3') for post in ['_score.png', '_seg.png']]
        captions_curr = [str(learningRate) + ' ' + file_curr for file_curr in util.getFileNames(img_paths_curr)]
        captions.append(captions_curr)
        img_paths.append(img_paths_curr)
        # command=printCommandToTrain(path_to_th,model,out_dir_curr,iterations=iterations,learningRate=learningRate,pos_file=out_file_pos_mini,
        #                             pos_val_file=out_file_pos_mini,neg_file=out_file_neg_mini,neg_val_file=out_file_neg_mini);
        # commands.append(command);
        # break;
    # util.writeFile(out_file_sh,commands);
    # print 'sh '+out_file_sh
    visualize.writeHTML(out_file_html, img_paths, captions)
    return

    out_dir_training_files = '/disk3/maheen_data/flo_only_training_files'
    out_dir = '/disk3/maheen_data/headC_160/onlyFlow_human_xavier_fix_full'
    out_file_html = os.path.join(out_dir, 'comparison_loss.html')
    out_file_sh = os.path.join(out_dir, 'lr_commands.sh')
    util.mkdir(out_dir_training_files)
    out_file_pos = os.path.join(out_dir_training_files, 'pos_human_only_flo.txt')
    out_file_neg = os.path.join(out_dir_training_files, 'neg_human_only_flo.txt')
    out_file_pos_train = os.path.join(out_dir_training_files, 'pos_human_only_flo_train.txt')
    out_file_neg_train = os.path.join(out_dir_training_files, 'neg_human_only_flo_train.txt')
    out_file_pos_val = os.path.join(out_dir_training_files, 'pos_human_only_flo_val.txt')
    out_file_neg_val = os.path.join(out_dir_training_files, 'neg_human_only_flo_val.txt')
    # shortenTrainingData(out_file_pos,out_file_pos_train,0.9,val_txt_new=out_file_pos_val)
    # shortenTrainingData(out_file_neg,out_file_neg_train,0.9,val_txt_new=out_file_neg_val)
    path_to_th = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_cl.th'
    model = '/disk3/maheen_data/headC_160/onlyFlow_human_xavier_fix_full/1e-05/intermediate/model_all_35000.dat'
    # out_file_html=os.path.join(out_dir,'loss_comparison.html');
    iterations = 65000
    testAfter = 40
    util.mkdir(out_dir)
    commands = []
    img_paths = []
    captions = []
    for learningRate in [0.000001]:
        out_dir_curr = os.path.join(out_dir, str(learningRate * 10) + '_res')
        util.mkdir(out_dir_curr)
        # command_curr='python script_visualizeLoss.py '+os.path.join(out_dir_curr,'log.txt')+' '+os.path.join(out_dir_curr,'loss');
        # os.system(command_curr);
        # img_paths.append([util.getRelPath(os.path.join(out_dir_curr,'loss'+post_curr),'/disk3') for post_curr in ['_seg.png','_score.png']]);
        # captions.append([str(learningRate)+' '+file_name for file_name in util.getFileNames(img_paths[-1])]);
        command = printCommandToTrain(path_to_th, model, out_dir_curr, iterations=iterations,
                                      learningRate=learningRate, pos_file=out_file_pos_train,
                                      pos_val_file=out_file_pos_val, neg_file=out_file_neg_train,
                                      neg_val_file=out_file_neg_val, testAfter=testAfter)
        print command
        # commands.append(command);
    # util.writeFile(out_file_sh,commands);
    # print out_file_sh
    return

    out_dir_training_files = '/disk3/maheen_data/flo_only_training_files'
    out_dir = '/disk3/maheen_data/headC_160/onlyFlow_human_xavier_fix_full'
    out_file_html = os.path.join(out_dir, 'comparison_loss.html')
    out_file_sh = os.path.join(out_dir, 'lr_commands.sh')
    util.mkdir(out_dir_training_files)
    out_file_pos = os.path.join(out_dir_training_files, 'pos_human_only_flo.txt')
    out_file_neg = os.path.join(out_dir_training_files, 'neg_human_only_flo.txt')
    out_file_pos_train = os.path.join(out_dir_training_files, 'pos_human_only_flo_train.txt')
    out_file_neg_train = os.path.join(out_dir_training_files, 'neg_human_only_flo_train.txt')
    out_file_pos_val = os.path.join(out_dir_training_files, 'pos_human_only_flo_val.txt')
    out_file_neg_val = os.path.join(out_dir_training_files, 'neg_human_only_flo_val.txt')
    # shortenTrainingData(out_file_pos,out_file_pos_train,0.9,val_txt_new=out_file_pos_val)
    # shortenTrainingData(out_file_neg,out_file_neg_train,0.9,val_txt_new=out_file_neg_val)
    path_to_th = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_cl.th'
    model = '/disk2/aprilExperiments/headC_160/noFlow_gaussian_xavier_fixed.dat'
    out_file_html = os.path.join(out_dir, 'loss_comparison.html')
    iterations = 100000
    testAfter = 40
    util.mkdir(out_dir)
    commands = []
    img_paths = []
    captions = []
    for learningRate in [0.00001, 0.000001]:
        out_dir_curr = os.path.join(out_dir, str(learningRate))
        util.mkdir(out_dir_curr)
        command_curr = ('python script_visualizeLoss.py ' + os.path.join(out_dir_curr, 'log.txt')
                        + ' ' + os.path.join(out_dir_curr, 'loss'))
        os.system(command_curr)
        img_paths.append([util.getRelPath(os.path.join(out_dir_curr, 'loss' + post_curr), '/disk3')
                          for post_curr in ['_seg.png', '_score.png']])
        captions.append([str(learningRate) + ' ' + file_name for file_name in util.getFileNames(img_paths[-1])])
        # command=printCommandToTrain(path_to_th,model,out_dir_curr,iterations=iterations,learningRate=learningRate,pos_file=out_file_pos_train,
        #                             pos_val_file=out_file_pos_val,neg_file=out_file_neg_train,neg_val_file=out_file_neg_val,testAfter=testAfter);
        # commands.append(command);
    # util.writeFile(out_file_sh,commands);
    # print out_file_sh
    visualize.writeHTML(out_file_html, img_paths, captions, 500, 500)
    return

    num = 64
    out_file_pos_mini = os.path.join(out_dir_training_files, 'pos_human_only_flo_' + str(num) + '.txt')
    out_file_neg_mini = os.path.join(out_dir_training_files, 'neg_human_only_flo_' + str(num) + '.txt')
    # shortenTrainingData(out_file_pos,out_file_pos_mini,num);
    # shortenTrainingData(out_file_neg,out_file_neg_mini,num);
    path_to_th = '/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_cl.th'
    model = '/disk2/aprilExperiments/headC_160/noFlow_gaussian_xavier_fixed_unitCombo.dat'
    iterations = 1000
    util.mkdir(out_dir)
    commands = []
    img_paths = []
    captions = []
    for learningRate in [0.001, 0.0001, 0.00001, 0.000001, 0.0000001]:
        out_dir_curr = os.path.join(out_dir, str(learningRate))
        util.mkdir(out_dir_curr)
        out_log_curr = os.path.join(out_dir_curr, 'log.txt')
        out_im_pre = os.path.join(out_dir_curr, 'loss')
        # command_curr='python script_visualizeLoss.py '+out_log_curr+' '+out_im_pre;
        # print command_curr;
        # os.system(command_curr);
        # img_paths_curr=[util.getRelPath(out_im_pre+post,'/disk3') for post in ['_score.png','_seg.png']];
        # captions_curr=[str(learningRate)+' '+file_curr for file_curr in util.getFileNames(img_paths_curr)];
        # captions.append(captions_curr);
        # img_paths.append(img_paths_curr);
        command = printCommandToTrain(path_to_th, model, out_dir_curr, iterations=iterations,
                                      learningRate=learningRate, pos_file=out_file_pos_mini,
                                      pos_val_file=out_file_pos_mini, neg_file=out_file_neg_mini,
                                      neg_val_file=out_file_neg_mini)
        print command
        commands.append(command)
    # util.writeFile(out_file_sh,commands);
    # print out_file_sh
    # visualize.writeHTML(out_file_html,img_paths,captions);
    return

    dir_curr = '/disk3/maheen_data/headC_160/withFlow_xavier_16_score/intermediate'
    range_files = range(2000, 96000, 2000)
    to_del = range(4000, 94000, 4000)
    for model_num in to_del:
        file_curr = os.path.join(dir_curr, 'model_all_' + str(model_num) + '.dat')
        os.remove(file_curr)
        print file_curr
        # assert os.path.exists(file_curr);
    return

    out_dir = '/disk3/maheen_data/flo_only_training_files'
    util.mkdir(out_dir)
    out_file_pos = os.path.join(out_dir, 'pos_human_only_flo.txt')
    out_file_neg = os.path.join(out_dir, 'neg_human_only_flo.txt')
    neg_flo = '/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow.txt'
    pos_flo = '/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow.txt'
    convertFileToFloOnly(neg_flo, out_file_neg)
    convertFileToFloOnly(pos_flo, out_file_pos)
    return

    # unreachable scratch: pos_all/neg_all are not defined above this point.
    pos_all = util.readLinesFromFile(pos_all)
    pos_flo = util.readLinesFromFile(pos_flo)
    neg_all = util.readLinesFromFile(neg_all)
    neg_flo = util.readLinesFromFile(neg_flo)
    print pos_all[0]
    print pos_flo[0]
    print '___'
    print neg_all[0]
    print neg_flo[0]
    return

    pos_human = '/disk2/aprilExperiments/positives_160.txt'
    neg_human = '/disk2/marchExperiments/deep_proposals/negatives.txt'
    pos_human_small = '/disk2/aprilExperiments/positives_160_oneHundreth.txt'
    neg_human_small = '/disk2/marchExperiments/deep_proposals/negatives_oneHundreth.txt'
    ratio_txt = 100
    shortenTrainingData(pos_human, pos_human_small, ratio_txt)
    shortenTrainingData(neg_human, neg_human_small, ratio_txt)
    return

    files = ['/disk3/maheen_data/headC_160/withFlow_human_debug/correct/100000/im_layer_weights_1.npy',
             '/disk3/maheen_data/headC_160/withFlow_human_debug/correct/100000/flo_layer_weights_1.npy',
             '/disk3/maheen_data/headC_160/withFlow_human_debug/incorrect/100000/im_layer_weights_1.npy',
             '/disk3/maheen_data/headC_160/withFlow_human_debug/incorrect/100000/flo_layer_weights_1.npy']
    for file_curr in files:
        out_dir_curr = os.path.join(file_curr.rsplit('/', 2)[0], 'plots')
        weights = np.load(file_curr)
        # print weights.shape;
        # weights=weights[:,:,:2,:];
        # print weights.shape
        # weights=weights.transpose((0,2,3,1));
        # print weights.shape
        data = vis_square(weights)
        # print data.shape
        out_file = os.path.join(out_dir_curr, util.getFileNames([file_curr], ext=False)[0] + '.png')
        print out_file
        scipy.misc.imsave(out_file, data)
def main(): # data='/home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt' # # /home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt # to_search=\ # ['/home/SSD3/maheen-data/horse_project/data_check/horse/im/horses_pascal_selected/2009_004662.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/horses_pascal_selected/2009_004662.npy', # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_11539.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_11539.npy', # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_16786.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_16786.npy', # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_4338.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_4338.npy'] # data=util.readLinesFromFile(data); # print data[0]; # to_search=[file_curr.replace('data_check','data_resize') for file_curr in to_search]; # idx_lines=[data.index(line_curr) for line_curr in to_search if line_curr in data]; # print idx_lines; # for idx_line_curr in idx_lines: # print 'batch_no',(idx_line_curr)/64 # # npy_files=[file_curr[file_curr.index(' ')+1:] for file_curr in data]; # # print npy_files[0]; # # print len(npy_files); # # p=multiprocessing.Pool(multiprocessing.cpu_count()); # # problem_files=p.map(findProblemNPYMP,npy_files); # # problem_files=[file_curr for file_curr in problem_files if file_curr is not None]; # # print (len(problem_files)); # return # data='/home/laoreja/new-deep-landmark/train/vanilla/aflw_224/aflw_vanilla_val_224.txt'; # data='/home/laoreja/new-deep-landmark/train/vanilla/aflw_224/aflw_vanilla_train_224_weight.txt'; # data=util.readLinesFromFile(data); # print data; # total=0; # for h5_file_curr in data: # with h5py.File(h5_file_curr,'r') as hf: # print('List of arrays in this file: ', hf.keys()) # data = hf.get('confidence') # np_data = np.array(data) # total=total+np_data.shape[0]; # print('Shape of the array dataset_1: ', np_data.shape) # print total; # return # horse_path='/home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt' # human_path_noIm='/home/SSD3/maheen-data/horse_project/data_resize/aflw/matches_5_train_allKP_noIm.txt' # human_path='/home/SSD3/maheen-data/horse_project/data_resize/aflw/matches_5_train_allKP.txt' # paths=[horse_path,human_path_noIm,human_path]; # out_files=[file_curr[:file_curr.rindex('.')]+'_dummy.txt' for file_curr in paths]; # for file_curr,out_file_curr in zip(paths,out_files): # data_curr=util.readLinesFromFile(file_curr); # data_curr=data_curr[0:50:5]; # # print data_curr; # print len(data_curr); # util.writeFile(out_file_curr,data_curr); # print out_file_curr; # return # im_path= "/home/SSD3/maheen-data/horse_project/data_resize/horse/im/_04_Aug16_png/horse+head12.jpg" # # 2 : "/home/SSD3/maheen-data/horse_project/data_resize/horse/npy/_04_Aug16_png/horse+head12.npy" # # "/home/SSD3/maheen-data/horse_project/data_resize/aflw/im/0/image67102_20650.jpg" # np_path="/home/SSD3/maheen-data/horse_project/data_resize/horse/npy/_04_Aug16_png/horse+head12.npy" # # "/home/SSD3/maheen-data/horse_project/data_resize/aflw/npy/0/image67102_20650.npy" # # im=scipy.misc.read(im_path); # im=cv2.imread(im_path); # labels=np.load(np_path); # print labels # for i in xrange(labels.shape[0]): # cv2.circle(im, (labels[i][0], 
labels[i][1]), 2, (0,0,255), -1) # cv2.imwrite('/home/SSD3/maheen-data/temp/check.png', im) # return # path_to_th='/home/maheenrashid/Downloads/horses/torch/test_tps_cl.th'; # iterations=10; # out_dir_models='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam' # model_pre=os.path.join(out_dir_models,'intermediate','model_all_'); # model_post='.dat'; # range_models=range(450,4500+1,450); # out_dir_meta=os.path.join(out_dir_models,'test_overtime'); # batch_size=60; # # commands=generateTPSTestCommands(path_to_th,batch_size,iterations,model_pre,model_post,range_models,out_dir_meta) # # print len(commands); # # print commands[0]; # # out_file_commands=os.path.join(out_dir_meta+'.sh'); # # util.writeFile(out_file_commands,commands); # dir_server='/home/SSD3/maheen-data'; # range_batches=range(1,10); # # batch_size=60; # range_images=range(1,61,5); # img_dir_meta='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_overtime' # img_dir=[os.path.join(img_dir_meta,'model_all_'+str(range_model_curr)) for range_model_curr in range_models] # out_file_html='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz.html' # file_post=['_horse.jpg','_human.jpg','_gtwarp.jpg','_predwarp.jpg'] # loss_post='_loss.npy'; # out_file_html=img_dir_meta+'.html'; # img_caption_pre=[str(model_num) for model_num in range_models]; # comparativeLossViz(img_dir,file_post,loss_post,range_batches,range_images,out_file_html,dir_server,img_caption_pre) # return dir_server = '/home/SSD3/maheen-data' range_batches = range(1, 9) # batch_size=60; range_images = range(1, 129, 5) img_dir = ['/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz/'] # out_file_html='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz.html' img_dir = [ '/home/SSD3/maheen-data/horse_project/tps_train_allKP_adam/test_viz' ] out_file_html = '/home/SSD3/maheen-data/horse_project/tps_train_allKP_adam/test_viz.html' file_post = ['_horse.jpg', '_human.jpg', '_gtwarp.jpg', '_predwarp.jpg'] loss_post = '_loss.npy' comparativeLossViz(img_dir, file_post, loss_post, range_batches, range_images, out_file_html, dir_server) return img_files = [] caption_files = [] out_dir = '/home/SSD3/maheen-data/training_kp_withWarp_test_debug_tps_adam' out_dir = '/home/SSD3/maheen-data/testing_5_kp_withWarp_fixed_adam_debug' out_dir = '/home/SSD3/maheen-data/training_5_kp_withWarp_fixed_adam__1e-05/test' dir_server = '/home/SSD3/maheen-data' out_file_html = os.path.join(out_dir, 'viz.html') for i in range(1, 94): im_file = os.path.join(out_dir, str(i) + '_org.jpg') warp_file = os.path.join(out_dir, str(i) + '_warp.jpg') im_file_small = os.path.join(out_dir, str(i) + '_small_org.jpg') warp_file_small = os.path.join(out_dir, str(i) + '_small_warp.jpg') im_file = util.getRelPath(im_file, dir_server) warp_file = util.getRelPath(warp_file, dir_server) im_file_small = util.getRelPath(im_file_small, dir_server) warp_file_small = util.getRelPath(warp_file_small, dir_server) img_files.append([im_file, warp_file]) # ,im_file_small,warp_file_small]); caption_files.append([str(i) + ' org', str(i) + ' warp']) # ,'small_org','small_warp']); visualize.writeHTML(out_file_html, img_files, caption_files, 224, 224) return out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw' num_neighbors = 5 out_file_human = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt') out_file_human_new = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP_noIm.txt') modifyHumanFile(out_file_human, out_file_human_new) # 
out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; out_file_human = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP.txt') out_file_human_new = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP_noIm.txt') modifyHumanFile(out_file_human, out_file_human_new) return # matches_file='/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt' # face_data_file='/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'; # # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt'; # face_data_list_file='/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'; # out_dir_meta_horse='/home/SSD3/maheen-data/horse_project/horse'; # out_dir_meta_horse_list=[os.path.join(out_dir_meta_horse,'im'),os.path.join(out_dir_meta_horse,'npy')]; # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; # out_dir_meta_face_list=[os.path.join(out_dir_meta_face,'im'),os.path.join(out_dir_meta_face,'npy')]; # out_dir_meta_face_old='/home/SSD3/maheen-data/horse_project/face'; # out_dir_meta_face_old_list=[os.path.join(out_dir_meta_face_old,'im'),os.path.join(out_dir_meta_face_old,'npy')]; # num_neighbors=5; # out_file_face=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP.txt'); # out_file_horse=os.path.join(out_dir_meta_horse,'matches_'+str(num_neighbors)+'_train_fiveKP.txt'); # missing_files=makeMatchFile(num_neighbors,matches_file,face_data_file,out_dir_meta_horse_list,out_dir_meta_face_list,out_file_horse,out_file_face,out_dir_meta_face_old_list) # return # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; # num_neighbors=5; # out_file_human=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_val_fiveKP.txt'); # out_file_human_new=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_val_fiveKP_noIm.txt'); # # modifyHumanFile(out_file_human,out_file_human_new) # # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; # out_file_human=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP.txt'); # out_file_human_new=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP_noIm.txt'); # # modifyHumanFile(out_file_human,out_file_human_new) # print out_file_human_new; # return # img_files=[]; # caption_files=[]; # out_dir='/home/SSD3/maheen-data/training_kp_withWarp_test_final' # dir_server='/home/SSD3/maheen-data'; # out_file_html=os.path.join(out_dir,'viz.html'); # for i in range(1,94): # im_file=os.path.join(out_dir,str(i)+'.jpg'); # warp_file=os.path.join(out_dir,str(i)+'_warp.jpg'); # im_file=util.getRelPath(im_file,dir_server); # warp_file=util.getRelPath(warp_file,dir_server); # img_files.append([im_file,warp_file]); # caption_files.append(['org','warp']); # visualize.writeHTML(out_file_html,img_files,caption_files,224,224); # return file_horse = '/home/SSD3/maheen-data/horse_project/horse/matches_5_train_fiveKP.txt' out_file_horse = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP.txt' lines = util.readLinesFromFile(file_horse) print len(lines) lines = list(set(lines)) print len(lines) lines = [line_curr.split(' ') for line_curr in lines] im_files = [line_curr[0] for line_curr in lines] npy_files = [line_curr[1] for line_curr in lines] out_dir_meta_old = '/home/SSD3/maheen-data/horse_project/horse/' out_dir_meta_new = '/home/SSD3/maheen-data/horse_project/horse_resize/' replace_paths = [out_dir_meta_old, out_dir_meta_new] args = [] for idx in 
    for idx in range(len(im_files)):
        im_file = im_files[idx]
        npy_file = npy_files[idx]
        out_im_file = im_file.replace(replace_paths[0], replace_paths[1])
        out_npy_file = npy_file.replace(replace_paths[0], replace_paths[1])
        args.append((idx, im_file, npy_file, out_im_file, out_npy_file))
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(resizeImAndNpy224, args)

    out_dir_meta_old = '/home/SSD3/maheen-data/horse_project/horse/'
    out_dir_meta_new = '/home/SSD3/maheen-data/horse_project/horse_resize/'
    replace_paths = [out_dir_meta_old, out_dir_meta_new]
    lines = util.readLinesFromFile(file_horse)
    lines_new = [
        line.replace(replace_paths[0], replace_paths[1]) for line in lines
    ]
    util.writeFile(out_file_horse, lines_new)

    # sanity check: draw the keypoints of one resized example on its image
    lines = util.readLinesFromFile(out_file_horse)
    print(len(lines))
    im_file = lines[90].split(' ')[0]
    im = cv2.imread(im_file, 1)
    labels = np.load(lines[90].split(' ')[1])
    for i in xrange(labels.shape[0]):
        cv2.circle(im, (labels[i][0], labels[i][1]), 2, (0, 0, 255), -1)
    cv2.imwrite('/home/SSD3/maheen-data/temp/check.png', im)
    return

    dir_out = '/home/SSD3/maheen-data/temp/horse_human/viz_transform_aflw_val'
    visualize.writeHTMLForFolder(dir_out)
    return

    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    num_neighbors = 5
    out_file_human = os.path.join(
        out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt')
    out_file_human_new = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_val_fiveKP_noIm.txt')
    modifyHumanFile(out_file_human, out_file_human_new)

    # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw';
    out_file_human = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    out_file_human_new = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP_noIm.txt')
    modifyHumanFile(out_file_human, out_file_human_new)
    return

    matches_file = '/home/laoreja/data/knn_res_new/knn_5_points_val_list.txt'
    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt';
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'
    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]
    out_dir_meta_face_old = '/home/SSD3/maheen-data/horse_project/face'
    out_dir_meta_face_old_list = [
        os.path.join(out_dir_meta_face_old, 'im'),
        os.path.join(out_dir_meta_face_old, 'npy')
    ]
    num_neighbors = 5
    out_file_face = os.path.join(
        out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt')
    out_file_horse = os.path.join(
        out_dir_meta_horse, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt')
    missing_files = makeMatchFile(num_neighbors, matches_file, face_data_file,
                                  out_dir_meta_horse_list,
                                  out_dir_meta_face_list, out_file_horse,
                                  out_file_face, out_dir_meta_face_old_list)
    return

    matches_file = '/home/laoreja/data/knn_res_new/knn_5_points_train_list.txt'
    matches_file = '/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt'
    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt';
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'
    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]
    out_dir_meta_face_old = '/home/SSD3/maheen-data/horse_project/face'
    out_dir_meta_face_old_list = [
        os.path.join(out_dir_meta_face_old, 'im'),
        os.path.join(out_dir_meta_face_old, 'npy')
    ]
    num_neighbors = 5
    out_file_face = os.path.join(
        out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    out_file_horse = os.path.join(
        out_dir_meta_horse, 'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    missing_files = makeMatchFile(num_neighbors, matches_file, face_data_file,
                                  out_dir_meta_horse_list,
                                  out_dir_meta_face_list, out_file_horse,
                                  out_file_face, out_dir_meta_face_old_list)
    return

    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    num_neighbors = 5
    out_file_face = os.path.join(out_dir_meta_face,
                                 'matches_' + str(num_neighbors) + '.txt')
    out_file_face_new = os.path.join(
        out_dir_meta_face, 'matches_noIm_' + str(num_neighbors) + '.txt')
    # modifyHumanFile(out_file_face,out_file_face_new);
    # old_data=util.readLinesFromFile(out_file_face);
    # old_data=[line_curr.split(' ')[1] for line_curr in old_data];
    # new_data=util.readLinesFromFile(out_file_face_new);
    # new_data=[line_curr.split(' ')[0] for line_curr in new_data];
    # assert len(old_data)==len(new_data);
    # for i,old_line in enumerate(old_data):
    # print i;
    # assert old_line==new_data[i];
    return

    matches_file = '/home/laoreja/data/knn_res_new/5_points_list.txt'
    matches_file = '/home/laoreja/data/knn_res_new/knn_train_list.txt'
    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'
    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]
    num_neighbors = 5
    out_file_face = os.path.join(out_dir_meta_face,
                                 'matches_' + str(num_neighbors) + '.txt')
    out_file_horse = os.path.join(out_dir_meta_horse,
                                  'matches_' + str(num_neighbors) + '.txt')
    makeMatchFile(num_neighbors, matches_file, face_data_file,
                  out_dir_meta_horse_list, out_dir_meta_face_list,
                  out_file_horse, out_file_face)
    return

    # script_saveTrainTxt()
    # dir_viz='/home/SSD3/maheen-data/temp/horse_human/viz_transform_aflw';
    # visualize.writeHTMLForFolder(dir_viz,'.jpg');
    return

    out_dir_meta = '/home/SSD3/maheen-data'
    face_dir = 'aflw_data'
    horse_dir = 'horse_data'
    num_neighbors = 5
    path_replace_horse = [
        '/home/laoreja/data/horse-images/annotation',
        os.path.join(out_dir_meta, horse_dir, 'im')
    ]
    path_replace_face = ['/npy/', '/im/']
    new_match_file = os.path.join(out_dir_meta, face_dir,
                                  'match_' + str(num_neighbors) + '.txt')
    out_face_train_file = os.path.join(
        out_dir_meta, face_dir, 'match_' + str(num_neighbors) + '_train.txt')
    out_horse_train_file = os.path.join(
        out_dir_meta, horse_dir, 'match_' + str(num_neighbors) + '_train.txt')
    horse_txt_file = os.path.join(out_dir_meta, horse_dir, 'train.txt')
    face_txt_file = os.path.join(out_dir_meta, face_dir, 'train.txt')
    horse_train = util.readLinesFromFile(horse_txt_file)
    horse_train_just_beginning = [
        horse_curr.split(' ')[0] for horse_curr in horse_train
    ]
    horse_train_just_beginning = [
        horse_curr[:horse_curr.rindex('.')]
        for horse_curr in horse_train_just_beginning
    ]
    print horse_train_just_beginning[0]
    face_train = util.readLinesFromFile(face_txt_file)
    face_train_just_beginning = [
        face_curr.split(' ')[0] for face_curr in face_train
    ]
    face_train_just_beginning = [
        face_curr[:face_curr.rindex('.')]
        for face_curr in face_train_just_beginning
    ]
    print len(horse_train)
    print horse_train[0]
    print len(face_train)
    print face_train[0]
    # return

    # pair each horse training line with the training lines of its
    # num_neighbors matched faces, skipping horses not in the train split
    matches = util.readLinesFromFile(new_match_file)
    print(len(matches))
    matches = [match_curr.split(' ') for match_curr in matches]
    horse_matches = []
    face_matches = []
    for match_curr in matches:
        assert len(match_curr) == num_neighbors + 1
        horse_curr = match_curr[0]
        horse_curr_path, horse_name = os.path.split(horse_curr)
        if horse_curr_path[-3:] == 'gxy':
            horse_curr_path = horse_curr_path[:-3]
        horse_curr_path = horse_curr_path.replace(path_replace_horse[0],
                                                  path_replace_horse[1])
        horse_curr = os.path.join(horse_curr_path,
                                  horse_name[:horse_name.rindex('.')])
        if horse_curr in horse_train_just_beginning:
            horse_match = horse_train[horse_train_just_beginning.index(
                horse_curr)]
        else:
            # print horse_curr
            # print match_curr[0];
            # raw_input();
            continue
        for face_curr in match_curr[1:]:
            face_curr = face_curr[:face_curr.rindex('.')]
            face_curr = face_curr.replace(path_replace_face[0],
                                          path_replace_face[1])
            face_match = face_train[face_train_just_beginning.index(face_curr)]
            horse_matches.append(horse_match)
            face_matches.append(face_match)
        # print match_curr;
        # print match_curr[0];
        # for idx,i in enumerate(match_curr[1:]):
        # print idx,face_matches[idx],i,horse_matches[idx]
    assert len(face_matches) == len(horse_matches)
    print len(face_matches)
    util.writeFile(out_face_train_file, face_matches)
    util.writeFile(out_horse_train_file, horse_matches)
    return

    # face_dir='/home/SSD3/maheen-data/face_data';
    # train_txt=os.path.join(face_dir,'train.txt');
    # files=util.readLinesFromFile(train_txt);
    # files=[file_curr.split(' ') for file_curr in files];
    # [im_files,npy_files]=zip(*files);
    # for idx,npy_file in enumerate(npy_files):
    # print idx,len(npy_files);
    # assert os.path.exists(npy_file);
    # assert np.load(npy_file).shape[1]==3;
    # print len(im_files);
    # print (im_files[0]);
    # print len(npy_files);
    # print (npy_files[0]);
    dir_viz = '/home/SSD3/maheen-data/temp/horse_human/viz_transform'
    visualize.writeHTMLForFolder(dir_viz, '.jpg')
    return

    horse_data = '/home/SSD3/maheen-data/horse_data'
    new_face_data = '/home/SSD3/maheen-data/face_data'
    old_txt = 'train.txt'
    num_to_keep = 10
    new_txt = 'train_' + str(num_to_keep) + '.txt'
    for data_type in [horse_data, new_face_data]:
        lines_new = util.readLinesFromFile(os.path.join(data_type, old_txt))
        random.shuffle(lines_new)
        lines_new = lines_new[:num_to_keep]
        file_new = os.path.join(data_type, new_txt)
        util.writeFile(file_new, lines_new)
        print len(lines_new), file_new
    return
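# ---------------------------------------------------------------------------
# modifyHumanFile is called in several places above but is not defined in this
# file. A minimal sketch of what it appears to do, judging by how the
# *_noIm.txt outputs are consumed (their first token is a .npy path, the image
# path is recovered by swapping '/npy/' for '/im/', and the commented
# verification above checks old column 1 against new column 0): it drops the
# leading image column from each '<im_path> <npy_path> ...' line. The _sketch
# suffix marks this as an assumption, not the project's implementation.
# ---------------------------------------------------------------------------
def modifyHumanFile_sketch(in_file, out_file):
    lines = util.readLinesFromFile(in_file)
    # keep everything except the leading image path
    lines_new = [' '.join(line_curr.split(' ')[1:]) for line_curr in lines]
    util.writeFile(out_file, lines_new)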
def main():
    train_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/train.txt'
    files = util.readLinesFromFile(train_file)
    random.shuffle(files)
    files = files[:100]
    img_paths_all = [line[:line.index(' ')] for line in files]
    tif_paths_all = [line[line.index(' ') + 1:] for line in files]
    num_clusters = 40
    out_dir = '/disk3/maheen_data/debug_networks'
    util.mkdir(out_dir)
    out_dir_tif = os.path.join(out_dir, 'tif')
    util.mkdir(out_dir_tif)
    out_file_html = os.path.join(out_dir, 'tif_suppressCluster.html')
    out_files_tif_x = [os.path.join(out_dir_tif, img_name + '_x.png')
                       for img_name in util.getFileNames(tif_paths_all, ext=False)]
    out_files_tif_y = [os.path.join(out_dir_tif, img_name + '_y.png')
                       for img_name in util.getFileNames(tif_paths_all, ext=False)]
    for tif_path, out_file_x, out_file_y in zip(tif_paths_all, out_files_tif_x, out_files_tif_y):
        # print tif_path
        tif = scipy.misc.imread(tif_path)
        # print np.min(tif[:,:,:2]),np.max(tif[:,:,:2])
        assert np.min(tif[:, :, :2]) > 0 and np.max(tif[:, :, :2]) < num_clusters + 1
        saveTifGray(tif, out_file_x, out_file_y, num_clusters)
    makeImTifViz(img_paths_all, tif_paths_all, out_file_html, out_dir_tif,
                 num_clusters=40, disk_path='disk3')
    return

    clusters_file = '/disk2/mayExperiments/youtube_subset_new_cluster/clusters.mat'
    clusters_ucf = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat'
    min_idx_new, C_new = script_findMinCluster(clusters_file, new_flag=True)
    min_idx_ucf, C_ucf = script_findMinCluster(clusters_ucf, new_flag=False)
    print min_idx_new, min_idx_ucf
    return

    out_dir = '/disk2/mayExperiments/imagenet_subset'
    out_file_html = out_dir + '.html'
    img_paths = util.getFilesInFolder(out_dir, '.jpg')
    tif_paths = [img_path.replace('.jpg', '.tif') for img_path in img_paths]
    out_dir_tif = os.path.join(out_dir, 'tif_viz')
    util.mkdir(out_dir_tif)
    makeImTifViz(img_paths, tif_paths, out_file_html, out_dir_tif)
    return

    train_txt = '/disk2/mayExperiments/ft_youtube_hmdb_ucfClusters/train.txt'
    out_dir = '/disk2/mayExperiments/eval_ucf_finetune'
    out_dir_tif = os.path.join(out_dir, 'train_tif_select')
    train_txt = '/disk2/mayExperiments/ft_youtube_hmdb_newClusters/train.txt'
    out_dir = '/disk2/mayExperiments/eval_newClusters_finetune'
    util.mkdir(out_dir)
    out_dir_tif = os.path.join(out_dir, 'train_tif_select')
    util.mkdir(out_dir_tif)
    num_to_pick = 20
    num_clusters = 40
    train_data = util.readLinesFromFile(train_txt)
    img_paths = [line_curr[:line_curr.index(' ')] for line_curr in train_data]
    tif_paths = [line_curr[line_curr.index(' ') + 1:] for line_curr in train_data]
    print img_paths[0].split('/')
    # return
    dataset = np.array([img_path.split('/')[4] for img_path in img_paths])
    print np.unique(dataset)
    idx_youtube = np.where(dataset == 'youtube')[0]
    classes_idx = []
    classes_idx.append(np.where(dataset != 'youtube')[0])
    img_paths_youtube = list(np.array(img_paths)[idx_youtube])
    img_paths_youtube_classes = np.array([
        img_path[:img_path.index('_')]
        for img_path in util.getFileNames(img_paths_youtube)
    ])
    for class_curr in np.unique(img_paths_youtube_classes):
        idx_rel = np.where(img_paths_youtube_classes == class_curr)[0]
        class_idx_org = idx_youtube[idx_rel]
        classes_idx.append(class_idx_org)
    # print len(idx_youtube);
    for idx, class_idx in enumerate(classes_idx):
        # print len(class_idx);
        if idx > 0:
            paths = np.array(img_paths)[class_idx]
            dataset = [img_name[:img_name.index('_')]
                       for img_name in util.getFileNames(paths)]
            # print set(dataset);
            assert len(set(dataset)) == 1
    img_paths_all = []
    tif_paths_all = []
    for class_idx in classes_idx:
        img_paths_rel = np.array(img_paths)[class_idx[:num_to_pick]]
        tif_paths_rel = np.array(tif_paths)[class_idx[:num_to_pick]]
        img_paths_all = img_paths_all + list(img_paths_rel)
        tif_paths_all = tif_paths_all + list(tif_paths_rel)
    out_files_tif_x = [os.path.join(out_dir_tif, img_name + '_x.png')
                       for img_name in util.getFileNames(tif_paths_all, ext=False)]
    out_files_tif_y = [os.path.join(out_dir_tif, img_name + '_y.png')
                       for img_name in util.getFileNames(tif_paths_all, ext=False)]
    for tif_path, out_file_x, out_file_y in zip(tif_paths_all, out_files_tif_x, out_files_tif_y):
        # print tif_path
        tif = scipy.misc.imread(tif_path)
        # print np.min(tif[:,:,:2]),np.max(tif[:,:,:2])
        assert np.min(tif[:, :, :2]) > 0 and np.max(tif[:, :, :2]) < num_clusters + 1
        saveTifGray(tif, out_file_x, out_file_y, num_clusters)
    out_file_html = out_dir_tif + '.html'
    img_paths_html = [[util.getRelPath(img_curr) for img_curr in img_list]
                      for img_list in zip(img_paths_all, out_files_tif_x, out_files_tif_y)]
    # captions_html=[[util.getFileNames([img_curr],ext=False)[0] for img_curr in img_list] for img_list in zip(img_paths_all,out_files_tif_x,out_files_tif_y)];
    captions_html = [['Image', 'Tif_x', 'Tif_y']] * len(img_paths_html)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
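# ---------------------------------------------------------------------------
# saveTifGray is used above to visualize the cluster-label .tif files but is
# not defined in this file. A minimal sketch under the assumptions the asserts
# above imply: channels 0 and 1 of the tif hold x/y flow-cluster ids in
# [1, num_clusters], and each channel is stretched to [0, 255] and written as
# a grayscale PNG. The _sketch name marks this as a guess at the behavior.
# ---------------------------------------------------------------------------
import numpy as np
import scipy.misc

def saveTifGray_sketch(tif, out_file_x, out_file_y, num_clusters):
    for channel, out_file in [(0, out_file_x), (1, out_file_y)]:
        labels = tif[:, :, channel].astype(np.float32)
        # map cluster ids 1..num_clusters onto 0..255 for viewing
        gray = (labels - 1) / (num_clusters - 1) * 255
        scipy.misc.imsave(out_file, gray.astype(np.uint8))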
def main():
    val_file = '/disk2/mayExperiments/finetuning_youtube_hmdb_llr/val_eq.txt'
    out_dir = '/disk2/mayExperiments/eval_nC_zS_youtube'
    model_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/opt_noFix_conv1_conv2_conv3_conv4_conv5_llr__iter_50000.caffemodel'
    clusters_file = '/disk3/maheen_data/youtube_train_40/clusters_100000.mat'
    train_val_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/train_val_conv1_conv2_conv3_conv4_conv5.prototxt'
    util.mkdir(out_dir)
    out_dir_model = os.path.join(out_dir, 'ft_youtube_model')
    num_to_pick = 100
    util.mkdir(out_dir_model)
    gpu = 0
    model_file_orig = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel'
    clusters_file_orig = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat'
    out_dir_model_orig = os.path.join(out_dir, 'ft_original_model')
    util.mkdir(out_dir_model_orig)
    img_paths_test = saveFlosFromValFile(out_dir_model_orig, val_file,
                                         num_to_pick, model_file_orig,
                                         clusters_file_orig, gpu,
                                         train_val_file=None, overwrite=True)

    out_file_html = os.path.join(out_dir, 'model_comparison.html')
    out_dirs_flo_viz = [os.path.join(out_dir_model_orig, 'flo_viz'),
                        os.path.join(out_dir_model, 'flo_viz')]
    out_dirs_flo_viz_captions = ['original_model', 'ft_youtube_model']
    img_paths_html = []
    captions_html = []
    img_names = util.getFileNames(img_paths_test, ext=False)
    for img_path_test, img_name in zip(img_paths_test, img_names):
        row_curr = []
        row_curr.append(util.getRelPath(img_path_test))
        for out_dir_curr in out_dirs_flo_viz:
            file_curr = os.path.join(out_dir_curr, img_name + '.png')
            row_curr.append(util.getRelPath(file_curr))
        captions_curr = [img_name] + out_dirs_flo_viz_captions
        img_paths_html.append(row_curr)
        captions_html.append(captions_curr)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html)
    return

    path_to_npy = '/disk2/mayExperiments/validation_anno/'
    path_to_im = '/disk2/ms_coco/val2014/'
    ext = '.jpg'
    lim = 100
    out_file = os.path.join(path_to_npy, 'index_' + str(lim) + '.p')
    out_dir_scratch = '/disk2/mayExperiments/flow_resolution_scratch/im_viz'
    out_dir_scratch = '/disk2/mayExperiments/flow_resolution_scratch/im_viz_new_3'
    # out_dir_scratch='/disk2/mayExperiments/flow_resolution_scratch/im_viz_padding';
    # model_file='/disk2/mayExperiments/finetuning_youtube_hmdb_llr/OptFlow_youtube_hmdb_iter_50000.caffemodel';
    out_dir_scratch = '/disk2/mayExperiments/flow_resolution_scratch/im_viz_padding_orig_model'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel'
    # out_dir_scratch='/disk2/mayExperiments/flow_resolution_scratch/im_viz_padding_ft_ucf_model';
    # out_dir_scratch='/disk2/mayExperiments/flow_resolution_scratch/im_viz_padding_ft_ucf_model';
    # util.mkdir(out_dir_scratch);
    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_ucfClusters/OptFlow_youtube_hmdb__iter_55000.caffemodel';
    out_dir_scratch = '/disk2/mayExperiments/flow_resolution_scratch/im_viz_padding_ft_ucf_model_imagenet'
    util.mkdir(out_dir_scratch)
    model_file = '/disk3/maheen_data/ft_imagenet_ucf/OptFlow_imagenet_hlr__iter_22000.caffemodel'
    clusters_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat'
    out_dir_scratch = '/disk2/mayExperiments/flow_resolution_scratch/im_viz_padding_ft_nC_sZ_youtube'
    util.mkdir(out_dir_scratch)
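    # NOTE: as elsewhere in this file, out_dir_scratch / model_file /
    # clusters_file are reassigned several times above while switching
    # between experiments; only the final assignments below feed into the
    # script_* calls at the end of this block.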
    model_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/opt_noFix_conv1_conv2_conv3_conv4_conv5_llr__iter_50000.caffemodel'
    clusters_file = '/disk3/maheen_data/youtube_train_40/clusters_100000.mat'
    train_val_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/train_val_conv1_conv2_conv3_conv4_conv5.prototxt'
    overwrite = True
    window = 160
    step_size = 16
    thresh = 0.50
    scale_info = pickle.load(open(out_file, 'rb'))
    # scale_all=['small','large','medium'];
    scale_all = ['large', 'medium']
    scale_pow = np.arange(-2, 1.5, 0.5)
    print scale_pow
    scale_images = [2**pow_curr for pow_curr in scale_pow]
    # scale_images=scale_images[];
    print scale_images
    # return
    # scale_images=[0.5,1,2];
    lim_cases = 20
    gpu = 0
    print po.NUM_THREADS
    po.NUM_THREADS = multiprocessing.cpu_count()
    print po.NUM_THREADS
    # return
    script_doEverything(path_to_npy, path_to_im, ext, lim, out_file,
                        out_dir_scratch, window, step_size, thresh,
                        scale_info, scale_all, scale_images, lim_cases, gpu,
                        model_file, clusters_file,
                        train_val_file=train_val_file, overwrite=overwrite)
    script_writeHTMLForOverlap(path_to_npy, path_to_im, ext, lim, out_file,
                               out_dir_scratch, window, step_size, thresh,
                               scale_info, scale_all, scale_images, lim_cases,
                               gpu, model_file, clusters_file)
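# A quick arithmetic check of the scale sweep used above. The helper name is
# ours, not part of the project: np.arange(-2, 1.5, 0.5) stops before 1.5, so
# the sweep covers seven scales from 1/4x to 2x, half an octave apart.
def _print_scale_sweep():
    import numpy as np
    scale_pow = np.arange(-2, 1.5, 0.5)  # [-2, -1.5, -1, -0.5, 0, 0.5, 1]
    print [round(2**p, 3) for p in scale_pow]
    # -> [0.25, 0.354, 0.5, 0.707, 1.0, 1.414, 2.0]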
def script_saveSegVizAll(params):
    path_to_im_meta = params.path_to_im_meta
    path_to_im_canon = params.path_to_im_canon
    path_to_predScores_meta = params.path_to_predScores_meta
    path_to_seg_meta = params.path_to_seg_meta
    path_to_mat_overlap = params.path_to_mat_overlap
    path_to_score_mat_meta = params.path_to_score_mat_meta
    img_names_txt = params.img_names_txt
    scale_idx_range = params.scale_idx_range
    stride = params.stride
    w = params.w
    alpha = params.alpha
    num_to_pick = params.num_to_pick
    out_dir_overlay_meta = params.out_dir_overlay_meta
    power_scale_range = params.power_scale_range
    power_step_size = params.power_step_size
    out_file_html = params.out_file_html
    height_width = params.height_width
    num_threads = params.num_threads
    out_dir_im_dict = params.out_dir_im_dict
    overwrite = params.overwrite

    util.mkdir(out_dir_overlay_meta)
    for scale_curr in scale_idx_range:
        util.mkdir(os.path.join(out_dir_overlay_meta, str(scale_curr)))
    img_names = util.readLinesFromFile(img_names_txt)
    args = []
    for idx_img_name, img_name in enumerate(img_names):
        arg_curr = (img_name, path_to_im_meta, path_to_im_canon,
                    path_to_predScores_meta, path_to_seg_meta,
                    path_to_mat_overlap, path_to_score_mat_meta,
                    scale_idx_range, stride, w, alpha, num_to_pick,
                    out_dir_overlay_meta, power_scale_range, power_step_size,
                    out_dir_im_dict, overwrite, idx_img_name)
        args.append(arg_curr)
    # args=args[:10];
    # args=[args[idx] for idx in [422]]
    # img_list_all=[];
    # for arg in args:
    # img_list=saveSegVizImage(arg);
    # img_list_all.append(img_list);
    # for img_curr in img_list:
    # print img_curr;
    # break;
    p = multiprocessing.Pool(num_threads)
    img_list_all = p.map(saveSegVizImage, args)
    img_paths_html = []
    captions_html = []
    for img_list in img_list_all:
        img_row = []
        caption_row = []
        org_img = img_list[0]
        # print org_img
        img_list = img_list[1:]
        org_img_begin = '/'.join(org_img.split('/')[:2])
        org_img = util.getRelPath(org_img, org_img_begin)
        # print org_img
        img_row.append(org_img)
        caption_row.append(util.getFileNames([org_img])[0])
        for img_curr in img_list:
            img_begin = '/'.join(img_curr.split('/')[:2])
            img_folder = img_curr.split('/')[-2]
            img_row.append(util.getRelPath(img_curr, img_begin))
            caption_row.append(img_folder)
        img_paths_html.append(img_row)
        captions_html.append(caption_row)
    visualize.writeHTML(out_file_html, img_paths_html, captions_html,
                        height=height_width[0], width=height_width[1])
    print out_file_html
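# ---------------------------------------------------------------------------
# script_saveSegVizAll reads its configuration off a params object. A minimal
# sketch of a compatible container, listing exactly the attributes the
# function touches above; the namedtuple itself is an assumption (the real
# params class is defined elsewhere in the project).
# ---------------------------------------------------------------------------
import collections

SegVizParams_sketch = collections.namedtuple('SegVizParams_sketch', [
    'path_to_im_meta', 'path_to_im_canon', 'path_to_predScores_meta',
    'path_to_seg_meta', 'path_to_mat_overlap', 'path_to_score_mat_meta',
    'img_names_txt', 'scale_idx_range', 'stride', 'w', 'alpha',
    'num_to_pick', 'out_dir_overlay_meta', 'power_scale_range',
    'power_step_size', 'out_file_html', 'height_width', 'num_threads',
    'out_dir_im_dict', 'overwrite'
])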
def main():
    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    # util.mkdir(out_dir);
    # train_file=os.path.join(out_dir,'train.txt');
    # template_deploy_file='trainval_noFix_withRandom_diffForConv.prototxt';
    # template_solver_file='solver_debug.prototxt';
    # base_lr=0.000001;
    # snapshot=1000;
    # layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    # gpu=1;
    # commands=[];
    # idx=len(layers)-4;
    # fix_layers=layers[1:idx+1];
    # layer_str='_'.join(fix_layers);
    # print layer_str;
    # # return
    # model_file_curr=model_file
    # snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    # out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    # out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    # log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    # replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    # replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    # command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    # util.writeFile(os.path.join(out_dir,'train.sh'),[command]);
    # return

    model_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/opt_noFix_conv1_conv2_conv3_conv4_conv5_llr__iter_50000.caffemodel'
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    solver_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/solver_conv1_conv2_conv3_conv4_conv5.prototxt'
    deploy_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/deploy_conv1_conv2_conv3_conv4_conv5.prototxt'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    solver_file = '/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters/solver_conv1_conv2_conv3_conv4_conv5.prototxt'
    deploy_file = '/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters/deploy_conv1_conv2_conv3_conv4_conv5.prototxt'
    justCheckGradients(solver_file, deploy_file, model_file)
    return

    out_dir = '/disk3/maheen_data/debug_networks/sanityCheckDebug'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    util.mkdir(out_dir)
    train_file = os.path.join(out_dir, 'train.txt')
    template_deploy_file = 'deploy_withRandom.prototxt'
    template_solver_file = 'solver_debug.prototxt'
    base_lr = 0.000001
    snapshot = 1000
    layers = [None, 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8']
    gpu = 1
    commands = []
    idx = len(layers) - 1
    fix_layers = layers[1:idx + 1]
    layer_str = '_'.join(fix_layers)
    print layer_str
    model_file_curr = model_file
    snapshot_prefix = os.path.join(out_dir, 'opt_noFix_' + layer_str + '_')
    out_deploy_file = os.path.join(out_dir, 'deploy_' + layer_str + '.prototxt')
    out_solver_file = os.path.join(out_dir, 'solver_' + layer_str + '.prototxt')
    log_file = os.path.join(out_dir, 'log_' + layer_str + '.log')
    replaceSolverFile(out_solver_file, template_solver_file, out_deploy_file,
                      base_lr, snapshot, snapshot_prefix, gpu=gpu)
    replaceDeployFile(out_deploy_file, template_deploy_file, train_file, fix_layers)
    command = printTrainingCommand(out_solver_file, log_file, model_file_curr)
    util.writeFile(os.path.join(out_dir, 'train.sh'), [command])
    return

    out_dir = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/'
    out_dir = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig_llr_diff/'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    util.mkdir(out_dir)
    train_file = os.path.join(out_dir, 'train.txt')
    template_deploy_file = 'deploy_withRandom_yjConfig.prototxt'
    template_solver_file = 'solver_debug.prototxt'
    base_lr = 0.00001
    snapshot = 500
    layers = [None, 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8']
    gpu = 1
    commands = []
    idx = len(layers) - 4
    fix_layers = layers[1:idx + 1]
    layer_str = '_'.join(fix_layers)
    print layer_str
    # return
    model_file_curr = model_file
    snapshot_prefix = os.path.join(out_dir, 'opt_noFix_' + layer_str + '_')
    out_deploy_file = os.path.join(out_dir, 'deploy_' + layer_str + '.prototxt')
    out_solver_file = os.path.join(out_dir, 'solver_' + layer_str + '.prototxt')
    log_file = os.path.join(out_dir, 'log_' + layer_str + '.log')
    replaceSolverFile(out_solver_file, template_solver_file, out_deploy_file,
                      base_lr, snapshot, snapshot_prefix, gpu=gpu)
    replaceDeployFile(out_deploy_file, template_deploy_file, train_file, fix_layers)
    command = printTrainingCommand(out_solver_file, log_file, model_file_curr)
    util.writeFile(os.path.join(out_dir, 'train.sh'), [command])
    return

    out_dir = '/disk3/maheen_data/ft_youtube_40_noFix_noCopyFC8_FC7'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';
    util.mkdir(out_dir)
    train_txt_orig_path = '/disk3/maheen_data/ft_youtube_40/train.txt'
    template_deploy_file = 'deploy_withRandom.prototxt'
    template_solver_file = 'solver_debug.prototxt'
    train_file = os.path.join(out_dir, 'train.txt')
    data = util.readLinesFromFile(train_txt_orig_path)
    random.shuffle(data)
    # data[:100];
    util.writeFile(train_file, data)
    # shutil.copyfile(train_txt_orig_path,train_file);
    # out_dir='/disk3/maheen_data/ft_youtube_40_ucf_permute';
    # train_file=os.path.join(out_dir,'train_permute.txt');
    base_lr = 0.0001
    snapshot = 2000
    layers = [None, 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8']
    gpu = 0
    # command_file=os.path.join(out_dir,'debug_0.sh');
    commands = []
    # for idx in range(4,len(layers)):
    # if idx==0:
    # fix_layers=layers[0];
    # layer_str=str(fix_layers);
    # model_file_curr=None;
    # else:
    idx = len(layers) - 3
    fix_layers = layers[1:idx + 1]
    layer_str = '_'.join(fix_layers)
    print layer_str
    return
    model_file_curr = model_file
    # print fix_layers
    snapshot_prefix = os.path.join(out_dir, 'opt_noFix_' + layer_str + '_')
    out_deploy_file = os.path.join(out_dir, 'deploy_' + layer_str + '.prototxt')
    out_solver_file = os.path.join(out_dir, 'solver_' + layer_str + '.prototxt')
    log_file = os.path.join(out_dir, 'log_' + layer_str + '.log')
    replaceSolverFile(out_solver_file, template_solver_file, out_deploy_file,
                      base_lr, snapshot, snapshot_prefix, gpu=gpu)
    replaceDeployFile(out_deploy_file, template_deploy_file, train_file, fix_layers)
    command = printTrainingCommand(out_solver_file, log_file, model_file_curr)
    util.writeFile(os.path.join(out_dir, 'train.sh'), [command])
    # commands.append(command);
    # util.writeFile(command_file,commands);
    return

    # out_dir='/disk3/maheen_data/debug_networks/noFix';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    # '/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/train.txt'
    # data=util.readLinesFromFile(train_txt_orig_path);
    # random.shuffle(data);
    # # data[:100];
    # util.writeFile(train_file,data[:100]);
    # out_dir='/disk3/maheen_data/debug_networks/noFixNoCopy';
    # model_file=None;
    out_dir = '/disk3/maheen_data/debug_networks/noFixCopyByLayer'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    util.mkdir(out_dir)
    train_txt_orig_path = '/disk3/maheen_data/debug_networks/noFix/train.txt'
    deploy_file = '/disk3/maheen_data/debug_networks/noFix/deploy.prototxt'
    solver_file = '/disk3/maheen_data/debug_networks/noFix/solver.prototxt'
    # template_deploy_file='deploy_debug_noFix.prototxt';
    template_deploy_file = 'deploy_fc8NoCopy.prototxt'
    template_solver_file = 'solver_debug.prototxt'
    train_file = os.path.join(out_dir, 'train.txt')
    # shutil.copyfile(train_txt_orig_path,train_file);
    base_lr = 0.0001
    snapshot = 100
    layers = [None, 'conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7', 'fc8']
    return

    out_dir = '/disk3/maheen_data/ft_youtube_40_noFix_alexnet'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel'
    layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7']
    layers_str = []
    for idx in range(len(layers)):
        if idx == 0:
            fix_layers = layers[0]
            layer_str = str(fix_layers)
        else:
            fix_layers = layers[1:idx + 1]
            layer_str = '_'.join(fix_layers)
        layers_str.append(layer_str)
    log_files = [os.path.join(out_dir, 'log_' + layer_str + '.log')
                 for layer_str in layers_str]
    str_match = ' solver.cpp:209] Iteration '
    xAndYs = [svl.getIterationsAndLosses(log_file, str_match)
              for log_file in log_files]
    out_files = []
    for layer_str, log_file in zip(layers_str, log_files):
        xAndY = svl.getIterationsAndLosses(log_file, str_match)
        print xAndY
        out_file = os.path.join(out_dir, 'loss_' + layer_str + '.png')
        visualize.plotSimple([xAndY], out_file, title=layer_str)
        out_files.append(out_file)
    out_file_html = os.path.join(out_dir, 'losses_all.html')
    img_paths = [[util.getRelPath(out_file, '/disk3')] for out_file in out_files]
    captions = [['']] * len(out_files)
    print img_paths
    print captions
    visualize.writeHTML(out_file_html, img_paths, captions, height=300, width=300)
    # out_file=os.path.join(out_dir,'losses_all.png');
    # print len(xAndYs);
    # print xAndYs[-2][1]
    # visualize.plotSimple(xAndYs,out_file,legend_entries=layers_str,loc=0,outside=True)
    return

    # mean_standard_proto_file='/home/maheenrashid/Downloads/debugging_jacob/opticalflow/standard.binaryproto';
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    layers_to_copy = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']
    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/OptFlow_youtube_hmdb__iter_5000.caffemodel';
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic_llr/OptFlow_youtube_hmdb__iter_65000.caffemodel';
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
    # deploy_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/opt_train_coarse_xavier.prototxt';
    # solver_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/train.prototxt';
    deploy_file = '/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/deploy_debug.prototxt'
    solver_file = '/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/solver_debug.prototxt'
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
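    # The blobs_lr tuples below appear to mirror caffe's (weight, bias)
    # lr_mult convention, one pair per entry of layers_to_explore: the first
    # three conv layers effectively frozen at (0,0), conv4/conv5 trained at a
    # tenth of the base rate, and the fc layers at full rate with biases at
    # twice the weight rate, as is conventional.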
    # layers_to_explore=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']  # ,'fc6','fc7','fc8']
    layers_to_explore = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5',
                         'fc6_fix', 'fc7_fix', 'fc8_fix']
    blobs_lr = [(0, 0), (0, 0), (0, 0),
                # (10,20),(10,20),
                (0.1, 0.2), (0.1, 0.2),
                (1, 2), (1, 2), (1, 2)]
    iterations = 400
    momentum = 0.9
    lr = 0.000001
    caffe.set_device(1)
    caffe.set_mode_gpu()
    solver = caffe.SGDSolver(solver_file)
    net_org = caffe.Net(deploy_file, model_file)
    # copy weights
    for layer_name in layers_to_copy:
        solver.net.params[layer_name][0].data[...] = net_org.params[layer_name][0].data
        solver.net.params[layer_name][1].data[...] = net_org.params[layer_name][1].data
    layer_names = list(solver.net._layer_names)
    ratios = {}
    for key in layers_to_explore:
        ratios[key] = []
    dict_layers = {}
    for idx_curr, layer_name in enumerate(layer_names):
        print idx_curr, layer_name,
        if layer_name in solver.net.params.keys():
            print len(solver.net.params[layer_name])
            update_prev = [np.zeros(solver.net.layers[idx_curr].blobs[0].diff.shape),
                           np.zeros(solver.net.layers[idx_curr].blobs[1].diff.shape)]
            blob_lr = list(blobs_lr[layers_to_explore.index(layer_name)])
            dict_layers[idx_curr] = [layer_name, update_prev, blob_lr]
        else:
            print 0
    for idx_curr in dict_layers.keys():
        print idx_curr, len(dict_layers[idx_curr]), dict_layers[idx_curr][0], dict_layers[idx_curr][1][0].shape, dict_layers[idx_curr][1][1].shape, dict_layers[idx_curr][2]
    for iteration in range(iterations):
        print iteration
        solver.net.forward()
        solver.net.backward()
        # monitor the update-to-weight ratio per layer
        for idx_curr in dict_layers.keys():
            rel_row = dict_layers[idx_curr]
            layer_name = rel_row[0]
            update_prev = rel_row[1][0]
            print rel_row[2][0]
            lr_curr = rel_row[2][0] * lr
            diffs_curr = solver.net.params[layer_name][0].diff
            weights_curr = solver.net.params[layer_name][0].data
            param_scale = np.linalg.norm(weights_curr.ravel())
            update = update_prev * momentum - lr_curr * diffs_curr
            update_scale = np.linalg.norm(update.ravel())
            ratio = update_scale / param_scale  # want ~1e-3
            print layer_name, ratio, update_scale, param_scale
            ratios[layer_name].append(ratio)
        # apply the momentum update by hand: v = momentum*v - lr*grad; w += v
        # (the original subtracted the update, which would step up the
        # gradient; fixed to +=)
        for idx_curr, layer in enumerate(solver.net.layers):
            for idx_blob, blob in enumerate(layer.blobs):
                rel_row = dict_layers[idx_curr]
                layer_name = rel_row[0]
                update_prev = rel_row[1][idx_blob]
                lr_curr = rel_row[2][idx_blob] * lr
                diffs_curr = blob.diff
                update_curr = momentum * update_prev - (lr_curr * diffs_curr)
                blob.data[...] += update_curr
                blob.diff[...] = np.zeros(blob.diff.shape)
                dict_layers[idx_curr][1][idx_blob] = update_curr
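# ---------------------------------------------------------------------------
# The loop above monitors the update-to-weight ratio per layer, the standard
# sanity check that the effective learning rate is reasonable (a ratio around
# 1e-3 is the usual rule of thumb). The check in isolation, as a minimal
# sketch; 'weights', 'grads', and 'velocity' are placeholder arrays, not
# anything from this project.
# ---------------------------------------------------------------------------
import numpy as np

def update_ratio_sketch(weights, grads, lr, momentum=0.9, velocity=0.0):
    # one heavy-ball step: v = momentum*v - lr*grad; w += v
    velocity = momentum * velocity - lr * grads
    param_scale = np.linalg.norm(weights.ravel())
    update_scale = np.linalg.norm(velocity.ravel())
    return update_scale / param_scale  # want ~1e-3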
def main():
    old_file = '/home/laoreja/data/knn_res_new/knn_5_points_train_list.txt'
    new_file = '/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt'
    match_str = 'n02374451_4338.JPEG'
    lines = util.readLinesFromFile(old_file)
    lines_to_keep = []
    for line in lines:
        if match_str not in line:
            lines_to_keep.append(line)
    assert len(lines_to_keep) == len(lines) - 1
    util.writeFile(new_file, lines_to_keep)
    return

    file_curr = '/home/laoreja/finetune-deep-landmark/dataset/train/trainImageList_2.txt'
    out_file = '/home/maheenrashid/Downloads/trainImageList_2_clean.txt'
    lines = util.readLinesFromFile(file_curr)
    lines_to_keep = []
    for line in lines:
        if line == '/home/laoreja/data/horse-images/annotation/imagenet_n02374451/gxy/n02374451_4338.JPEG 156 169 79 99 161 88 1 43 46 1 167 95 1 164 95 1 43 56 1':
            print 'found!'
        else:
            lines_to_keep.append(line)
    print len(lines_to_keep), len(lines)
    assert len(lines_to_keep) + 1 == len(lines)
    util.writeFile(out_file, lines_to_keep)
    return

    horse_file = '/home/SSD3/maheen-data/horse_project/horse/matches_5_val_fiveKP.txt'
    human_file = '/home/SSD3/maheen-data/horse_project/aflw/matches_5_val_fiveKP_noIm.txt'
    horse_data = util.readLinesFromFile(horse_file)
    human_data = util.readLinesFromFile(human_file)
    # horse_data=[horse_data[41]];
    # human_data=[human_data[41]];
    # print horse_data[0];
    horse_im = [line_curr.split(' ')[0] for line_curr in horse_data]
    human_im = [
        line_curr.split(' ')[0].replace('/npy/', '/im/').replace('.npy', '.jpg')
        for line_curr in human_data
    ]
    horse_npy = [line_curr.split(' ')[1] for line_curr in horse_data]
    human_npy = [line_curr.split(' ')[0] for line_curr in human_data]
    problem_cases = []
    for horse_npy_curr in horse_npy:
        labels = np.load(horse_npy_curr)
        if np.any(labels < 0):
            problem_cases.append(horse_npy_curr)
    print len(problem_cases), len(set(problem_cases))
    return

    dir_server = '/home/SSD3/maheen-data'
    out_dir_debug = os.path.join(dir_server, 'temp', 'debug_problem_batch/rerun')
    im_file = '/home/laoreja/data/horse-images/annotation/imagenet_n02374451/gxy/n02374451_4338.JPEG'
    npy_file = '/home/SSD3/maheen-data/temp/debug_problem_batch/rerun/npy/imagenet_n02374451/n02374451_4338.npy'
    out_file = os.path.join(out_dir_debug, 'check.png')
    saveImWithAnno((1, im_file, npy_file, out_file))
    # arg=([156, 169, 79, 99], [[161, 88, 1], [43, 46, 1], [167, 95, 1], [164, 95, 1], [43, 56, 1]], '/home/SSD3/maheen-data/temp/debug_problem_batch/rerun/npy/imagenet_n02374451/n02374451_4338.npy', 0);
    # # print np.load(arg[2]);
    # saveBBoxNpy(arg);
    # # print np.load(arg[2]);
    return

    dir_server = '/home/SSD3/maheen-data'
    out_dir_debug = os.path.join(dir_server, 'temp', 'debug_problem_batch/rerun')
    util.mkdir(out_dir_debug)
    params_dict = {}
    params_dict['path_txt'] = '/home/SSD3/maheen-data/temp/debug_problem_batch/train_dummy.txt'
    # '/home/laoreja/finetune-deep-landmark/dataset/train/trainImageList_2.txt';
    params_dict['path_pre'] = None
    params_dict['type_data'] = 'horse'
    params_dict['out_dir_meta'] = out_dir_debug
    util.mkdir(params_dict['out_dir_meta'])
    params_dict['out_dir_im'] = os.path.join(params_dict['out_dir_meta'], 'im')
    params_dict['out_dir_npy'] = os.path.join(params_dict['out_dir_meta'], 'npy')
    params_dict['out_file_list_npy'] = os.path.join(params_dict['out_dir_npy'], 'data_list.txt')
    params_dict['out_file_list_im'] = os.path.join(params_dict['out_dir_im'], 'data_list.txt')
    params_dict['out_file_pairs'] = os.path.join(params_dict['out_dir_meta'], 'pairs.txt')
    params_dict['overwrite'] = True
    local_script_makeBboxPairFiles(params_dict)
    return
    npy_file = '/home/SSD3/maheen-data/horse_project/horse/npy/imagenet_n02374451/n02374451_4338.npy'
    labels = np.load(npy_file)
    print labels
    return

    dir_server = '/home/SSD3/maheen-data'
    out_dir_debug = os.path.join(dir_server, 'temp', 'debug_problem_batch')
    util.mkdir(out_dir_debug)
    out_horse_im_dir = os.path.join(out_dir_debug, 'horse_im')
    out_human_im_dir = os.path.join(out_dir_debug, 'human_im')
    util.mkdir(out_horse_im_dir)
    util.mkdir(out_human_im_dir)
    horse_file = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP_debug.txt'
    human_file = '/home/SSD3/maheen-data/horse_project/aflw/matches_5_train_fiveKP_noIm_debug.txt'
    horse_data = util.readLinesFromFile(horse_file)
    human_data = util.readLinesFromFile(human_file)
    horse_data = [horse_data[41]]
    human_data = [human_data[41]]
    print horse_data[0]
    horse_im = [line_curr.split(' ')[0] for line_curr in horse_data]
    human_im = [
        line_curr.split(' ')[0].replace('/npy/', '/im/').replace('.npy', '.jpg')
        for line_curr in human_data
    ]
    horse_npy = [line_curr.split(' ')[1] for line_curr in horse_data]
    human_npy = [line_curr.split(' ')[0] for line_curr in human_data]
    args = []
    for idx, horse_im_curr in enumerate(horse_im):
        args.append((idx, horse_im_curr, horse_npy[idx],
                     os.path.join(out_horse_im_dir, str(idx) + '.jpg')))
    for idx, human_im_curr in enumerate(human_im):
        args.append((idx, human_im_curr, human_npy[idx],
                     os.path.join(out_human_im_dir, str(idx) + '.jpg')))
    # saveImWithAnno(args[-1]);
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(saveImWithAnno, args)
    out_file_html = os.path.join(out_dir_debug, 'viz_matches.html')
    img_paths = []
    captions = []
    for idx in range(len(horse_im)):
        horse_im_curr = os.path.join(out_horse_im_dir, str(idx) + '.jpg')
        horse_im_curr = util.getRelPath(horse_im_curr, dir_server)
        human_im_curr = os.path.join(out_human_im_dir, str(idx) + '.jpg')
        human_im_curr = util.getRelPath(human_im_curr, dir_server)
        img_paths.append([horse_im_curr, human_im_curr])
        captions.append(['horse ' + str(idx), 'human'])
    # for idx,horse_im_curr in enumerate(horse_im):
    # human_im_curr=util.getRelPath(human_im[idx],dir_server);
    # horse_im_curr=util.getRelPath(horse_im_curr,dir_server);
    # img_paths.append([horse_im_curr,human_im_curr]);
    # captions.append(['horse','human']);
    visualize.writeHTML(out_file_html, img_paths, captions, 224, 224)
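# ---------------------------------------------------------------------------
# saveImWithAnno is mapped over (idx, im_file, npy_file, out_file) tuples in
# several places above but is not defined in this file. A minimal sketch,
# assuming the npy file holds one keypoint per row with x and y in the first
# two columns, drawn the same way as the inline cv2.circle check earlier in
# the file. The _sketch suffix marks this as an assumption.
# ---------------------------------------------------------------------------
import cv2
import numpy as np

def saveImWithAnno_sketch(arg):
    idx, im_file, npy_file, out_file = arg
    im = cv2.imread(im_file, 1)
    labels = np.load(npy_file)
    for i in range(labels.shape[0]):
        # labels may be float; cv2.circle wants integer pixel coordinates
        cv2.circle(im, (int(labels[i][0]), int(labels[i][1])), 2, (0, 0, 255), -1)
    cv2.imwrite(out_file, im)
    return out_file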