# Common imports assumed by the examples in this listing; visualize, util,
# and svl are project-local helper modules (not shown), and caffe is only
# needed by the solver-debugging examples.
import os
import pickle
import random
import argparse
import numpy as np
from collections import OrderedDict


def main():
	loss_seg_path='/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax/final/loss_all_final_seg.npy';
	loss_score_path='/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax/final/loss_all_final_score.npy';

	loss_seg_path_res = '/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax_res/final/loss_all_final_seg.npy';
	loss_score_path_res = '/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax_res/final/loss_all_final_score.npy';

	out_file_seg='/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax/final/loss_all_final_seg.png';
	out_file_score='/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax/final/loss_all_final_score.png';


	step_size=100;
	loss_seg=np.load(loss_seg_path);
	loss_score=np.load(loss_score_path);
	
	loss_seg=np.concatenate((loss_seg,np.load(loss_seg_path_res)));
	loss_score=np.concatenate((loss_score,np.load(loss_score_path_res)));
	print loss_seg.shape
	print loss_score.shape
	avg_seg=smoothOutLoss(loss_seg,step_size);
	avg_score=smoothOutLoss(loss_score,step_size);
	avg_score=avg_score[100:];
	avg_seg=avg_seg[100:];
	visualize.plotSimple([(range(len(avg_seg)),avg_seg)],out_file_seg,title='seg loss avg')
	visualize.plotSimple([(range(len(avg_score)),avg_score)],out_file_score,title='score loss avg')

	print out_file_score.replace('/disk3','vision3.cs.ucdavis.edu:1001');
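
# Hedged sketch: smoothOutLoss is not defined in this listing. A minimal
# moving-average helper consistent with its usage above (one averaged value
# per window of step_size losses) might look like this; the original may use
# a sliding window instead.
def smoothOutLoss_sketch(loss, step_size):
	# average each non-overlapping window of step_size values
	return np.array([np.mean(loss[i:i + step_size]) for i in range(0, len(loss), step_size)])
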
# Example #2
def savePerClassCumulativeGraph(cum_freq,idx_perc,percents,out_file,title):
    # if norm:
    cum_freq=cum_freq/float(cum_freq[-1])
    xAndYs={};
    xAndYs['Cumulative Frequency']=(range(len(cum_freq)),cum_freq);
    for idx_curr,perc_curr in zip(idx_perc,percents):
        xAndYs[str(perc_curr*100)+'%']=([0,idx_curr,idx_curr],[perc_curr,perc_curr,0]);
    xlabel='Number of HashBins'
    ylabel='Percent of Images'
    visualize.plotSimple(xAndYs.values(),out_file,title=title,
        xlabel=xlabel,ylabel=ylabel,legend_entries=xAndYs.keys())
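
# Hypothetical usage sketch (inputs made up): cum_freq is a cumulative bin
# frequency curve, percents are the coverage levels to mark, and idx_perc
# gives the bin index at which each level is first reached.
# percents = [0.5, 0.9]
# idx_perc = [np.searchsorted(cum_freq / float(cum_freq[-1]), p) for p in percents]
# savePerClassCumulativeGraph(cum_freq, idx_perc, percents, 'cum_freq.png', 'HashBin Coverage')
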
# Example #3
def script_createHistComparative():
    out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
    train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
    layers=['pool5','fc6','fc7'];
    delta=5;
    caption_text=['Trained','Not Trained'];
    replace=[out_dir_meta+'/',''];
    degree=90;
    deg_to_see=0;
    # train_files=[os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # non_train_files=[os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # for idx in range(len(train_files)):

    combos=[(dir,layer) for dir in dirs for layer in layers];
    out_file_html=os.path.join(out_dir_meta,'hist_by_degree_'+str(degree)+'_comparisons_compress.html');
    img_paths=[];
    captions=[];

    for dir,layer in combos:

        file_train=os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p');
        # train_files[idx];
        file_non_train=os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p');
        # non_train_files[idx];

        hists_train,bins_train=pickle.load(open(file_train,'rb'));
        hists_non_train,bins_non_train=pickle.load(open(file_non_train,'rb'));
        
        mid_points_train=[(bins_train[i]+bins_train[i+1])/float(2) for i in range(len(bins_train)-1)];
        mid_points_non_train=[(bins_non_train[i]+bins_non_train[i+1])/float(2) for i in range(len(bins_non_train)-1)];
        
        # dir=file_train[file_train.rindex('/')+1:];
        # dir=dir[:dir.index('_')];
        out_file_just_file=layer+'_'+dir+'_'+str(degree)+'_'+str(delta)+'.png'
        out_file=os.path.join(out_dir_meta,out_file_just_file)
        title=dir+' Comparison';
        xlabel='Distance Rank';
        ylabel='Frequency';

        # print out_file
        img_paths.append([out_file_just_file]);
        captions.append([dir+' '+layer]);

        visualize.plotSimple(zip([mid_points_train,mid_points_non_train],[hists_train,hists_non_train]),out_file,title=title,xlabel=xlabel,ylabel=ylabel,legend_entries=['Trained','Non Trained'],loc=0);
    print out_file_html
    visualize.writeHTML(out_file_html,img_paths,captions,width=400,height=400);
# Example #4
def savePerClassCumulativeGraph(cum_freq, idx_perc, percents, out_file, title):
    # if norm:
    cum_freq = cum_freq / float(cum_freq[-1])
    xAndYs = {}
    xAndYs['Cumulative Frequency'] = (range(len(cum_freq)), cum_freq)
    for idx_curr, perc_curr in zip(idx_perc, percents):
        xAndYs[str(perc_curr * 100) + '%'] = ([0, idx_curr, idx_curr],
                                              [perc_curr, perc_curr, 0])
    xlabel = 'Number of HashBins'
    ylabel = 'Percent of Images'
    visualize.plotSimple(xAndYs.values(),
                         out_file,
                         title=title,
                         xlabel=xlabel,
                         ylabel=ylabel,
                         legend_entries=xAndYs.keys())
# Example #5
def script_visualizeRatios():
	ratio_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/ratios.p';
	out_file_plot='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/ratios_plot.png';

	ratio=pickle.load(open(ratio_file,'rb'));
	print ratio.keys();
	xAndYs=[];
	legend_entries=[];
	for key_curr in ratio.keys():
		print key_curr,np.array(ratio[key_curr]).shape;
		list_curr=np.array(ratio[key_curr]);
		index_nan=np.min(np.where(np.isnan(list_curr)));
		assert np.sum(np.isnan(list_curr[index_nan:]))==list_curr[index_nan:].size
		list_curr=list_curr[:index_nan];
		list_curr=list_curr[:100]
		xAndYs.append((range(len(list_curr)),list_curr));
		legend_entries.append(key_curr);

	visualize.plotSimple(xAndYs,out_file_plot,'update/weight ratio','iterations','ratio',legend_entries,0,True);
def compareGradInfoLayer(dirs,out_dirs,model_num,num_iters,file_pre_grad,layer_range,plot_title):
	img_paths=[];
	for dir_curr,out_dir_curr in zip(dirs,out_dirs):
		# dict_for_plotting={plot_title:OrderedDict()};

		dict_for_plotting=OrderedDict();
		for model_num_curr in model_num:
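			# note: the same gradient file fills both slots of the (grad, weight)
			# pair expected by getMagInfo; only the gradient means (means[0])
			# are used here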
			file_pairs=[(os.path.join(dir_curr,model_num_curr,file_pre_grad+str(iter_curr)+'.npy'),
					os.path.join(dir_curr,model_num_curr,file_pre_grad+str(iter_curr)+'.npy')) for iter_curr in num_iters];
			means,_=getMagInfo(file_pairs,alt=True,range_to_choose=layer_range);
			dict_for_plotting[model_num_curr]=means[0];

		# for key_curr in dict_for_plotting.keys():
		out_file_curr=os.path.join(out_dir_curr,plot_title+'.png');
		xAndYs=dict_for_plotting.values();
		legend_entries=dict_for_plotting.keys();
		xAndYs=[(layer_range,x_curr) for x_curr in xAndYs];
		visualize.plotSimple(xAndYs,out_file_curr,title=plot_title,xlabel='layer',ylabel='magnitude',legend_entries=legend_entries,outside=True);
		img_paths.append(out_file_curr);

	return img_paths;			
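
# Hedged sketch: getMagInfo is not shown in this listing. From its usage it
# appears to load per-layer (gradient, weight) magnitude files for each
# iteration, average over iterations, and return per-layer means for grads,
# weights, and the grad/weight ratio; a minimal version might be:
def getMagInfo_sketch(file_pairs, alt=True, range_to_choose=None):
	grads = np.array([np.load(pair[0]) for pair in file_pairs])
	weights = np.array([np.load(pair[1]) for pair in file_pairs])
	grads_mean = np.mean(grads, axis=0)
	weights_mean = np.mean(weights, axis=0)
	if range_to_choose is not None:
		grads_mean = grads_mean[range_to_choose]
		weights_mean = weights_mean[range_to_choose]
	# per-layer ratio of gradient magnitude to weight magnitude
	return [grads_mean, weights_mean, grads_mean / weights_mean], None
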
# Example #7
def plotComparisonCurve(errors_all, out_file, labels):
    vals = []
    for err in errors_all:
        err = np.array(err)
        #         avg=np.mean(err,1);
        bin_keep = err >= 0
        err[err < 0] = 0
        div = np.sum(bin_keep, 1)
        sum_val = np.sum(err, 1).astype(np.float)
        avg = sum_val / div

        avg = np.sort(avg)
        vals.append(avg)

    xAndYs = [(range(len(val_curr)), val_curr) for val_curr in vals]
    xlabel = 'Sorted Image Number'
    ylabel = 'BBox Normalized Error'
    visualize.plotSimple(xAndYs,
                         out_file,
                         xlabel=xlabel,
                         ylabel=ylabel,
                         legend_entries=labels)
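
# Hypothetical usage (inputs made up): each entry of errors_all is a
# [num_images x num_keypoints] error matrix for one method; negative entries
# presumably mark missing annotations and are excluded from each image's mean.
# errors_ours = np.random.rand(100, 5)
# errors_base = np.random.rand(100, 5) + 0.1
# plotComparisonCurve([errors_ours, errors_base], 'comparison.png', ['Ours', 'Baseline'])
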
def compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html):
	img_paths=[];
	captions=[];

	for dir_curr,out_dir_curr in zip(dirs,out_dirs):
		# build as an OrderedDict directly so the plot order is deterministic;
		# in python 2, OrderedDict(dict.items()) just inherits the plain
		# dict's arbitrary ordering
		dict_for_plotting=OrderedDict([('grads_mag',OrderedDict()),('weights_mag',OrderedDict()),('ratios',OrderedDict())]);
		for model_num_curr in model_num:
			print model_num_curr;
			file_pairs=[(os.path.join(dir_curr,model_num_curr,file_pre_grad+str(iter_curr)+'.npy'),
					os.path.join(dir_curr,model_num_curr,file_pre_weight+str(iter_curr)+'.npy')) for iter_curr in num_iters];
			means,_=getMagInfo(file_pairs,alt=True);

			dict_for_plotting['grads_mag'][model_num_curr]=means[0];
			dict_for_plotting['weights_mag'][model_num_curr]=means[1];
			dict_for_plotting['ratios'][model_num_curr]=means[2];

		img_paths_curr=[];
		captions_curr=[];
		for key_curr in dict_for_plotting.keys():
			out_file_curr=os.path.join(out_dir_curr,key_curr+'.png');
			data=dict_for_plotting[key_curr];
			xAndYs=data.values();
			legend_entries=data.keys();
			xAndYs=[(range(len(x_curr)),x_curr) for x_curr in xAndYs];
			visualize.plotSimple(xAndYs,out_file_curr,title=key_curr,xlabel='layer',ylabel='magnitude',legend_entries=legend_entries,outside=True);
			print out_file_curr.replace('/disk3','vision3.cs.ucdavis.edu:1001');
			img_paths_curr.append(util.getRelPath(out_file_curr,'/disk3'));
			# print dir_curr.split('/');
			captions_curr.append(dir_curr.split('/')[-2]+' '+dir_curr.split('/')[-1]+' '+key_curr);

		img_paths.append(img_paths_curr);
		captions.append(captions_curr);

	visualize.writeHTML(out_file_html,img_paths,captions,height=500,width=500);
	print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');
def script_visualizeLossesFromExperiment():

    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayerAlexNet';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';
    layers=['conv1','conv2','conv3','conv4','conv5','fc6'];
    # ,'fc7'];

    layers_str=[];
    for idx in range(len(layers)):
        # if idx==0:
        #     fix_layers=layers[0];
        #     layer_str=str(fix_layers);
        # else:
        fix_layers=layers[:idx+1];
        layer_str='_'.join(fix_layers);
        layers_str.append(layer_str);

    log_files=[os.path.join(out_dir,'log_'+layer_str+'.log') for layer_str in layers_str];
    str_match=' solver.cpp:209] Iteration ';
    xAndYs=[svl.getIterationsAndLosses(log_file,str_match) for log_file in log_files];

    out_files=[];
    for layer_str,log_file in zip(layers_str,log_files):
        xAndY=svl.getIterationsAndLosses(log_file,str_match);
        print xAndY
        out_file=os.path.join(out_dir,'loss_'+layer_str+'.png');
        visualize.plotSimple([xAndY],out_file,title=layer_str);
        out_files.append(out_file);

    out_file_html=os.path.join(out_dir,'losses_all.html');
    img_paths=[[util.getRelPath(out_file,'/disk3')] for out_file in out_files];
    captions=[['']]*len(out_files);
    print img_paths
    print captions
    visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=300);
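
# Hedged sketch: svl.getIterationsAndLosses is not shown in this listing.
# Based on its usage, a minimal parser for Caffe log lines such as
# 'I0101 ... solver.cpp:209] Iteration 100, loss = 0.5' might be:
def getIterationsAndLosses_sketch(log_file, str_match):
    iterations = []
    losses = []
    for line in util.readLinesFromFile(log_file):
        if str_match in line and 'loss' in line:
            rel = line[line.index(str_match) + len(str_match):]
            iterations.append(int(rel[:rel.index(',')]))
            losses.append(float(rel[rel.rindex('=') + 1:]))
    return (iterations, losses)
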
# Example #10
def script_getMinLoss():
    dir_metas=['/home/SSD3/maheen-data/horse_project/full_system_small_data',
               '/home/SSD3/maheen-data/horse_project/face_baselines_small_data'];
    file_pres,num_data=getFilePres();
    loss_dir_pre='test_images_';
    loss_dir_posts=[str(num_curr) for num_curr in range(1680,9000,1680)];
    log_file='log_test.txt';
#     num_data=
    losses_all=[];
    losses_all_end=[];
    min_loss_iter_all=[];
    for dir_meta in dir_metas:
        loss_curr=[];
        loss_end=[];
        min_loss_iter=[];
        for file_pre in file_pres:
            min_loss,min_loss_post=getMinLoss(os.path.join(dir_meta,file_pre),loss_dir_pre,loss_dir_posts,log_file);
#             print loss_dir_posts
            loss_end_curr,min_loss_post_end=getMinLoss(os.path.join(dir_meta,file_pre),loss_dir_pre,['8400'],log_file);
            loss_curr.append(min_loss);
            loss_end.append(loss_end_curr);
            min_loss_iter.append(min_loss_post);
            print min_loss,min_loss_post,loss_end_curr,min_loss_post_end;

        losses_all.append(loss_curr);
        losses_all_end.append(loss_end)
        min_loss_iter_all.append(min_loss_iter);
        
    out_file=os.path.join(dir_metas[0],'comparison_best.png');
    print len(file_pres),len(losses_all[0]),len(losses_all[1]),len(num_data)
    xAndYs=[(num_data,losses_all[0]),(num_data,losses_all[1]),\
           (num_data,losses_all_end[0]),(num_data,losses_all_end[1])];
    legend_entries=['Ours Best','Baseline Best','Ours 8400','Baseline 8400'];
    visualize.plotSimple(xAndYs,out_file,title='',xlabel='Training Data',\
                         ylabel='Average Euclidean Distance',legend_entries=legend_entries);
    print out_file.replace(dir_server,click_str)
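
# Hedged sketch: getMinLoss is not shown in this listing. From its usage it
# appears to read one scalar loss per test_images_<post> directory and return
# the minimum along with the <post> where it occurs; a minimal version, under
# the assumption that the loss is the last line of log_test.txt:
def getMinLoss_sketch(dir_curr, loss_dir_pre, loss_dir_posts, log_file):
    losses = [float(util.readLinesFromFile(os.path.join(dir_curr, loss_dir_pre + post, log_file))[-1]) for post in loss_dir_posts]
    idx_min = int(np.argmin(losses))
    return losses[idx_min], loss_dir_posts[idx_min]
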
def script_recallCheck():

    coco_eval_file='/disk2/temp/recall_check.p';
    # note. only for top 50
    out_file=coco_eval_file+'ng';  # '.../recall_check.p' + 'ng' -> '.../recall_check.png'
    coco_eval=pickle.load(open(coco_eval_file,'rb'));
    print coco_eval['recall'].shape
    
    recall=coco_eval['recall'];
    labels=['all','small','medium','large'];
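    # recall is assumed to be the pycocotools COCOeval eval['recall'] array,
    # shaped [iou_thrs x categories x area_ranges x max_dets], so axis 2
    # matches the area labels above.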
    xAndYs=[];
    for idx in range(len(labels)):
        rec_curr=recall[:,:,idx,:];
        print rec_curr.shape
        rec_curr= np.mean(rec_curr,axis=0);
        print rec_curr.shape;
        rec_curr=rec_curr.ravel();
        print rec_curr.shape;
        # raw_input();
        xAndYs.append((range(len(rec_curr)),rec_curr));

    visualize.plotSimple(xAndYs,out_file,title='Avg Recall',xlabel='Number of proposals',ylabel='Avg Recall',legend_entries=labels)
    print xAndYs[0][1][9],xAndYs[0][1][99],xAndYs[0][1][999]
    print [np.mean(a[1]) for a in xAndYs];
def main():
    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    # util.mkdir(out_dir);

    # train_file=os.path.join(out_dir,'train.txt');

    # template_deploy_file='trainval_noFix_withRandom_diffForConv.prototxt';
    # template_solver_file='solver_debug.prototxt';

    # base_lr=0.000001;
    # snapshot=1000;
    # layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    # gpu=1;
    # commands=[];
    # idx=len(layers)-4;
    # fix_layers=layers[1:idx+1];

    # layer_str='_'.join(fix_layers);
    # print layer_str;
    # # return
    # model_file_curr=model_file
    # snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    # out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    # out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    # log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    # replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    # replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    # command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    # util.writeFile(os.path.join(out_dir,'train.sh'),[command]);


    # return
    model_file='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/opt_noFix_conv1_conv2_conv3_conv4_conv5_llr__iter_50000.caffemodel'
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    solver_file='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/solver_conv1_conv2_conv3_conv4_conv5.prototxt';
    deploy_file='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/deploy_conv1_conv2_conv3_conv4_conv5.prototxt';

    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    solver_file='/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters/solver_conv1_conv2_conv3_conv4_conv5.prototxt'
    deploy_file='/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters/deploy_conv1_conv2_conv3_conv4_conv5.prototxt';    
    justCheckGradients(solver_file,deploy_file,model_file);


    return
    out_dir='/disk3/maheen_data/debug_networks/sanityCheckDebug';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    util.mkdir(out_dir);

    train_file=os.path.join(out_dir,'train.txt');

    template_deploy_file='deploy_withRandom.prototxt';
    template_solver_file='solver_debug.prototxt';

    base_lr=0.000001;
    snapshot=1000;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    gpu=1;
    commands=[];
    idx=len(layers)-1;
    fix_layers=layers[1:idx+1];

    layer_str='_'.join(fix_layers);
    print layer_str;
    
    model_file_curr=model_file
    snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    util.writeFile(os.path.join(out_dir,'train.sh'),[command]);


    return

    out_dir='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/';
    out_dir='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig_llr_diff/';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    util.mkdir(out_dir);

    train_file=os.path.join(out_dir,'train.txt');

    template_deploy_file='deploy_withRandom_yjConfig.prototxt';
    template_solver_file='solver_debug.prototxt';

    base_lr=0.00001;
    snapshot=500;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    gpu=1;
    commands=[];
    idx=len(layers)-4;
    fix_layers=layers[1:idx+1];

    layer_str='_'.join(fix_layers);
    print layer_str;
    # return
    model_file_curr=model_file
    snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    util.writeFile(os.path.join(out_dir,'train.sh'),[command]);



    return
    out_dir='/disk3/maheen_data/ft_youtube_40_noFix_noCopyFC8_FC7';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';

    util.mkdir(out_dir);
    train_txt_orig_path='/disk3/maheen_data/ft_youtube_40/train.txt';

    
    template_deploy_file='deploy_withRandom.prototxt';
    template_solver_file='solver_debug.prototxt';

    train_file=os.path.join(out_dir,'train.txt');
    
    data=util.readLinesFromFile(train_txt_orig_path);
    random.shuffle(data);
    # data[:100];
    util.writeFile(train_file,data);


    # shutil.copyfile(train_txt_orig_path,train_file);



    # out_dir='/disk3/maheen_data/ft_youtube_40_ucf_permute';
    # train_file=os.path.join(out_dir,'train_permute.txt');

    

    base_lr=0.0001;
    snapshot=2000;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    gpu=0;
    # command_file=os.path.join(out_dir,'debug_0.sh');
    commands=[];

    # for idx in range(4,len(layers)):
    #     if idx==0:
    #         fix_layers=layers[0];
    #         layer_str=str(fix_layers);
    #         model_file_curr=None;
    #     else:

    idx=len(layers)-3;
    fix_layers=layers[1:idx+1];

    layer_str='_'.join(fix_layers);
    print layer_str;

    return
    model_file_curr=model_file
    # print fix_layers
    snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    util.writeFile(os.path.join(out_dir,'train.sh'),[command]);

    # commands.append(command);

    
    # util.writeFile(command_file,commands);





    return
    # out_dir='/disk3/maheen_data/debug_networks/noFix';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    # '/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/train.txt'
    # data=util.readLinesFromFile(train_txt_orig_path);
    # random.shuffle(data);
    # # data[:100];
    # util.writeFile(train_file,data[:100]);

    # out_dir='/disk3/maheen_data/debug_networks/noFixNoCopy';
    # model_file=None;


    out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayer';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';


    util.mkdir(out_dir);
    train_txt_orig_path='/disk3/maheen_data/debug_networks/noFix/train.txt';

    deploy_file='/disk3/maheen_data/debug_networks/noFix/deploy.prototxt';
    solver_file='/disk3/maheen_data/debug_networks/noFix/solver.prototxt';

    # template_deploy_file='deploy_debug_noFix.prototxt';
    template_deploy_file='deploy_fc8NoCopy.prototxt';
    template_solver_file='solver_debug.prototxt';

    train_file=os.path.join(out_dir,'train.txt');
    

    # shutil.copyfile(train_txt_orig_path,train_file);


    

    base_lr=0.0001;
    snapshot=100;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];









    return
    out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';
    layers=['conv1','conv2','conv3','conv4','conv5','fc6','fc7'];

    layers_str=[];
    for idx in range(len(layers)):
        if idx==0:
            fix_layers=layers[0];
            layer_str=str(fix_layers);
        else:
            fix_layers=layers[1:idx+1];
            layer_str='_'.join(fix_layers);
        layers_str.append(layer_str);

    log_files=[os.path.join(out_dir,'log_'+layer_str+'.log') for layer_str in layers_str];
    str_match=' solver.cpp:209] Iteration ';
    xAndYs=[svl.getIterationsAndLosses(log_file,str_match) for log_file in log_files];

    out_files=[];
    for layer_str,log_file in zip(layers_str,log_files):
        xAndY=svl.getIterationsAndLosses(log_file,str_match);
        print xAndY
        out_file=os.path.join(out_dir,'loss_'+layer_str+'.png');
        visualize.plotSimple([xAndY],out_file,title=layer_str);
        out_files.append(out_file);

    out_file_html=os.path.join(out_dir,'losses_all.html');
    img_paths=[[util.getRelPath(out_file,'/disk3')] for out_file in out_files];
    captions=[['']]*len(out_files);
    print img_paths
    print captions
    visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=300);

    # out_file=os.path.join(out_dir,'losses_all.png');

    # print len(xAndYs);
    # print xAndYs[-2][1]

    # visualize.plotSimple(xAndYs,out_file,legend_entries=layers_str,loc=0,outside=True)

    return
    # mean_standard_proto_file='/home/maheenrashid/Downloads/debugging_jacob/opticalflow/standard.binaryproto';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    layers_to_copy=['conv1','conv2','conv3','conv4','conv5']

    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/OptFlow_youtube_hmdb__iter_5000.caffemodel';
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']

    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic_llr/OptFlow_youtube_hmdb__iter_65000.caffemodel';
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']


    # deploy_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/opt_train_coarse_xavier.prototxt';
    # solver_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/train.prototxt';
    deploy_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/deploy_debug.prototxt';
    solver_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/solver_debug.prototxt';

    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
    # layers_to_explore=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']

    
    # ,'fc6','fc7','fc8']
    layers_to_explore=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
    blobs_lr=[(0,0),(0,0),(0,0),
            # (10,20),(10,20),
            (0.1,0.2),(0.1,0.2),
            (1,2),(1,2),(1,2)]

    iterations=400;
    momentum=0.9;
    lr=0.000001;

    caffe.set_device(1)
    caffe.set_mode_gpu()



    solver=caffe.SGDSolver(solver_file);

    
    net_org=caffe.Net(deploy_file,model_file);
        
    # copy weights
    for layer_name in layers_to_copy:
        solver.net.params[layer_name][0].data[...]=net_org.params[layer_name][0].data;
        solver.net.params[layer_name][1].data[...]=net_org.params[layer_name][1].data;

    layer_names=list(solver.net._layer_names);

    ratios={};
    for key in layers_to_explore:
        ratios[key]=[];


    dict_layers={};
    for idx_curr,layer_name in enumerate(layer_names):
        print idx_curr,layer_name,
        if layer_name in solver.net.params.keys():
            print len(solver.net.params[layer_name])
            update_prev=[np.zeros(solver.net.layers[idx_curr].blobs[0].diff.shape),
                        np.zeros(solver.net.layers[idx_curr].blobs[1].diff.shape)];
            blob_lr=list(blobs_lr[layers_to_explore.index(layer_name)]);
            dict_layers[idx_curr]=[layer_name,update_prev,blob_lr];
        else:
            print 0;

    for idx_curr in dict_layers.keys():
        print idx_curr,len(dict_layers[idx_curr]),dict_layers[idx_curr][0],dict_layers[idx_curr][1][0].shape,dict_layers[idx_curr][1][1].shape,dict_layers[idx_curr][2]

    
    for iteration in range(iterations):
        print iteration
    


        solver.net.forward();
        solver.net.backward();
        
        for idx_curr in dict_layers.keys():

            rel_row=dict_layers[idx_curr]
            layer_name=rel_row[0];
            update_prev=rel_row[1][0];
            print rel_row[2][0]
            lr_curr=rel_row[2][0]*lr;
            
            diffs_curr=solver.net.params[layer_name][0].diff;
            weights_curr=solver.net.params[layer_name][0].data;

            param_scale = np.linalg.norm(weights_curr.ravel())

            update = update_prev*momentum-lr_curr*diffs_curr;
            
            update_scale = np.linalg.norm(update.ravel())
            ratio= update_scale / param_scale # want ~1e-3
            print layer_name,ratio,update_scale,param_scale
            ratios[layer_name].append(ratio);
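
        # Manual SGD-with-momentum step applied blob-by-blob below (the
        # solver's own update is bypassed): update = momentum*update_prev
        # - lr*diff, then the weights change by data -= update. Note the
        # subtraction flips the sign relative to the usual data += update.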
        
        for idx_curr,layer in enumerate(solver.net.layers):
            for idx_blob,blob in enumerate(layer.blobs):
                rel_row=dict_layers[idx_curr]
                layer_name=rel_row[0];
                update_prev=rel_row[1][idx_blob];
                lr_curr=rel_row[2][idx_blob]*lr;
                
                diffs_curr=blob.diff;
                update_curr=momentum*update_prev-(lr_curr*diffs_curr);
                blob.data[...] -= update_curr
                blob.diff[...] = np.zeros(blob.diff.shape);
                
                dict_layers[idx_curr][1][idx_blob]=update_curr;
# Example #13
def script_createHistComparative():
    out_dir_meta = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d'
    train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs = [
        dir[:-7] for dir in os.listdir(
            '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations')
        if dir.endswith('_pascal')
    ]
    layers = ['pool5', 'fc6', 'fc7']
    delta = 5
    caption_text = ['Trained', 'Not Trained']
    replace = [out_dir_meta + '/', '']
    degree = 90
    deg_to_see = 0
    # train_files=[os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # non_train_files=[os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # for idx in range(len(train_files)):

    combos = [(dir, layer) for dir in dirs for layer in layers]
    out_file_html = os.path.join(
        out_dir_meta,
        'hist_by_degree_' + str(degree) + '_comparisons_compress.html')
    img_paths = []
    captions = []

    for dir, layer in combos:

        file_train = os.path.join(
            train_pre + '_' + layer + '_all_azimuths',
            dir + '_' + str(degree) + '_' + str(delta) + '_compress_data.p')
        # train_files[idx];
        file_non_train = os.path.join(
            non_train_pre + '_' + layer + '_all_azimuths',
            dir + '_' + str(degree) + '_' + str(delta) + '_compress_data.p')
        # non_train_files[idx];

        hists_train, bins_train = pickle.load(open(file_train, 'rb'))
        hists_non_train, bins_non_train = pickle.load(
            open(file_non_train, 'rb'))

        mid_points_train = [
            (bins_train[i] + bins_train[i + 1]) / float(2)
            for i in range(len(bins_train) - 1)
        ]
        mid_points_non_train = [
            (bins_non_train[i] + bins_non_train[i + 1]) / float(2)
            for i in range(len(bins_non_train) - 1)
        ]

        # dir=file_train[file_train.rindex('/')+1:];
        # dir=dir[:dir.index('_')];
        out_file_just_file = layer + '_' + dir + '_' + str(degree) + '_' + str(
            delta) + '.png'
        out_file = os.path.join(out_dir_meta, out_file_just_file)
        title = dir + ' Comparison'
        xlabel = 'Distance Rank'
        ylabel = 'Frequency'

        # print out_file
        img_paths.append([out_file_just_file])
        captions.append([dir + ' ' + layer])

        visualize.plotSimple(zip([mid_points_train, mid_points_non_train],
                                 [hists_train, hists_non_train]),
                             out_file,
                             title=title,
                             xlabel=xlabel,
                             ylabel=ylabel,
                             legend_entries=['Trained', 'Non Trained'],
                             loc=0)
    print out_file_html
    visualize.writeHTML(out_file_html,
                        img_paths,
                        captions,
                        width=400,
                        height=400)
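    # The lines below are a fragment from a separate, truncated example:
    # p is presumably a multiprocessing.Pool, and getCountsCorrectByIdx,
    # args, and out_file_plot come from the omitted surrounding code.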
    counts_correct_all=p.map(getCountsCorrectByIdx,args);


    counts_comb=[[],[],[],[]];
    for r in range(4):
        for counts_correct in counts_correct_all:
            if len(counts_correct[r])>0:
                counts_comb[r].append(counts_correct[r]);
    counts_comb=[np.array(curr) for curr in counts_comb];
    avgs=[np.mean(curr,axis=0) for curr in counts_comb];


    print [avgs[-1][idx-1] for idx in [10,100,1000]];
    xAndYs=[(range(1,1001),counts_curr) for counts_curr in avgs];
    legend=['small','medium','large','total'];
    visualize.plotSimple(xAndYs,out_file_plot,title='Average Recall',xlabel='Number of Proposals',ylabel='Average Recall',legend_entries=legend,loc=0);
    print out_file_plot.replace('/disk3','vision3.cs.ucdavis.edu:1001');



    
    
    return
    meta_old=os.path.join(path_pedro,'meta_info.npy');
    meta_old=np.load(meta_old);
    print meta_old.shape

    meta_new=os.path.join(path_us,'meta_info.npy');
    meta_new=np.load(meta_new);
    print meta_new.shape
    print np.sum(meta_old[:,0]<0);
# Example #15
def main(argv):
    print 'hello'

    parser = argparse.ArgumentParser(description='Visualize Loss')
    parser.add_argument('-log_file',
                        dest='log_file',
                        type=str,
                        nargs='+',
                        help='log file(s) to parse')
    parser.add_argument(
        '-out_file_pre',
        dest='out_file_pre',
        type=str,
        help='loss file pre path. will be appended with _seg and _score')

    parser.add_argument('-val',
                        dest='val',
                        action='store_true',
                        help='to plot val loss')

    args = parser.parse_args(argv)

    # print args;

    # # log_file='/disk3/maheen_data/headC_160/noFlow_gaussian_human/log.txt';
    # # out_file_pre='/disk3/maheen_data/headC_160/noFlow_gaussian_human/loss';
    # getopt.getopt(args, options[, long_options])

    log_file = args.log_file
    # argv[1];
    out_file_pre = args.out_file_pre

    # argv[2];
    # out_file_seg=out_file_pre+'_seg.png';
    out_file_score = out_file_pre + '_score.png'

    start_str = 'minibatches processed: '
    # start_line_str='
    lines_all = []
    last_idx_all = []
    for log_file_curr in log_file:
        lines = util.readLinesFromFile(log_file_curr)
        lines_all.append(lines)
        lines_rev = lines[::-1]
        for line_curr in lines_rev:
            if line_curr.startswith(start_str):
                last_idx_all.append(getNumFollowing(line_curr, start_str, ','))
                break

    assert len(lines_all) == len(last_idx_all)

    scores_seg = []
    scores_score = []
    iterations = []
    scores_seg_val = []
    scores_score_val = []
    iterations_val = []
    # lines_all=lines_all[100:];
    for lines_idx, lines in enumerate(lines_all):

        score_str = ', loss = '
        # loss = 20.738169;
        # seg_str=', loss seg = ';

        if lines_idx == 0:
            to_add = 0
        else:
            to_add = last_idx_all[lines_idx - 1]

        lines_rel = [
            line for line in lines if start_str in line and score_str in line
        ]

        for line in lines_rel:
            iterations.append(getNumFollowing(line, start_str, ',') + to_add)
            # scores_seg.append(getNumFollowing(line,seg_str,','));
            scores_score.append(getNumFollowing(line, score_str, None))

        if args.val:
            score_str = ', val loss = '
            # seg_str=', val loss seg = ';

            lines_rel = [
                line for line in lines
                if line.startswith(start_str) and score_str in line
            ]

            for line in lines_rel:
                iterations_val.append(
                    getNumFollowing(line, start_str, ',') + to_add)
                # scores_seg_val.append(getNumFollowing(line,seg_str,','));
                scores_score_val.append(getNumFollowing(line, score_str, None))

        # print len(iterations);

    # num_start=60;
    # num_start_val=2;
    iterations = [
        iter_curr for idx, iter_curr in enumerate(iterations)
        if scores_score[idx] < 1
    ]
    scores_score = [
        score_curr for idx, score_curr in enumerate(scores_score)
        if scores_score[idx] < 1
    ]

    iterations_val = [
        iter_curr for idx, iter_curr in enumerate(iterations_val)
        if scores_score_val[idx] < 1
    ]
    scores_score_val = [
        score_curr for idx, score_curr in enumerate(scores_score_val)
        if scores_score_val[idx] < 1
    ]
    # iterations= iterations[num_start:];
    # scores_score= scores_score[num_start:];
    # iterations_val= iterations_val[num_start_val:];
    # scores_score_val= scores_score_val[num_start_val:];
    if not args.val:
        visualize.plotSimple([(iterations, scores_score)],
                             out_file_score,
                             title='Score Loss at ' + str(iterations[-1]),
                             xlabel='Iterations',
                             ylabel='Loss')
        # visualize.plotSimple([(iterations,scores_seg)],out_file_seg,title='Seg Loss at '+str(iterations[-1]),xlabel='Iterations',ylabel='Loss')
    else:
        visualize.plotSimple([(iterations, scores_score),
                              (iterations_val, scores_score_val)],
                             out_file_score,
                             title='Score Loss at ' + str(iterations[-1]),
                             xlabel='Iterations',
                             ylabel='Loss',
                             legend_entries=['Train', 'Val'])
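
# Hedged sketch: getNumFollowing is not shown in this listing. From its usage
# above it appears to parse the number that follows start_str, up to an
# optional terminator; a minimal version might be:
def getNumFollowing_sketch(line, start_str, end_str):
    # slice off everything up to and including start_str
    rel = line[line.index(start_str) + len(start_str):]
    if end_str is not None:
        rel = rel[:rel.index(end_str)]
    return float(rel)

# Hypothetical invocation of the script above (paths made up):
# python plot_loss.py -log_file /path/to/log.txt -out_file_pre /path/to/loss -val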
def main():


	# dirs=['/disk3/maheen_data/headC_160_withFlow_human_xavier_unit_floStumpPretrained_fullTraining/gradient_checks'];
	# # dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct_16']];
	# range_flow=range(1);
	# out_dirs=[os.path.join(dir_curr,'plots') for dir_curr in dirs];
	# [util.mkdir(out_dir_curr) for out_dir_curr in out_dirs];
	
	# model_num=range(5000,45000,5000);
	# model_num.append(45000);
	# print model_num
	
	# model_num=[str(model_num_curr) for model_num_curr in model_num]	
	# num_iters=range(1,5);
	# # num_iters=range(2,3);
	# file_pre_weight='weight_mag_n_';
	# file_pre_grad='grad_mag_n_';	


	# out_file_html=os.path.join(dirs[0],'comparison_grads_weights_ratios_n.html');

	# compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)
	
	# out_file_html=os.path.join(dirs[0],'comparison_grads_seg_no_seg_16.html');
	
	# layer_range=[26,27,28,31];
	# num_iters=range(1,5,2);
	# img_paths_seg_flow=compareGradInfoLayer([dirs[i] for i in range_flow],[out_dirs[i] for i in range_flow],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
	# layer_range=range(26,32);
	# num_iters=range(2,5,2);
	# img_paths_score_flow=compareGradInfoLayer([dirs[i] for i in range_flow],[out_dirs[i] for i in range_flow],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')

	# img_paths=[img_paths_seg_flow,img_paths_score_flow];
	# img_paths=[[util.getRelPath(path_curr,'/disk3') for path_curr in list_curr] for list_curr in img_paths];
	# captions=[];
	# for list_curr in img_paths:
	# 	captions_curr=[];
	# 	for path in list_curr:
	# 		path_split=path.split('/');
	# 		caption=path_split[-4]+' '+path_split[-3];
	# 		captions_curr.append(caption);
	# 		print caption
	# 	captions.append(captions_curr);
	# visualize.writeHTML(out_file_html,img_paths,captions,height=500,width=500);
	# print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');



	# return
	# model_dir='/disk3/maheen_data/headC_160/withFlow_human_xavier_unit_floStumpPretrained_fullTraining/intermediate'
	# out_dir_meta='/disk3/maheen_data/headC_160_withFlow_human_xavier_unit_floStumpPretrained_fullTraining'
	# out_dir=os.path.join(out_dir_meta,'gradient_checks')
	# util.mkdir(out_dir);
	# params=[(model_dir,out_dir,'40')];
	# out_file_commands_pre=os.path.join(out_dir_meta,'debug_commands_');		
	# path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_debug.th'


	# model_num=range(5000,45000,5000);
	# model_num.append(45000);
	# print model_num
	# # return
	# writeCommandsForTrainDebug(params,path_to_train_file,out_file_commands_pre,model_num)


	# return

	dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug'];
	dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct']];
	range_flow=range(1);
	# range_noflow=range(2,len(dirs));

	# dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug','/disk3/maheen_data/headC_160/noFlow_human_debug'];
	# dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct_16','incorrect']];
	# range_flow=range(2);
	# range_noflow=range(2,len(dirs));

	out_dirs=[os.path.join(dir_curr,'plots') for dir_curr in dirs];
	[util.mkdir(out_dir_curr) for out_dir_curr in out_dirs];

	model_num=range(5000,100000,20000);
	model_num.append(100000);
	
	# model_num=range(2000,32000,6000);
	# model_num.append(32000);
	
	model_num=[str(model_num_curr) for model_num_curr in model_num]	
	print model_num
	# num_iters=range(1,21);
	num_iters=range(2,3);
	file_pre_weight='weight_mag_n_';
	file_pre_grad='grad_mag_n_';	


	out_file_html=os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug','comparison_grads_weights_ratios.html');

	compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)
	# compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)
	out_file_html=os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug','comparison_grads_seg_no_seg.html');
	

	layer_range=[26,27,28,31];
	# layer_range=[27,28];

	num_iters=range(3,21,2);
	img_paths_seg_flow=compareGradInfoLayer([dirs[i] for i in range_flow],[out_dirs[i] for i in range_flow],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
	layer_range=range(26,32);
	num_iters=range(2,21,2);
	img_paths_score_flow=compareGradInfoLayer([dirs[i] for i in range_flow],[out_dirs[i] for i in range_flow],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')

	# layer_range=[13,14,17]
	# num_iters=range(1,21,2);
	# img_paths_seg_noflow=compareGradInfoLayer([dirs[i] for i in range_noflow],[out_dirs[i] for i in range_noflow],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
	# layer_range=range(13,18);
	# num_iters=range(2,21,2);
	# img_paths_score_noflow=compareGradInfoLayer([dirs[i] for i in range_noflow],[out_dirs[i] for i in range_noflow],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')

	# layer_range=[26,27,28,31];
	# num_iters=range(1,21,2);
	# img_paths_seg_flow=compareGradInfoLayer(dirs[1:],out_dirs[1:],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
	# layer_range=range(26,32);
	# num_iters=range(2,21,2);
	# num_iters=range(2,7,2);
	# img_paths_score_flow=compareGradInfoLayer(dirs[1:],out_dirs[1:],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')
	# img_paths=[img_paths_seg_flow,img_paths_score_flow,img_paths_seg_noflow,img_paths_score_noflow];

	img_paths=[img_paths_seg_flow,img_paths_score_flow];
	img_paths=[[util.getRelPath(path_curr,'/disk3') for path_curr in list_curr] for list_curr in img_paths];
	captions=[];
	for list_curr in img_paths:
		captions_curr=[];
		for path in list_curr:
			path_split=path.split('/');
			caption=path_split[-4]+' '+path_split[-3];
			captions_curr.append(caption);
			print caption
		captions.append(captions_curr);
	visualize.writeHTML(out_file_html,img_paths,captions,height=500,width=500);
	print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');




	return
	params=[('/disk3/maheen_data/headC_160/withFlow_xavier_16_score/intermediate','/disk3/maheen_data/headC_160/withFlow_human_debug/correct_16','40')];
	out_file_commands_pre='/disk3/maheen_data/headC_160/withFlow_human_debug/debug_commands_16_';		
	path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_debug.th'

	# params=[('/disk3/maheen_data/headC_160/noFlow_gaussian_human_softmax/intermediate_res','/disk3/maheen_data/headC_160/noFlow_human_debug/correct','40'),
	# 		('/disk3/maheen_data/headC_160/noFlow_gaussian_human/intermediate','/disk3/maheen_data/headC_160/noFlow_human_debug/incorrect','56')];
	# [util.mkdir(params_curr[1]) for params_curr in params];
	# out_file_commands_pre='/disk3/maheen_data/headC_160/noFlow_human_debug/debug_commands_';
	# path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_debug.th'	

	model_num=range(2000,32000,6000);
	model_num.append(32000);
	print model_num
	return
	writeCommandsForTrainDebug(params,path_to_train_file,out_file_commands_pre,model_num)



	# maheen_data/headC_160/withFlow_xavier_16_score/intermediate/

	return
	dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug','/disk3/maheen_data/headC_160/noFlow_human_debug'];
	# out_file_html=os.path.join(dirs[0],'comparison_dloss_seg_score.html');
	dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct']];
	out_dirs=[os.path.join(dir_curr,'plots') for dir_curr in dirs];
	file_seg = 'loss_seg.npy';
	file_score = 'loss_score.npy';
	num_iters=range(2,21);
	model_num=range(5000,100000,20000);
	model_num.append(100000);
	model_num=[str(model_num_curr) for model_num_curr in model_num]	

	img_paths=[];
	captions=[];

	for dir_curr,out_dir_curr in zip(dirs,out_dirs):
		dict_for_plotting={'loss_seg_all':OrderedDict(),'loss_score_all':OrderedDict(),'loss_ratio_all':OrderedDict()};
		for model_num_curr in model_num:
			file_curr_seg=os.path.join(dir_curr,model_num_curr,file_seg);
			file_curr_score=os.path.join(dir_curr,model_num_curr,file_score);

			score_all=np.load(file_curr_score);
			score_all=score_all[[0]+range(1,len(score_all),2)];
			score_all=score_all*32


			seg_all=np.load(file_curr_seg);
			seg_all=seg_all[range(0,len(seg_all),2)];

			ratios=seg_all/score_all;
			print dir_curr,model_num_curr
			print np.mean(score_all),np.mean(seg_all),np.mean(ratios);

		# 	break;
		# break;			
				# if num_iter_curr==2:
				# 	score_all.append(np.load(file_curr_score));
				# 	seg_all.append(np.load(file_curr_seg));
				# elif num_iter_curr%2==0:
				# 	score_all.append(np.load(file_curr_score));
				# else:
				# 	seg_all.append(np.load(file_curr_seg));


				

				# seg_curr=np.load(file_curr_seg);
				# seg_curr=np.unique(np.ravel(seg_curr));
				# score_curr=list(np.load(file_curr_score));
				# score_curr=list(np.unique(np.ravel(score_curr)));
				# seg_all.extend(seg_curr);
				# score_all.extend(score_curr);

	# 		seg_all=list(set(seg_all));
	# 		score_all=list(set(score_all));
	# 		seg_all.sort();
	# 		score_all.sort();
			
	# 		dict_for_plotting['seg_all'][model_num_curr]=seg_all;
	# 		dict_for_plotting['score_all'][model_num_curr]=score_all;

	# 	img_paths_curr=[];
	# 	captions_curr=[];
	# 	for key_curr in dict_for_plotting.keys():
	# 		out_file_curr=os.path.join(out_dir_curr,key_curr+'.png');
	# 		data=dict_for_plotting[key_curr];
	# 		xAndYs=data.values();
	# 		legend_entries=data.keys();
	# 		xAndYs=[(range(len(x_curr)),x_curr) for x_curr in xAndYs];
	# 		visualize.plotSimple(xAndYs,out_file_curr,title=key_curr,xlabel='sorted idx',ylabel='values',legend_entries=legend_entries,outside=True);
	# 		print out_file_curr.replace('/disk3','vision3.cs.ucdavis.edu:1001');
	# 		img_paths_curr.append(util.getRelPath(out_file_curr,'/disk3'));
	# 		# print dir_curr.split('/');
	# 		captions_curr.append(dir_curr.split('/')[-2]+' '+dir_curr.split('/')[-1]+' '+key_curr);

	# 	img_paths.append(img_paths_curr);
	# 	captions.append(captions_curr);

	# visualize.writeHTML(out_file_html,img_paths,captions,height=200,width=200);
	# print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');			

	

	return
	dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug','/disk3/maheen_data/headC_160/noFlow_human_debug'];
	out_file_html=os.path.join(dirs[0],'comparison_dloss_seg_score.html');
	dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct']];
	out_dirs=[os.path.join(dir_curr,'plots') for dir_curr in dirs];
	file_seg_pre='dloss_seg_';
	file_score_pre='dloss_score_';
	num_iters=range(2,21);
	model_num=range(25000,100000,20000);
	model_num.append(100000);
	model_num=[str(model_num_curr) for model_num_curr in model_num]	
	
	img_paths=[];
	captions=[];
	
	for dir_curr,out_dir_curr in zip(dirs,out_dirs):
		dict_for_plotting=OrderedDict([('seg_all',OrderedDict()),('score_all',OrderedDict())]);
		for model_num_curr in model_num:
			seg_all=[];
			score_all=[];
			for num_iter_curr in num_iters:
				file_curr_seg=os.path.join(dir_curr,model_num_curr,file_seg_pre+str(num_iter_curr)+'.npy');
				file_curr_score=os.path.join(dir_curr,model_num_curr,file_score_pre+str(num_iter_curr)+'.npy');

				seg_curr=np.load(file_curr_seg);
				seg_curr=np.unique(np.ravel(seg_curr));
				score_curr=list(np.load(file_curr_score));
				score_curr=list(np.unique(np.ravel(score_curr)));
				seg_all.extend(seg_curr);
				score_all.extend(score_curr);

			seg_all=list(set(seg_all));
			score_all=list(set(score_all));
			seg_all.sort();
			score_all.sort();
			
			dict_for_plotting['seg_all'][model_num_curr]=seg_all;
			dict_for_plotting['score_all'][model_num_curr]=score_all;

		img_paths_curr=[];
		captions_curr=[];
		for key_curr in dict_for_plotting.keys():
			out_file_curr=os.path.join(out_dir_curr,key_curr+'.png');
			data=dict_for_plotting[key_curr];
			xAndYs=data.values();
			legend_entries=data.keys();
			xAndYs=[(range(len(x_curr)),x_curr) for x_curr in xAndYs];
			visualize.plotSimple(xAndYs,out_file_curr,title=key_curr,xlabel='sorted idx',ylabel='values',legend_entries=legend_entries,outside=True);
			print out_file_curr.replace('/disk3','vision3.cs.ucdavis.edu:1001');
			img_paths_curr.append(util.getRelPath(out_file_curr,'/disk3'));
			# print dir_curr.split('/');
			captions_curr.append(dir_curr.split('/')[-2]+' '+dir_curr.split('/')[-1]+' '+key_curr);

		img_paths.append(img_paths_curr);
		captions.append(captions_curr);

	visualize.writeHTML(out_file_html,img_paths,captions,height=200,width=200);
	print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');			


	# dloss_seg=np.load(file_curr_seg);
	# dloss_seg=np.mean(dloss_seg,axis=0);
	# print dloss_seg.shape;
	# dloss_seg=dloss_seg[0];
	# print dloss_seg.shape;

	# dloss_score=np.load(file_curr_score);
	# print dloss_score.shape;
	# print np.min(dloss_score);
	# print np.max(dloss_score);
	# print np.min(dloss_seg);
	# print np.max(dloss_seg);


	# # print dloss_seg[0],np.min(dloss_seg),np.max(dloss_seg);
	# print file_curr;

		

	return
	dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug','/disk3/maheen_data/headC_160/noFlow_human_debug'];
	out_file_html=os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug','comparison_grads_weights_ratios.html');
	
	# dirs=['/disk3/maheen_data/headC_160/withFlow_human_debug'];
	out_file_html=os.path.join('/disk3/maheen_data/headC_160/withFlow_human_debug','comparison_grads_seg_no_seg.html');
	

	dirs=[os.path.join(dir_curr,dir_in) for dir_curr in dirs for dir_in in ['correct','incorrect']];
	out_dirs=[os.path.join(dir_curr,'plots') for dir_curr in dirs];
	[util.mkdir(out_dir_curr) for out_dir_curr in out_dirs];

	model_num=range(5000,100000,20000);
	model_num.append(100000);
	model_num=[str(model_num_curr) for model_num_curr in model_num]	
	num_iters=range(1,21);
	file_pre_weight='weight_mag_';
	file_pre_grad='grad_mag_';	

	# file_curr=os.path.join(dirs[0],model_num[-1],file_pre_grad+'1.npy');
	# grads=np.load(file_curr);
	# print grads.shape;
	# grads=grads[::2];
	# print grads.shape;
	# print grads[26:]

	# compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html)
	# layer_range=range(26,32);
	layer_range=[26,27,28,31];
	num_iters=range(1,21,2);
	img_paths_seg_flow=compareGradInfoLayer(dirs[:2],out_dirs[:2],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
	layer_range=range(26,32);
	num_iters=range(2,21,2);
	img_paths_score_flow=compareGradInfoLayer(dirs[:2],out_dirs[:2],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')

	# layer_range=range(13,18)
	layer_range=[13,14,17]
	num_iters=range(1,21,2);
	img_paths_seg_noflow=compareGradInfoLayer(dirs[2:],out_dirs[2:],model_num,num_iters,file_pre_grad,layer_range,'seg_grad_mag')
	layer_range=range(13,18);
	num_iters=range(2,21,2);
	img_paths_score_noflow=compareGradInfoLayer(dirs[2:],out_dirs[2:],model_num,num_iters,file_pre_grad,layer_range,'score_grad_mag')

	img_paths=[img_paths_seg_flow,img_paths_score_flow,img_paths_seg_noflow,img_paths_score_noflow];
	img_paths=[[util.getRelPath(path_curr,'/disk3') for path_curr in list_curr] for list_curr in img_paths];
	# print img_paths
	captions=[];
	# path='../../../../../../..//maheen_data/headC_160/noFlow_human_debug/incorrect/plots/score_grad_mag.png'
	for list_curr in img_paths:
		captions_curr=[];
		for path in list_curr:
			path_split=path.split('/');
			caption=path_split[-4]+' '+path_split[-3];
			captions_curr.append(caption);
			print caption
		captions.append(captions_curr);
	visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=300);
	print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');

	# out_files=compareGradInfoLayer(dirs,out_dirs,model_num,num_iters,file_pre_grad,layer_range)

		# break;
			

			
	return
	params=[('/disk3/maheen_data/headC_160/withFlow_gaussian_human_softmax/intermediate','/disk3/maheen_data/headC_160/withFlow_human_debug/correct','40'),
					('/disk3/maheen_data/headC_160/withFlow_gaussian_human/intermediate','/disk3/maheen_data/headC_160/withFlow_human_debug/incorrect','56')];
	out_file_commands_pre='/disk3/maheen_data/headC_160/withFlow_human_debug/debug_commands_';		
	path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_withFlow_debug.th'

	# params=[('/disk3/maheen_data/headC_160/noFlow_gaussian_human_softmax/intermediate_res','/disk3/maheen_data/headC_160/noFlow_human_debug/correct','40'),
	# 		('/disk3/maheen_data/headC_160/noFlow_gaussian_human/intermediate','/disk3/maheen_data/headC_160/noFlow_human_debug/incorrect','56')];
	# [util.mkdir(params_curr[1]) for params_curr in params];
	# out_file_commands_pre='/disk3/maheen_data/headC_160/noFlow_human_debug/debug_commands_';
	# path_to_train_file='/home/maheenrashid/Downloads/deep_proposals/torch_new/headC_160_noFlow_debug.th'	

	model_num=range(5000,100000,20000);
	model_num.append(100000);
	writeCommandsForTrainDebug(params,path_to_train_file,out_file_commands_pre,model_num)
	
	return
	print 'hello';
	dir_debug_with_flow='/disk3/maheen_data/headC_160/withFlow_human_debug';
	dir_debug_no_flow='/disk3/maheen_data/headC_160/noFlow_human_debug';
	score_dir='score_gradient_start';
	seg_dir='seg_gradient_start';

	dirs=[os.path.join(dir_debug_no_flow,score_dir),os.path.join(dir_debug_with_flow,score_dir),
		os.path.join(dir_debug_no_flow,seg_dir),os.path.join(dir_debug_with_flow,seg_dir)]

	for dir_curr in dirs:
		np_files=util.getFilesInFolder(dir_curr,'.npy');
		np_nums=[int(file_curr[file_curr.rindex('_')+1:file_curr.rindex('.')]) for file_curr in np_files];
		sort_idx=np.argsort(np_nums);
		np_files=np.array(np_files)[sort_idx];

		gradients=getGradientMags(np_files);
		print dir_curr;
		print len(gradients),np.mean(gradients),min(gradients),max(gradients);
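
# Hedged sketch: getGradientMags is not shown in this listing. Consistent
# with its usage above (one scalar magnitude per saved .npy gradient file),
# a minimal version might be:
def getGradientMags_sketch(np_files):
	# L2 norm of each saved gradient array
	return [np.linalg.norm(np.load(file_curr)) for file_curr in np_files]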