Code example #1
File: pascal_3d.py Project: maheenRashid/caffe
def script_createHistDifferenceHTML():
    out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
    train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
    layers=['pool5','fc6','fc7'];
    degrees=[0,45,90,135,180];
    delta=5;
    caption_text=['Trained','Not Trained'];
    replace=[out_dir_meta+'/',''];
    degree=90;
    for layer in layers:
        out_file_html=os.path.join(out_dir_meta,layer+'_all_azimuths'+'.html')

        img_paths=[];
        caption_paths=[];
        for dir in dirs:
            

            img_paths_row=[];
            caption_paths_row=[];    

            for idx,file_pre in enumerate([train_pre,non_train_pre]):        
                curr_dir=os.path.join(file_pre+'_'+layer+'_all_azimuths');
                im_file=os.path.join(curr_dir,dir+'_'+str(degree)+'_'+str(delta)+'_compress.png');
                
                img_paths_row.append(im_file.replace(replace[0],replace[1]));
                caption_paths_row.append(caption_text[idx]+' '+layer+' '+dir);

            img_paths.append(img_paths_row);
            caption_paths.append(caption_paths_row);
        
        visualize.writeHTML(out_file_html,img_paths,caption_paths,height=400,width=400);
        print out_file_html
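Note: every example on this page feeds visualize.writeHTML a 2D grid of image paths plus a matching 2D grid of caption strings. The helper itself is not shown here; the following is a minimal sketch of what it presumably does (render the grid as an HTML table), written as a hypothetical stand-in rather than the project's actual implementation. The default height/width values are assumptions.

def writeHTML_sketch(out_file_html, img_paths, captions, height=200, width=200):
    # img_paths and captions are parallel 2D lists: one row of captions
    # per row of images, as built by the examples on this page
    lines = ['<html><body><table border="1">']
    for img_row, caption_row in zip(img_paths, captions):
        # one table row for the captions, one for the images beneath them
        lines.append('<tr>')
        for caption in caption_row:
            lines.append('<td>' + str(caption) + '</td>')
        lines.append('</tr>')
        lines.append('<tr>')
        for img_path in img_row:
            lines.append('<td><img src="%s" height="%d" width="%d"></td>'
                         % (img_path, height, width))
        lines.append('</tr>')
    lines.append('</table></body></html>')
    with open(out_file_html, 'w') as f:
        f.write('\n'.join(lines))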
Code example #2
def script_visualizeScoreResults(test_output_file,gt_output_file,gt_data_output_file,out_file_html,rel_path,means,out_dir):
	data=np.load(gt_data_output_file);
	gt_label=np.load(gt_output_file);
	pred_label=np.load(test_output_file);
	print data.shape,gt_label.shape,pred_label.shape

	# print data.shape;
	im_paths=[];captions=[];
	correct=0;
	for im_no in range(data.shape[0]):
		data_path=os.path.join(out_dir,str(im_no)+'_data.png');
		# gt_path=os.path.join(out_dir,str(im_no)+'_gt.png');
		# pred_path=os.path.join(out_dir,str(im_no)+'_pred.png');
		# scipy.misc.imsave(data_path,reshapeMat(data[im_no],means));

		visualize.saveMatAsImage(reshapeMat(data[im_no],means)/255,data_path);
		pred_label_curr=pred_label[im_no,0];
		gt_label_curr=gt_label[im_no,0];
		# visualize.saveMatAsImage(reshapeMat(gt[im_no],means),gt_path);
		# visualize.saveMatAsImage(reshapeMat(out[im_no],means),pred_path);
		im_paths.append([data_path.replace(rel_path[0],rel_path[1])]);
		# same sign for prediction and ground truth counts as correct
		if (pred_label_curr*gt_label_curr)>=0:
			correct=correct+1;

		captions.append(['Pred '+str(pred_label_curr)+' GT '+str(gt_label_curr)]);
		# if im_no==10:
		# 	break;
	print correct
	visualize.writeHTML(out_file_html,im_paths,captions,height=224,width=224);
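Note: visualize.saveMatAsImage is used above to dump a 2D array to disk as an image. A minimal sketch, assuming it simply renders the matrix with matplotlib and saves the figure; saveMatAsImage_sketch is hypothetical, and the real helper may pick a colormap or value normalization differently.

import matplotlib
matplotlib.use('Agg')  # file output only; these scripts run headless
import matplotlib.pyplot as plt

def saveMatAsImage_sketch(mat, out_file):
    plt.figure()
    plt.imshow(mat)  # default colormap; an assumption
    plt.axis('off')
    plt.savefig(out_file, bbox_inches='tight')
    plt.close()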
Code example #3
def script_visualizeHashBinDensity(params):

    hash_tables = params.hash_tables
    in_files = params.in_files
    out_files = params.out_files
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    bins = params.bins
    height_width = params.height_width

    min_maxs = []
    for file_idx, in_file in enumerate(in_files):
        densities = pickle.load(open(in_file, 'rb'))
        densities = densities.values()
        min_maxs.append((min(densities), max(densities)))
        visualize.hist(densities,
                       out_files[file_idx],
                       bins=bins,
                       normed=True,
                       xlabel='Bin Density',
                       ylabel='Frequency',
                       title="Hash Bins' Density")

    img_files_html = [[out_file.replace(rel_path[0], rel_path[1])]
                      for out_file in out_files]
    captions_html = []
    for idx_hash_table, hash_table in enumerate(hash_tables):
        caption_curr = str(hash_table) + ' ' + str(min_maxs[idx_hash_table])
        captions_html.append([caption_curr])

    visualize.writeHTML(out_file_html, img_files_html, captions_html,
                        height_width[0], height_width[1])
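Note: visualize.hist appears throughout these examples with the keywords bins, normed, xlabel, ylabel, title, and cumulative. A minimal sketch, assuming it wraps matplotlib's hist() and saves the plot to the given output file; hist_sketch is hypothetical, not the project's code.

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

def hist_sketch(vals, out_file, bins=10, normed=False,
                xlabel='', ylabel='', title='', cumulative=False):
    plt.figure()
    # 'normed' was the pre-2.1 matplotlib name for what is now 'density'
    plt.hist(vals, bins=bins, density=normed, cumulative=cumulative)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    plt.savefig(out_file)
    plt.close()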
Code example #4
def script_visualizeNNComparisonWithHash(params):
    in_file = params.in_file
    in_file_hash = params.in_file_hash
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    topn = params.topn
    img_size = params.img_size

    [
        _, _, labels_test, labels_train, img_paths_test, img_paths_train,
        indices, _
    ] = pickle.load(open(in_file, 'rb'))
    [indices_hash, _, _] = pickle.load(open(in_file_hash, 'rb'))

    img_paths_nn, captions_nn = getImgPathsAndCaptionsNN(
        indices, img_paths_test, img_paths_train, labels_test, labels_train,
        rel_path)
    img_paths_hash, captions_hash = getImgPathsAndCaptionsNN(
        indices_hash, img_paths_test, img_paths_train, labels_test,
        labels_train, rel_path)

    img_paths_all = []
    captions_all = []
    for idx in range(len(img_paths_nn)):
        img_paths_all.append(img_paths_nn[idx][:topn])
        img_paths_all.append(img_paths_hash[idx][:topn])
        captions_all.append([x + ' nn' for x in captions_nn[idx][:topn]])
        captions_all.append([x + ' hash' for x in captions_hash[idx][:topn]])

    visualize.writeHTML(out_file_html, img_paths_all, captions_all,
                        img_size[0], img_size[1])
Code example #5
def visualizeRankedPatchesPerClass(class_score_info, num_to_display,
                                   out_file_html, rel_path, height_width):

    img_paths_all = []
    captions_all = []

    for selected_class, class_label, out_file in class_score_info:
        [list_scores, list_files] = pickle.load(open(out_file, 'rb'))
        num_patches = len(list_files)
        img_paths, scores_picked, idx_display = getNRankedPatches(
            list_scores, list_files, num_to_display)
        img_paths = [
            img_path.replace(rel_path[0], rel_path[1])
            for img_path in img_paths
        ]
        captions = []
        for idx_idx_curr, idx_curr in enumerate(idx_display):
            score_curr = round(scores_picked[idx_idx_curr], 5)
            caption_curr = str(idx_idx_curr) + ' Rank ' + str(
                idx_curr +
                1) + ' of ' + str(num_patches) + ' Score: ' + str(score_curr)
            caption_curr = class_label + ' ' + caption_curr
            captions.append(caption_curr)
        # captions=[[list_scores[sort_idx[idx_curr]],5))] for idx_idx_curr,idx_curr in enumerate(idx_display)]
        # print captions
        img_paths_all.append(img_paths)
        captions_all.append(captions)

    img_paths_all = np.array(img_paths_all).T
    captions_all = np.array(captions_all).T
    visualize.writeHTML(out_file_html, img_paths_all, captions_all,
                        height_width[0], height_width[1])
    print out_file_html
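Note: getNRankedPatches is not defined on this page. From the caption code above ('Rank idx_curr+1 of num_patches') it returns image paths, the scores picked, and the rank positions of the picked entries. The sketch below is one plausible reading, sampling evenly spaced ranks so the page spans best to worst; the real helper may pick ranks differently.

import numpy as np

def getNRankedPatches_sketch(list_scores, list_files, num_to_display):
    sort_idx = np.argsort(list_scores)[::-1]  # indices sorted by descending score
    num_patches = len(list_files)
    # hypothetical choice: evenly spaced rank positions across the full ranking
    idx_display = np.round(np.linspace(0, num_patches - 1,
                                       num_to_display)).astype(int)
    img_paths = [list_files[sort_idx[i]] for i in idx_display]
    scores_picked = [list_scores[sort_idx[i]] for i in idx_display]
    return img_paths, scores_picked, idx_display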
Code example #6
def script_compareHashWithToyExperiment(params):
    in_file = params.in_file;
    num_hash_tables_all = params.num_hash_tables_all;
    key_type = params.key_type;
    out_file_indices = params.out_file_indices;
    out_file_pres = params.out_file_pres;
    out_file_html = params.out_file_html;
    rel_path = params.rel_path;

    [features_test,features_train,labels_test,labels_train,_,_,indices,_]=pickle.load(open(in_file,'rb'));
    visualize.saveMatAsImage(indices,out_file_indices);    
    
    hammings=[];
    for out_file_pre,num_hash_tables in zip(out_file_pres,num_hash_tables_all):
        indices_hash = getIndicesHash(features_test,features_train,num_hash_tables,key_type);
        visualize.saveMatAsImage(indices_hash,out_file_pre+'.png');    
        hamming=util.getHammingDistance(indices,indices_hash);
        pickle.dump([indices_hash,indices,hamming],open(out_file_pre+'.p','wb'));

        hammings.append(np.mean(hamming));
    
    sizes = scipy.misc.imread(out_file_indices);
    sizes = sizes.shape

    im_files_html=[];
    captions_html=[];
    for idx,out_file_pre in enumerate(out_file_pres):
        out_file_curr=out_file_pre+'.png'
        key_str=str(key_type);
        key_str=key_str.replace('<type ','').replace('>','');
        caption_curr='NN Hash. Num Hash Tables: '+str(num_hash_tables_all[idx])+' '+'Hamming Distance: '+str(hammings[idx]);
        im_files_html.append([out_file_indices.replace(rel_path[0],rel_path[1]),out_file_curr.replace(rel_path[0],rel_path[1])])
        captions_html.append(['NN cosine',caption_curr]);

    visualize.writeHTML(out_file_html,im_files_html,captions_html,sizes[0]/2,sizes[1]/2);
Code example #7
def script_visualizePatchesByAngleDifference(params):
    out_file=params.out_file
    rel_path=params.rel_path
    output=getNNRankComparisonInfo(params)
    
    img_paths_test = output['img_paths_test']
    img_paths_train = output['img_paths_nn_train']
    img_paths_no_train = output['img_paths_nn_no_train']
    nn_rank_train = output['nn_rank_train']
    nn_rank_no_train = output['nn_rank_no_train']
    
    html_img_paths=[];
    captions=[];
    for idx,org_img_path in enumerate(img_paths_test):
        for nn_rank_row,img_path_row in [(nn_rank_train[idx],img_paths_train[idx]),(nn_rank_no_train[idx],img_paths_no_train[idx])]:
            html_row=[];
            caption_row=[];
            html_row.append(org_img_path.replace(rel_path[0],rel_path[1]));
            caption_row.append('Test Image ')
            for idx_im,im in enumerate(img_path_row):
                html_row.append(im.replace(rel_path[0],rel_path[1]));
                caption_row.append(str(nn_rank_row[idx_im])+' '+str(idx))
            html_img_paths.append(html_row);
            captions.append(caption_row);
    visualize.writeHTML(out_file,html_img_paths,captions);
Code example #8
def script_visualizeHashBinDensity(params):

    hash_tables = params.hash_tables
    in_files = params.in_files
    out_files = params.out_files
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    bins = params.bins
    height_width = params.height_width

    min_maxs=[];
    for file_idx,in_file in enumerate(in_files):
        densities=pickle.load(open(in_file,'rb'));
        densities=densities.values();
        min_maxs.append((min(densities),max(densities)))
        visualize.hist(densities,out_files[file_idx],bins=bins,normed=True,xlabel='Bin Density',
            ylabel='Frequency',title="Hash Bins' Density")

    img_files_html=[[out_file.replace(rel_path[0],rel_path[1])] for out_file in out_files];
    captions_html=[]
    for idx_hash_table,hash_table in enumerate(hash_tables):
        caption_curr=str(hash_table)+' '+str(min_maxs[idx_hash_table]);
        captions_html.append([caption_curr]);

    visualize.writeHTML(out_file_html,img_files_html,captions_html,height_width[0],height_width[1]);
Code example #9
def script_visualizePatchesByAngleDifference(params):
    out_file = params.out_file
    rel_path = params.rel_path
    output = getNNRankComparisonInfo(params)

    img_paths_test = output['img_paths_test']
    img_paths_train = output['img_paths_nn_train']
    img_paths_no_train = output['img_paths_nn_no_train']
    nn_rank_train = output['nn_rank_train']
    nn_rank_no_train = output['nn_rank_no_train']

    html_img_paths = []
    captions = []
    for idx, org_img_path in enumerate(img_paths_test):
        for nn_rank_row, img_path_row in [
            (nn_rank_train[idx], img_paths_train[idx]),
            (nn_rank_no_train[idx], img_paths_no_train[idx])
        ]:
            html_row = []
            caption_row = []
            html_row.append(org_img_path.replace(rel_path[0], rel_path[1]))
            caption_row.append('Test Image ')
            for idx_im, im in enumerate(img_path_row):
                html_row.append(im.replace(rel_path[0], rel_path[1]))
                caption_row.append(str(nn_rank_row[idx_im]) + ' ' + str(idx))
            html_img_paths.append(html_row)
            captions.append(caption_row)
    visualize.writeHTML(out_file, html_img_paths, captions)
Code example #10
File: pascal_3d.py Project: maheenRashid/caffe
def script_createHistsWithSpecificAngleHtml():
    out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
    train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
    layers=['pool5','fc6','fc7'];
    deg_to_see=0;
    degree=90;
    delta=5;

    out_file_html=os.path.join('/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d','hist_angle_restrict_'+str(deg_to_see)+'_'+str(degree)+'_comparison_non_compress.html');
    replace=['/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/',''];

    img_paths=[];
    captions=[];
    for dir in dirs:
        for layer in layers:
            single_row=[];
            single_row_caption=[];
            for caption_curr,file_pre in [('Trained',train_pre),('Not trained',non_train_pre)]:
                curr_dir=file_pre+'_'+layer+'_all_azimuths'
                img_path=os.path.join(curr_dir,dir+'_'+str(deg_to_see)+'_'+str(degree)+'_'+str(delta)+'_non_compress.png');
                img_path=img_path.replace(replace[0],replace[1]);
                single_row.append(img_path);
                single_row_caption.append(caption_curr+' '+dir+' '+layer);
            img_paths.append(single_row);
            captions.append(single_row_caption);

    visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=400)
Code example #11
def visualizeBestTubeRank(params):
    class_labels_map = params.class_labels_map
    rel_path = params.rel_path
    out_file_html = params.out_file_html
    out_dir = params.out_dir
    score_info_file = params.score_info_file
    
    [class_labels,class_idx_map]=zip(*class_labels_map);
    class_labels=list(class_labels);
    class_idx_map=list(class_idx_map);
    [score_files,score_files_info]=pickle.load(open(score_info_file,'rb'));
    class_idx=np.unique(score_files_info[:,0]);
    tubes=np.unique(score_files_info[:,3])
    best_tubes_overall=score_files_info[:,3];
    out_file=os.path.join(out_dir,'overall.png');
    visualize.hist(best_tubes_overall,out_file,bins=tubes,normed=True,xlabel='Tube_idx',ylabel='Frequency',title='Best Tube Over All',cumulative=False)

    img_paths=[];
    captions=[];
    for class_idx_curr in class_idx:

        label=class_labels[class_idx_map.index(class_idx_curr)]
        out_file=os.path.join(out_dir,label+'.png');
        img_paths.append([out_file.replace(rel_path[0],rel_path[1])]);
        captions.append([label]);
        rel_tubes=score_files_info[score_files_info[:,0]==class_idx_curr,3];
        print class_idx_curr,rel_tubes.shape,min(rel_tubes),max(rel_tubes);
        visualize.hist(rel_tubes,out_file,bins=tubes,normed=True,xlabel='Tube Idx',ylabel='Frequency',title='Best Tube '+label,cumulative=False)        
    # visualize.save

    visualize.writeHTML(out_file_html,img_paths,captions,400,400);
Code example #12
def visualizeRankedPatchesPerClass(class_score_info,num_to_display,out_file_html,rel_path,height_width):
    
    img_paths_all=[];
    captions_all=[];
    
    for selected_class,class_label,out_file in class_score_info:
        [list_scores,list_files] = pickle.load(open(out_file,'rb'));
        num_patches=len(list_files);
        img_paths,scores_picked,idx_display=getNRankedPatches(list_scores,list_files,num_to_display)
        img_paths=[img_path.replace(rel_path[0],rel_path[1]) for img_path in img_paths];
        captions=[];
        for idx_idx_curr,idx_curr in enumerate(idx_display):
            score_curr=round(scores_picked[idx_idx_curr],5)
            caption_curr=str(idx_idx_curr)+' Rank '+str(idx_curr+1)+' of '+str(num_patches)+' Score: '+str(score_curr);
            caption_curr=class_label+' '+caption_curr;
            captions.append(caption_curr);
        # captions=[[list_scores[sort_idx[idx_curr]],5))] for idx_idx_curr,idx_curr in enumerate(idx_display)]
        # print captions
        img_paths_all.append(img_paths);
        captions_all.append(captions);

    img_paths_all=np.array(img_paths_all).T
    captions_all=np.array(captions_all).T
    visualize.writeHTML(out_file_html,img_paths_all,captions_all,height_width[0],height_width[1]);
    print out_file_html
Code example #13
def script_createComparativeHtmls():
    layers = ['pool5', 'fc6', 'fc7']
    path_to_anno = '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations'
    file_dir = '/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB'
    dirs = [
        dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')
    ]
    file_name = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    file_name_alt = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    replace_paths = ['/disk2', '../../../..']
    out_file_pre = 'nn_performance_comparison_trained_notrained'
    out_file_pre = os.path.join(
        '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d',
        out_file_pre)
    for layer in layers:
        file_name_l = file_name + '_' + layer
        [img_paths, gt_labels, indices,
         _] = pickle.load(open(file_name_l + '.p', 'rb'))

        idx_sort_binned = script_nearestNeigbourExperiment.sortByPerformance(
            indices, gt_labels, 1, perClass=True)

        img_paths = [
            x.replace(replace_paths[0], replace_paths[1]) for x in img_paths
        ]
        im_paths, captions = visualize.createImageAndCaptionGrid(
            img_paths, gt_labels, indices, dirs)

        file_name_l = file_name_alt + '_' + layer
        [img_paths_alt, gt_labels_alt, indices,
         _] = pickle.load(open(file_name_l + '.p', 'rb'))

        img_paths_alt = [
            x.replace(replace_paths[0], replace_paths[1])
            for x in img_paths_alt
        ]

        # grid for the alternate (untrained) model, built from its own paths and labels
        im_paths_alt, captions_alt = visualize.createImageAndCaptionGrid(
            img_paths_alt, gt_labels_alt, indices, dirs)

        im_paths_alt = [
            im_paths_alt[img_paths_alt.index(curr_img_path)]
            for curr_img_path in img_paths
        ]
        captions_alt = [
            captions_alt[img_paths_alt.index(curr_img_path)]
            for curr_img_path in img_paths
        ]

        im_paths_big = []
        captions_big = []
        for idx_curr in idx_sort_binned:
            im_paths_big.append(im_paths[idx_curr])
            im_paths_big.append(im_paths_alt[idx_curr])
            captions_big.append(captions[idx_curr])
            captions_big.append(captions_alt[idx_curr])

        visualize.writeHTML(out_file_pre + '_' + layer + '.html', im_paths_big,
                            captions_big)
Code example #14
def script_visualizePascalNeighborsFromOtherClass(params):
    db_path_out = params.db_path_out
    class_id_pascal = params.class_id_pascal
    limit = params.limit
    layer = params.layer
    trainFlag = params.trainFlag
    rel_path = params.rel_path
    out_file_html = params.out_file_html
    top_n = params.top_n
    height_width = params.height_width

    mani = Imagenet_Manipulator(db_path_out)
    mani.openSession()
    vals = mani.select(
        (Imagenet.idx, Imagenet.img_path, Imagenet.class_label_imagenet, Imagenet.neighbor_index),
        (Imagenet.class_id_pascal == class_id_pascal, Imagenet.layer == layer, Imagenet.trainedClass == trainFlag),
        distinct=True,
        limit=limit,
    )
    print len(vals)

    idx_non_pascal = mani.select(
        (Imagenet.idx,),
        (Imagenet.class_id_pascal == None, Imagenet.layer == layer, Imagenet.trainedClass == trainFlag),
        distinct=True,
    )

    img_paths_html = []
    captions_html = []
    for val_curr in vals:
        html_row = []
        captions_row = []

        img_path = val_curr[1]
        imagenet_label = val_curr[2]
        nearest_neighbor = val_curr[3]
        # keep only neighbors drawn from non-pascal classes
        remove_bool = np.in1d(nearest_neighbor, idx_non_pascal, invert=True)
        nearest_neighbor = np.delete(nearest_neighbor, np.where(remove_bool))

        html_row.append(img_path.replace(rel_path[0], rel_path[1]))
        captions_row.append("test image " + imagenet_label)

        for idx_curr in range(top_n):
            idx_nn = nearest_neighbor[idx_curr]
            (img_path_nn, class_id_imagenet, class_label_imagenet) = mani.select(
                (Imagenet.img_path, Imagenet.class_id_imagenet, Imagenet.class_label_imagenet),
                (Imagenet.idx == idx_nn, Imagenet.layer == layer, Imagenet.trainedClass == trainFlag),
                distinct=True,
            )[0]
            html_row.append(img_path_nn.replace(rel_path[0], rel_path[1]))
            captions_row.append(class_id_imagenet + " " + class_label_imagenet)

        img_paths_html.append(html_row)
        captions_html.append(captions_row)
        # raw_input();

    mani.closeSession()
    visualize.writeHTML(out_file_html, img_paths_html, captions_html, height_width[0], height_width[1])
Code example #15
def main():
	out_dir='/disk2/januaryExperiments/jacob_pascal_2007';
	img_folder=os.path.join(out_dir,'im_results');
	res_folder=os.path.join(out_dir,'results');
	out_file_html=os.path.join(out_dir,'jacob_comparison.html');
	width_height=[400,400];
	rel_path=['/disk2','../../../..']
	rel_path_res=['/disk2','../../..']
	path_to_im='/disk2/pascal_voc_2007/VOCdevkit/VOC2007/JPEGImages/';
	im_list=[im_curr for im_curr in os.listdir(img_folder) if im_curr.endswith('.jpg')];
	# im_just_name=[im_curr[:im_curr.index('.')] for im_curr in im_list];
	# text_files=[os.path.join(res_folder,im_curr+'.txt') for im_curr in im_just_name]
	img_corrs=[];
	img_arrows=[];
	for im_curr in im_list:
		im_jn=im_curr[:im_curr.index('.')];
		im_txt=os.path.join(res_folder,im_jn+'.txt');
		with open(im_txt,'rb') as f:
			img_corr=f.readline();
			img_corr=img_corr[:img_corr.index(' ')];
		img_corrs.append(img_corr);
		img_arrows.append(os.path.join(img_folder,im_curr));
		# print img_corrs,img_arrow
		# raw_input();

	img_paths=[];
	captions=[];
	for img_org,img_arrow in zip(img_corrs,img_arrows):
		img_paths.append([img_org.replace(rel_path[0],rel_path[1]),img_arrow.replace(rel_path_res[0],rel_path_res[1])])
		captions.append([img_org,img_arrow]);

	visualize.writeHTML(out_file_html,img_paths,captions,400,400)
	return
	# unreachable below the return: the earlier stage of this script, which wrote test.txt
	path_to_im='/disk2/pascal_voc_2007/VOCdevkit/VOC2007/JPEGImages/';
	num_im=1000;
	im_list=[os.path.join(path_to_im,im_curr) for im_curr in os.listdir(path_to_im) if im_curr.endswith('.jpg')];
	
	idx=range(len(im_list));
	random.shuffle(idx);
	idx=idx[:num_im];
	im_list=[im_list[idx_curr] for idx_curr in idx];
	
	out_dir='/disk2/januaryExperiments/jacob_pascal_2007';
	if not os.path.exists(out_dir):
		os.mkdir(out_dir);

	jacob_dir='/home/maheenrashid/Downloads/jacob/try_2/optical_flow_prediction/examples/opticalflow';
	txt_file=os.path.join(jacob_dir,'test.txt');
	res_folder=os.path.join(out_dir,'results');
	if not os.path.exists(res_folder):
		os.mkdir(res_folder);

	with open(txt_file,'wb') as f:
		for im in im_list:
			f.write(im+' 1\n');
Code example #16
def makeFloHtml(out_file_html,img_files,flo_files,height=200,width=200):
    
    img_paths=[];
    captions=[];
    for img_file,flo_file in zip(img_files,flo_files):
        img_path=[];
        img_path.append(util.getRelPath(img_file,'/disk2'));
        img_path.append(util.getRelPath(flo_file,'/disk2'));
        img_paths.append(img_path);
        captions.append(['img','flo']);
    
    visualize.writeHTML(out_file_html,img_paths,captions,height,width);
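Note: util.getRelPath converts an absolute path into one the generated HTML can resolve. Other examples on this page do the same job with explicit replace pairs such as ['/disk2', '../../../..'], which suggests getRelPath swaps a mount-point prefix for a relative one. A minimal sketch under that assumption; the '../..' depth depends on where the HTML file lives, and both defaults here are assumptions.

def getRelPath_sketch(file_path, mount='/disk2', rel_prefix='../../../..'):
    # assumption: file_path lives under the given mount point
    assert file_path.startswith(mount)
    return file_path.replace(mount, rel_prefix, 1)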
Code example #17
def script_writeFloVizHTML(out_file_html,out_dir_viz,flo_files,im_files,tif_files,clusters,tifAsPng=False):

	img_paths=[];
	captions=[];
	# idx=0;
	for flo_file,im_file,tif_file in zip(flo_files,im_files,tif_files):
		# print idx;
		# print tif_file
		assert os.path.exists(tif_file);
		assert os.path.exists(im_file);
		# print tif_file
		# if not os.path.exists(tif_file) or not os.path.exists(im_file) :
		# 	continue;
		file_name=util.getFileNames([flo_file],ext=False)[0];
		out_file_pre=os.path.join(out_dir_viz,file_name);
		out_file_flo_viz=out_file_pre+'_flo.png';
		out_files_tif=[out_file_pre+'_tifim_x.png',out_file_pre+'_tifim_y.png',out_file_pre+'_tifflo.png'];
		if not os.path.exists(out_file_flo_viz):
			flo=util.readFlowFile(flo_file);
			po.saveFloFileViz(flo_file,out_file_flo_viz);
		for idx,out_file_tif_viz in enumerate(out_files_tif):
			tif=scipy.misc.imread(tif_file)[:,:,:2];
			if idx==0 and not os.path.exists(out_file_tif_viz):
				tif_flo=replaceClusterIdWithFlow(tif,clusters);
				po.saveMatFloViz(tif_flo,out_file_tif_viz);

			if not os.path.exists(out_file_tif_viz) and idx==1:
				tif_x=np.array(tif[:,:,0]*(255.0/clusters.shape[0]),dtype=int);
				tif_x=np.dstack((tif_x,tif_x,tif_x));
				scipy.misc.imsave(out_file_tif_viz,tif_x);

			if not os.path.exists(out_file_tif_viz) and idx==2:
				tif_x=np.array(tif[:,:,1]*(255.0/clusters.shape[0]),dtype=int);
				tif_x=np.dstack((tif_x,tif_x,tif_x));
				scipy.misc.imsave(out_file_tif_viz,tif_x);

			
		img_paths_curr=[im_file,out_file_flo_viz]+out_files_tif;
		im_name=util.getFileNames([im_file],ext=False)[0];
		captions_curr=[im_name,'flo_viz']+['tif_flo_viz']*len(out_files_tif)

		# if tifAsPng:
		# 	img_paths_curr.append(out_file_tif_viz.replace('_x.png','_y.png'));
		# 	captions_curr.append('tif_flo_viz');

		img_paths_curr=[util.getRelPath(file_curr) for file_curr in img_paths_curr];
		img_paths.append(img_paths_curr);
		captions.append(captions_curr);
		# idx=idx+1;

	visualize.writeHTML(out_file_html,img_paths,captions)
Code example #18
def makeFloVizHTML(out_file_html,img_paths,dir_flo_viz):
    # out_file_html=os.path.join(out_dir,'flo_viz.html');
    img_paths_html=[];
    captions_html=[];
    for img_path,img_file_name in zip(img_paths,util.getFileNames(img_paths,ext=False)):
        out_file_flo_viz=os.path.join(dir_flo_viz,img_file_name+'.png');
        if img_path.startswith('/disk2'):
            img_path='/disk3'+img_path;

        img_paths_curr=[util.getRelPath(img_path,'/disk3'),util.getRelPath(out_file_flo_viz,'/disk3')];
        img_paths_html.append(img_paths_curr);
        captions_html.append([img_file_name,'flo']);

    visualize.writeHTML(out_file_html,img_paths_html,captions_html);
Code example #19
def script_testOnYoutube():
    val_file='/disk2/mayExperiments/finetuning_youtube_hmdb_llr/val_eq.txt'
    out_dir='/disk2/mayExperiments/eval_ucf_finetune';
    clusters_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat';
    gpu=0;

    util.mkdir(out_dir);
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel';
    # out_dir_model=os.path.join(out_dir,'original_model');

    model_file='/disk2/mayExperiments/ft_youtube_hmdb_ucfClusters/OptFlow_youtube_hmdb__iter_55000.caffemodel';
    out_dir_model=os.path.join(out_dir,'ft_ucf_model');

    util.mkdir(out_dir_model);
    out_dir_flo=os.path.join(out_dir_model,'flo');
    out_dir_flo_viz=os.path.join(out_dir_model,'flo_viz');
    util.mkdir(out_dir_flo);util.mkdir(out_dir_flo_viz)

    num_to_pick=20;

    img_paths=util.readLinesFromFile(val_file);
    img_paths=[img_path[:img_path.index(' ')] for img_path in img_paths];
    class_names=[file_curr[:file_curr.index('_')] for file_curr in util.getFileNames(img_paths)];
    classes=list(set(class_names));
    class_names=np.array(class_names);
    
    img_paths_test=[];
    for class_curr in classes:
        idx_rel=np.where(class_names==class_curr)[0];
        idx_rel=idx_rel[:num_to_pick];
        img_paths_test.extend([img_paths[idx_curr] for idx_curr in idx_rel]);

    # po.script_saveFlosAndViz(img_paths_test,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file);

    out_file_html=os.path.join(out_dir,'model_comparison.html');
    out_dirs_flo_viz=[os.path.join(out_dir,'original_model','flo_viz'),os.path.join(out_dir,'ft_ucf_model','flo_viz')];
    out_dirs_flo_viz_captions=['original_model','ft_ucf_model'];
    img_paths_html=[];
    captions_html=[];
    img_names=util.getFileNames(img_paths_test,ext=False);
    for img_path_test,img_name in zip(img_paths_test,img_names):
        row_curr=[];
        row_curr.append(util.getRelPath(img_path_test));
        for out_dir_curr in out_dirs_flo_viz:
            file_curr=os.path.join(out_dir_curr,img_name+'.png');
            row_curr.append(util.getRelPath(file_curr));
        captions_curr=[img_name]+out_dirs_flo_viz_captions;
        img_paths_html.append(row_curr)
        captions_html.append(captions_curr);
    visualize.writeHTML(out_file_html,img_paths_html,captions_html);
Code example #20
def script_visualizeRankDifferenceAsHist(params):
    out_file_pre = params.out_file_pre;
    out_file_html = params.out_file_html;
    rel_path = params.rel_path;
    class_ids = params.class_id;
    layers = params.layer
    
    if not hasattr(class_ids, '__iter__'):
        class_ids = [class_ids];
    
    if not hasattr(layers, '__iter__'):
        layers = [layers];

    img_paths_html=[];
    captions=[];
    for class_id in class_ids:
        img_paths_html_row=[];
        captions_row=[];
        for layer in layers:
            params = params._replace(class_id=class_id)
            params = params._replace(layer=layer)
            output=getNNRankComparisonInfo(params);
            indices_difference=experiments_super.getDifferenceInRank(output['img_paths_nn_train'],
                                                output['img_paths_nn_no_train'],
                                                output['nn_rank_train'],
                                                output['nn_rank_no_train']);
            xlabel='Difference in Rank (Train - Untrained)';
            ylabel='Frequency'
            title=class_id+' '+layer
            indices_difference=[diff_curr for diffs_curr in indices_difference for diff_curr in diffs_curr]
            
            if len(indices_difference)==0:
                continue;
            
            out_file_im=out_file_pre+'_'+str(params.angle)+'_'+str(params.diff)+'_'+str(params.delta)+'_'+class_id+'_'+layer+'.png';
            img_paths_html_row.append(out_file_im.replace(rel_path[0],rel_path[1]));
            total=len(indices_difference)
            sum_less=sum(np.array(indices_difference)<0)/float(total);
            sum_less='%0.2f'%(sum_less,)
            sum_more=sum(np.array(indices_difference)>=0)/float(total);
            sum_more='%0.2f'%(sum_more,)
            captions_row.append('Total '+str(total)+', <0: '+sum_less+', >0: '+sum_more);
            
            visualize.hist(indices_difference,out_file=out_file_im,bins=params.bins,normed=params.normed,xlabel=xlabel,ylabel=ylabel,title=title);
            
        img_paths_html.append(img_paths_html_row);
        captions.append(captions_row);

    visualize.writeHTML(out_file_html,img_paths_html,captions,params.height_width[0],params.height_width[1]);
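Note: the params objects in these examples are read by attribute, and this function rebuilds them with params._replace(...), which is the collections.namedtuple API. A hedged construction sketch follows; the field names are only those this function reads, and every value is a placeholder.

import collections

Params = collections.namedtuple('Params', [
    'out_file_pre', 'out_file_html', 'rel_path', 'class_id', 'layer',
    'angle', 'diff', 'delta', 'bins', 'normed', 'height_width'])

params = Params(out_file_pre='/tmp/rank_diff',
                out_file_html='/tmp/rank_diff.html',
                rel_path=['/tmp', '.'],
                class_id=['chair'], layer=['fc7'],
                angle=90, diff=45, delta=5,
                bins=20, normed=True, height_width=[400, 400])
# script_visualizeRankDifferenceAsHist(params)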
Code example #21
def comparativeLossViz(img_dirs,
                       file_post,
                       loss_post,
                       range_batches,
                       range_images,
                       out_file_html,
                       dir_server,
                       img_caption_pre=None):
    img_files_all = []
    captions_all = []
    if img_caption_pre is not None:
        assert len(img_caption_pre) == len(img_dirs)

    for batch_num in range_batches:
        # range(1,num_batches+1):
        for im_num in range_images:
            for idx_img_dir, img_dir in enumerate(img_dirs):
                loss_all = np.load(
                    os.path.join(img_dir,
                                 str(batch_num) + loss_post))
                if im_num > loss_all.shape[0]:
                    continue

                loss_curr = loss_all[im_num - 1, 0]
                loss_str = "{:10.4f}".format(loss_curr)
                files_curr = [
                    os.path.join(
                        img_dir,
                        str(batch_num) + '_' + str(im_num) + file_post_curr)
                    for file_post_curr in file_post
                ]
                files_curr = [
                    util.getRelPath(file_curr, dir_server)
                    for file_curr in files_curr
                ]
                captions_curr = [
                    os.path.split(file_curr)[1] + ' ' + loss_str
                    for file_curr in files_curr
                ]
                if img_caption_pre is not None:
                    captions_curr = [
                        img_caption_pre[idx_img_dir] + ' ' + caption_curr
                        for caption_curr in captions_curr
                    ]
                img_files_all.append(files_curr)
                captions_all.append(captions_curr)

    visualize.writeHTML(out_file_html, img_files_all, captions_all, 224, 224)
Code example #22
File: pascal_3d.py Project: maheenRashid/caffe
def script_createHistComparative():
    out_dir_meta='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d';
    train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs=[dir[:-7] for dir in os.listdir('/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations') if dir.endswith('_pascal')];
    layers=['pool5','fc6','fc7'];
    delta=5;
    caption_text=['Trained','Not Trained'];
    replace=[out_dir_meta+'/',''];
    degree=90;
    deg_to_see=0;
    # train_files=[os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # non_train_files=[os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # for idx in range(len(train_files)):

    combos=[(dir,layer) for dir in dirs for layer in layers];
    out_file_html=os.path.join(out_dir_meta,'hist_by_degree_'+str(degree)+'_comparisons_compress.html');
    img_paths=[];
    captions=[];

    for dir,layer in combos:

        file_train=os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p');
        # train_files[idx];
        file_non_train=os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p');
        # non_train_files[idx];

        hists_train,bins_train=pickle.load(open(file_train,'rb'));
        hists_non_train,bins_non_train=pickle.load(open(file_non_train,'rb'));
        
        # midpoints of consecutive histogram bin edges
        mid_points_train=[(bins_train[i]+bins_train[i+1])/2.0 for i in range(len(bins_train)-1)];
        mid_points_non_train=[(bins_non_train[i]+bins_non_train[i+1])/2.0 for i in range(len(bins_non_train)-1)];
        
        # dir=file_train[file_train.rindex('/')+1:];
        # dir=dir[:dir.index('_')];
        out_file_just_file=layer+'_'+dir+'_'+str(degree)+'_'+str(delta)+'.png'
        out_file=os.path.join(out_dir_meta,out_file_just_file)
        title=dir+' Comparison';
        xlabel='Distance Rank';
        ylabel='Frequency';

        # print out_file
        img_paths.append([out_file_just_file]);
        captions.append([dir+' '+layer]);

        visualize.plotSimple(zip([mid_points_train,mid_points_non_train],[hists_train,hists_non_train]),out_file,title=title,xlabel=xlabel,ylabel=ylabel,legend_entries=['Trained','Non Trained'],loc=0);
    print out_file_html
    visualize.writeHTML(out_file_html,img_paths,captions,width=400,height=400);
Code example #23
def script_compareAzimuth():
    path_to_anno = '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations/chair_pascal'
    im_dir = '/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB'
    out_file_html = '/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB/chair_angle_check.html'
    anno_files = [
        os.path.join(path_to_anno, file_name)
        for file_name in os.listdir(path_to_anno) if file_name.endswith('.mat')
    ]

    list_of_chairs = []
    for anno_file in anno_files:
        just_file = anno_file[anno_file.rindex('/') + 1:]
        just_file = just_file[:-4]

        curr_dict = scipy.io.loadmat(anno_file,
                                     squeeze_me=True,
                                     struct_as_record=False)
        objects = curr_dict['record'].objects
        if not hasattr(objects, '__iter__'):
            objects = [objects]
        for idx, obj in enumerate(objects):
            if obj.__dict__['class'] == 'chair':
                im_file = os.path.join(
                    im_dir, just_file + '_chair_' + str(idx) + '.jpg')
                list_of_chairs.append((im_file, obj.viewpoint.azimuth_coarse))
    angles = list(zip(*list_of_chairs)[1])
    images = list(zip(*list_of_chairs)[0])
    angles = np.array(angles)
    angles_uni = np.unique(angles)
    col_im = []
    col_caption = []
    for angle_uni in angles_uni:
        idx_uni = np.where(angles == angle_uni)[0]
        row_im_curr = []
        row_caption_curr = []
        for idx_curr in range(min(5, len(idx_uni))):
            idx_im = idx_uni[idx_curr]
            image_just_name = images[idx_im]
            image_just_name = image_just_name[image_just_name.rindex('/') + 1:]
            row_im_curr.append(image_just_name)
            row_caption_curr.append(str(angle_uni))
        col_im.append(row_im_curr)
        col_caption.append(row_caption_curr)

    print col_im[:5]
    print col_caption[:5]

    visualize.writeHTML(out_file_html, col_im, col_caption)
Code example #24
def makeImTifViz(img_paths_all,tif_paths_all,out_file_html,out_dir_tif,num_clusters=40,disk_path='/disk2'):
	out_files_tif_x=[os.path.join(out_dir_tif,img_name+'_x.png') for img_name in util.getFileNames(tif_paths_all,ext=False)];
	out_files_tif_y=[os.path.join(out_dir_tif,img_name+'_y.png') for img_name in util.getFileNames(tif_paths_all,ext=False)];
	

	for tif_path,out_file_x,out_file_y in zip(tif_paths_all,out_files_tif_x,out_files_tif_y):
		tif=scipy.misc.imread(tif_path);
		# print np.min(tif[:,:,:2]),np.max(tif[:,:,:2])
		assert np.min(tif[:,:,:2])>0 and np.max(tif[:,:,:2])<num_clusters+1;
		saveTifGray(tif,out_file_x,out_file_y,num_clusters)
	
	# out_file_html=out_dir_tif+'.html';
	img_paths_html=[[util.getRelPath(img_curr,disk_path) for img_curr in img_list] for img_list in zip(img_paths_all,out_files_tif_x,out_files_tif_y)];
	# captions_html=[[util.getFileNames([img_curr],ext=False)[0] for img_curr in img_list] for img_list in zip(img_paths_all,out_files_tif_x,out_files_tif_y)];
	captions_html=[['Image','Tif_x','Tif_y']]*len(img_paths_html);
	visualize.writeHTML(out_file_html,img_paths_html,captions_html);
Code example #25
def saveHTML(out_us, us_test, batch_size=50, num_iter=2):
    dir_server = './'
    post_us = ['_gt_pts.npy', '_pred_pts.npy']

    im_paths, gt_pt_files, pred_pt_files = us_getFilePres(
        us_test, out_us, post_us, num_iter, batch_size)
    errors_curr = us_getErrorsAll(us_test, out_us, post_us, num_iter,
                                  batch_size)
    err = np.array(errors_curr)
    bin_keep = err >= 0
    err[err < 0] = 0
    # per-image average error over the valid (non-negative) entries
    div = np.sum(bin_keep, 1)
    sum_val = np.sum(err, 1).astype(np.float)
    avg = sum_val / div

    post_ims_us = [
        '_org_nokp.jpg',
        '_gt.jpg',
        '_warp_nokp.jpg',
        '_warp.jpg',
        '_org.jpg',
    ]
    captions_for_row = [
        'Input', 'Ground Truth', 'Warped Image', 'Prediction Warped',
        'Prediction'
    ]
    out_file_html = os.path.join(out_us, 'results.html')
    idx_sort = np.argsort(avg)
    ims = []
    captions = []
    for idx_idx, idx_curr in enumerate(idx_sort):
        file_curr = gt_pt_files[idx_curr]
        file_curr = os.path.split(file_curr)[1]
        file_curr = file_curr[:file_curr.index('_gt')]
        files_us = [
            os.path.join(dir_server, file_curr + post_im_curr)
            for post_im_curr in post_ims_us
        ]
        captions_us = [
            str(idx_idx) + ' ' + caption_curr
            for caption_curr in captions_for_row
        ]
        ims.append(files_us)
        captions.append(captions_us)

    visualize.writeHTML(out_file_html, ims, captions)
    print out_file_html
Code example #26
def script_createHistsWithSpecificAngleHtml():
    out_dir_meta = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d'
    train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs = [
        dir[:-7] for dir in os.listdir(
            '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations')
        if dir.endswith('_pascal')
    ]
    layers = ['pool5', 'fc6', 'fc7']
    deg_to_see = 0
    degree = 90
    delta = 5

    out_file_html = os.path.join(
        '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d',
        'hist_angle_restrict_' + str(deg_to_see) + '_' + str(degree) +
        '_comparison_non_compress.html')
    replace = [
        '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/',
        ''
    ]

    img_paths = []
    captions = []
    for dir in dirs:
        for layer in layers:
            single_row = []
            single_row_caption = []
            for caption_curr, file_pre in [('Trained', train_pre),
                                           ('Not trained', non_train_pre)]:
                curr_dir = file_pre + '_' + layer + '_all_azimuths'
                img_path = os.path.join(
                    curr_dir, dir + '_' + str(deg_to_see) + '_' + str(degree) +
                    '_' + str(delta) + '_non_compress.png')
                img_path = img_path.replace(replace[0], replace[1])
                single_row.append(img_path)
                single_row_caption.append(caption_curr + ' ' + dir + ' ' +
                                          layer)
            img_paths.append(single_row)
            captions.append(single_row_caption)

    visualize.writeHTML(out_file_html,
                        img_paths,
                        captions,
                        height=300,
                        width=400)
Code example #27
def script_compareHashWithToyExperiment(params):
    in_file = params.in_file
    num_hash_tables_all = params.num_hash_tables_all
    key_type = params.key_type
    out_file_indices = params.out_file_indices
    out_file_pres = params.out_file_pres
    out_file_html = params.out_file_html
    rel_path = params.rel_path

    [
        features_test, features_train, labels_test, labels_train, _, _,
        indices, _
    ] = pickle.load(open(in_file, 'rb'))
    visualize.saveMatAsImage(indices, out_file_indices)

    hammings = []
    for out_file_pre, num_hash_tables in zip(out_file_pres,
                                             num_hash_tables_all):
        indices_hash = getIndicesHash(features_test, features_train,
                                      num_hash_tables, key_type)
        visualize.saveMatAsImage(indices_hash, out_file_pre + '.png')
        hamming = util.getHammingDistance(indices, indices_hash)
        pickle.dump([indices_hash, indices, hamming],
                    open(out_file_pre + '.p', 'wb'))

        hammings.append(np.mean(hamming))

    sizes = scipy.misc.imread(out_file_indices)
    sizes = sizes.shape

    im_files_html = []
    captions_html = []
    for idx, out_file_pre in enumerate(out_file_pres):
        out_file_curr = out_file_pre + '.png'
        key_str = str(key_type)
        key_str = key_str.replace('<type ', '').replace('>', '')
        caption_curr = 'NN Hash. Num Hash Tables: ' + str(
            num_hash_tables_all[idx]) + ' ' + 'Hamming Distance: ' + str(
                hammings[idx])
        im_files_html.append([
            out_file_indices.replace(rel_path[0], rel_path[1]),
            out_file_curr.replace(rel_path[0], rel_path[1])
        ])
        captions_html.append(['NN cosine', caption_curr])

    visualize.writeHTML(out_file_html, im_files_html, captions_html,
                        sizes[0] / 2, sizes[1] / 2)
Code example #28
def visualizeBestTubeRank(params):
    class_labels_map = params.class_labels_map
    rel_path = params.rel_path
    out_file_html = params.out_file_html
    out_dir = params.out_dir
    score_info_file = params.score_info_file

    [class_labels, class_idx_map] = zip(*class_labels_map)
    class_labels = list(class_labels)
    class_idx_map = list(class_idx_map)
    [score_files, score_files_info] = pickle.load(open(score_info_file, 'rb'))
    class_idx = np.unique(score_files_info[:, 0])
    tubes = np.unique(score_files_info[:, 3])
    best_tubes_overall = score_files_info[:, 3]
    out_file = os.path.join(out_dir, 'overall.png')
    visualize.hist(best_tubes_overall,
                   out_file,
                   bins=tubes,
                   normed=True,
                   xlabel='Tube_idx',
                   ylabel='Frequency',
                   title='Best Tube Over All',
                   cumulative=False)

    img_paths = []
    captions = []
    for class_idx_curr in class_idx:

        label = class_labels[class_idx_map.index(class_idx_curr)]
        out_file = os.path.join(out_dir, label + '.png')
        img_paths.append([out_file.replace(rel_path[0], rel_path[1])])
        captions.append([label])
        rel_tubes = score_files_info[score_files_info[:, 0] == class_idx_curr,
                                     3]
        print class_idx_curr, rel_tubes.shape, min(rel_tubes), max(rel_tubes)
        visualize.hist(rel_tubes,
                       out_file,
                       bins=tubes,
                       normed=True,
                       xlabel='Tube Idx',
                       ylabel='Frequency',
                       title='Best Tube ' + label,
                       cumulative=False)
    # visualize.save

    visualize.writeHTML(out_file_html, img_paths, captions, 400, 400)
Code example #29
def script_createHistDifferenceHTML():
    out_dir_meta = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d'
    train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs = [
        dir[:-7] for dir in os.listdir(
            '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations')
        if dir.endswith('_pascal')
    ]
    layers = ['pool5', 'fc6', 'fc7']
    degrees = [0, 45, 90, 135, 180]
    delta = 5
    caption_text = ['Trained', 'Not Trained']
    replace = [out_dir_meta + '/', '']
    degree = 90
    for layer in layers:
        out_file_html = os.path.join(out_dir_meta,
                                     layer + '_all_azimuths' + '.html')

        img_paths = []
        caption_paths = []
        for dir in dirs:

            img_paths_row = []
            caption_paths_row = []

            for idx, file_pre in enumerate([train_pre, non_train_pre]):
                curr_dir = os.path.join(file_pre + '_' + layer +
                                        '_all_azimuths')
                im_file = os.path.join(
                    curr_dir, dir + '_' + str(degree) + '_' + str(delta) +
                    '_compress.png')

                img_paths_row.append(im_file.replace(replace[0], replace[1]))
                caption_paths_row.append(caption_text[idx] + ' ' + layer +
                                         ' ' + dir)

            img_paths.append(img_paths_row)
            caption_paths.append(caption_paths_row)

        visualize.writeHTML(out_file_html,
                            img_paths,
                            caption_paths,
                            height=400,
                            width=400)
        print out_file_html
Code example #30
def script_writeHTMLStitchedFlos_wDirs(img_paths,out_file_html,viz_dirs):
    img_paths_html=[];
    captions=[];

    for img_path in img_paths:
        img_name=img_path[img_path.rindex('/')+1:img_path.rindex('.')];
        img_paths_html_curr=[util.getRelPath(img_path)];
        captions_curr=['im']
        for viz_dir in viz_dirs:
            print viz_dir,img_path
            # img_path_curr=[os.path.join(viz_dir,file_curr) for file_curr in os.listdir(viz_dir) if file_curr.startswith(img_name)][0];
            img_path_curr=os.path.join(viz_dir,img_name+'.png');
            img_paths_html_curr.append(util.getRelPath(img_path_curr));
            captions_curr.append(viz_dir[viz_dir.rindex('/')+1:]);
        img_paths_html.append(img_paths_html_curr);
        captions.append(captions_curr)
    
    visualize.writeHTML(out_file_html,img_paths_html,captions);
Code example #31
def script_toyNNExperiment(params):
    path_to_db = params.path_to_db;
    class_id_pascal = params.class_id_pascal;
    video_id = params.video_id;
    shot_id = params.shot_id;
    tube_id = params.tube_id;
    numberofVideos = params.numberofVideos;
    numberOfFrames = params.numberOfFrames;
    out_file_html = params.out_file_html;
    rel_path = params.rel_path;
    out_file_hist = params.out_file_hist;
    gpuFlag = params.gpuFlag;
    dtype = params.dtype;
    pascal_ids = params.pascal_ids;
    video_info = params.video_info;
    out_file_pickle = params.out_file_pickle

    info_for_extraction=getInfoForFeatureExtractionForVideo(path_to_db,video_info,numberOfFrames);    
    video_info={class_id_pascal:[video_id]}
    info_for_extraction_query=getInfoForExtractionForTube(path_to_db,class_id_pascal,video_id,shot_id,tube_id)
    features_train,labels_train,img_paths_train=setUpFeaturesMat(info_for_extraction,dtype=dtype);
    features_test,labels_test,img_paths_test=setUpFeaturesMat(info_for_extraction_query,dtype=dtype);
    # features_test,labels_test,img_paths_test=setUpFeaturesMat(info_for_extraction,dtype=dtype);
    indices,distances=nearest_neighbor.getNearestNeighbors(features_test,features_train,gpuFlag=gpuFlag);
    
    img_paths_html=[];
    captions_html=[];
    record_wrong=[]
    for r in range(indices.shape[0]):
        img_paths_row=[img_paths_test[r].replace(rel_path[0],rel_path[1])];
        captions_row=[labels_test[r]];
        for c in range(indices.shape[1]):
            rank=indices[r,c];
            img_paths_row.append(img_paths_train[rank].replace(rel_path[0],rel_path[1]))
            captions_row.append(labels_train[rank]);
            if labels_train[rank]!=labels_test[r]:
                record_wrong.append(c);
        img_paths_html.append(img_paths_row);
        captions_html.append(captions_row);

    visualize.writeHTML(out_file_html,img_paths_html,captions_html);
    visualize.hist(record_wrong,out_file_hist,bins=20,normed=True,xlabel='Rank of Incorrect Class',ylabel='Frequency',title='')
    pickle.dump([features_test,features_train,labels_test,labels_train,img_paths_test,img_paths_train,indices,distances],open(out_file_pickle,'wb'));
Code example #32
File: pascal_3d.py Project: maheenRashid/caffe
def script_compareAzimuth():
    path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations/chair_pascal';
    im_dir='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB';
    out_file_html='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB/chair_angle_check.html'
    anno_files=[os.path.join(path_to_anno,file_name) for file_name in os.listdir(path_to_anno) if file_name.endswith('.mat')];

    list_of_chairs=[];
    for anno_file in anno_files:
        just_file=anno_file[anno_file.rindex('/')+1:];
        just_file=just_file[:-4];

        curr_dict=scipy.io.loadmat(anno_file,squeeze_me=True, struct_as_record=False);
        objects=curr_dict['record'].objects
        if not hasattr(objects, '__iter__'):
            objects=[objects]
        for idx,obj in enumerate(objects):
            if obj.__dict__['class']=='chair':
                im_file=os.path.join(im_dir,just_file+'_chair_'+str(idx)+'.jpg');
                list_of_chairs.append((im_file,obj.viewpoint.azimuth_coarse));
    angles=list(zip(*list_of_chairs)[1]);
    images=list(zip(*list_of_chairs)[0]);
    angles=np.array(angles)
    angles_uni=np.unique(angles);
    col_im=[];
    col_caption=[];
    for angle_uni in angles_uni:
        idx_uni=np.where(angles==angle_uni)[0];
        row_im_curr=[];
        row_caption_curr=[];
        for idx_curr in range(min(5,len(idx_uni))):
            idx_im=idx_uni[idx_curr]
            image_just_name=images[idx_im]
            image_just_name=image_just_name[image_just_name.rindex('/')+1:];
            row_im_curr.append(image_just_name);
            row_caption_curr.append(str(angle_uni));
        col_im.append(row_im_curr);
        col_caption.append(row_caption_curr);

    print col_im[:5];
    print col_caption[:5];
    
    visualize.writeHTML(out_file_html,col_im,col_caption)
Code example #33
def script_writeHTMLStitchedFlos(out_file_html,out_file,out_dir,grid_sizes=[1,2,4,8],grid_dir_pre='grid_flo_viz_'):
    img_paths=util.readLinesFromFile(out_file);
    
    viz_dirs=[os.path.join(out_dir,grid_dir_pre+str(num)) for num in grid_sizes];
    img_paths_html=[];
    captions=[];

    for img_path in img_paths:
        img_name=img_path[img_path.rindex('/')+1:img_path.rindex('.')];
        img_paths_html_curr=[util.getRelPath(img_path)];
        captions_curr=['im']
        for viz_dir in viz_dirs:
            print viz_dir,img_path
            img_path_curr=[os.path.join(viz_dir,file_curr) for file_curr in os.listdir(viz_dir) if file_curr.startswith(img_name)][0];
            img_paths_html_curr.append(util.getRelPath(img_path_curr));
            captions_curr.append(viz_dir[viz_dir.rindex('/')+1:]);
        img_paths_html.append(img_paths_html_curr);
        captions.append(captions_curr)
    
    visualize.writeHTML(out_file_html,img_paths_html,captions);
Code example #34
def script_doEverything(path_to_npy,path_to_im,ext,lim,out_file,out_dir_scratch,window,step_size,thresh,scale_info,scale_all, scale_images,lim_cases,gpu,model_file,clusters_file,train_val_file=None,overwrite=False):
    for scale in scale_all:
        # script_saveImCrops(path_to_npy,path_to_im,ext,lim,out_file,out_dir_scratch,window,step_size,thresh,scale_info,scale,scale_images,lim_cases)

        for scale_image in scale_images:
            dir_scale=os.path.join(out_dir_scratch,scale+'_'+str(scale_image));
            
            scale_info=pickle.load(open(out_file,'rb'));
            img_dirs=[os.path.join(dir_scale,im_curr_info[0]) for im_curr_info in scale_info[scale][:lim_cases]]
            
            for img_dir in img_dirs:
                img_paths=util.getFilesInFolder(img_dir,ext='.png');
                
                if len(img_paths)==0:
                    print 'CONTINUING'
                    continue;

                img_paths=[img_path for img_path in img_paths if not img_path.endswith('onImg.png')];
                out_dir_flo=img_dir+'_pred_flo';
                out_dir_flo_viz=img_dir+'_pred_flo_viz';
                util.mkdir(out_dir_flo);
                util.mkdir(out_dir_flo_viz);
                po.script_saveFlosAndViz(img_paths,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file,train_val_file=train_val_file,overwrite=overwrite)
                img_names=util.getFileNames(img_paths,ext=False);
                out_dir_flo=img_dir+'_pred_flo';
                out_dir_flo_viz=img_dir+'_pred_flo_viz';
                out_file_html=img_dir+'.html';

                
                img_paths_html=[];
                captions_all=[];
                for img_name in img_names:
                    row_curr=[];
                    row_curr.append(util.getRelPath(os.path.join(img_dir,img_name+'_onImg.png')));
                    row_curr.append(util.getRelPath(os.path.join(img_dir,img_name+'.png')));
                    row_curr.append(util.getRelPath(os.path.join(out_dir_flo_viz,img_name+'.png')));
                    captions=['','','']
                    img_paths_html.append(row_curr);
                    captions_all.append(captions);

                visualize.writeHTML(out_file_html,img_paths_html,captions_all);
Code example #35
def makeClusterHTML(out_file_html, labels, num_cols, size_im, dir_server):
    ims = []
    captions = []
    start_idx = 0
    while start_idx < len(labels):
        row_curr = []
        caption_curr = []
        if start_idx + num_cols > len(labels):
            num_cols_real = len(labels) - start_idx
        else:
            num_cols_real = num_cols
        for col_no in range(num_cols_real):
            idx_curr = start_idx + col_no
            label_curr = labels[idx_curr]
            row_curr.append(util.getRelPath(label_curr, dir_server))
            caption_curr.append('')
        ims.append(row_curr)
        captions.append(caption_curr)
        start_idx = start_idx + num_cols_real
    visualize.writeHTML(out_file_html, ims, captions, size_im, size_im)
    print out_file_html.replace(dir_server,
                                'http://vision1.idav.ucdavis.edu:1000')
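
A hypothetical call to makeClusterHTML, assuming labels is a flat list of image paths living under dir_server (every value below is made up for illustration):

labels = ['/home/SSD3/maheen-data/clusters/im_%d.png' % i for i in range(50)]
makeClusterHTML('/home/SSD3/maheen-data/clusters/viz.html', labels,
                num_cols=10, size_im=100, dir_server='/home/SSD3/maheen-data')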
Code example #36
File: pascal_3d.py Project: maheenRashid/caffe
def script_createComparativeHtmls():
    layers=['pool5','fc6','fc7'];
    path_to_anno='/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations';
    file_dir='/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB';
    dirs=[dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')];
    file_name='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    file_name_alt='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    replace_paths=['/disk2','../../../..']
    out_file_pre='nn_performance_comparison_trained_notrained'
    out_file_pre=os.path.join('/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d',out_file_pre);
    for layer in layers:
        file_name_l=file_name+'_'+layer;
        [img_paths,gt_labels,indices,_]=pickle.load(open(file_name_l+'.p','rb'));

        idx_sort_binned=script_nearestNeigbourExperiment.sortByPerformance(indices,gt_labels,1,perClass=True);
        
        img_paths=[x.replace(replace_paths[0],replace_paths[1]) for x in img_paths];
        im_paths,captions=visualize.createImageAndCaptionGrid(img_paths,gt_labels,indices,dirs)

        file_name_l=file_name_alt+'_'+layer;
        [img_paths_alt,gt_labels_alt,indices,_]=pickle.load(open(file_name_l+'.p','rb'));

        img_paths_alt=[x.replace(replace_paths[0],replace_paths[1]) for x in img_paths_alt];
        
        im_paths_alt,captions_alt=visualize.createImageAndCaptionGrid(img_paths_alt,gt_labels_alt,indices,dirs)
        
        im_paths_alt=[im_paths_alt[img_paths_alt.index(curr_img_path)] for curr_img_path in img_paths];
        captions_alt=[captions_alt[img_paths_alt.index(curr_img_path)] for curr_img_path in img_paths];

        im_paths_big=[];
        captions_big=[];
        for idx_curr in idx_sort_binned:
            im_paths_big.append(im_paths[idx_curr]);
            im_paths_big.append(im_paths_alt[idx_curr]);
            captions_big.append(captions[idx_curr]);
            captions_big.append(captions_alt[idx_curr]);
            
        visualize.writeHTML(out_file_pre+'_'+layer+'.html',im_paths_big,captions_big)
Code example #37
def script_visualizeSegResults(pred_file,gt_output_file,gt_data_output_file,out_file_html,rel_path,means,out_dir):
	data=np.load(gt_data_output_file);
	gt= np.load(gt_output_file);
	out = np.load(pred_file);

	print data.shape;
	im_paths=[];captions=[];
	
	for im_no in range(data.shape[0]):
		print im_no;
		data_path=os.path.join(out_dir,str(im_no)+'_data.png');
		gt_path=os.path.join(out_dir,str(im_no)+'_gt.png');
		pred_path=os.path.join(out_dir,str(im_no)+'_pred.png');
		# scipy.misc.imsave(data_path,reshapeMat(data[im_no],means));
		visualize.saveMatAsImage(reshapeMat(data[im_no],means)/255,data_path);
		visualize.saveMatAsImage(reshapeMat(gt[im_no],means),gt_path);
		visualize.saveMatAsImage(reshapeMat(out[im_no],means),pred_path);
		im_paths.append([data_path.replace(rel_path[0],rel_path[1]),gt_path.replace(rel_path[0],rel_path[1]),pred_path.replace(rel_path[0],rel_path[1])]);
		captions.append(['im','mask_gt','mask_pred']);
		# if im_no==10:
		# 	break;

	visualize.writeHTML(out_file_html,im_paths,captions,height=224,width=224);
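
reshapeMat is used above but not defined on this page. A guess at what it might do for the image case, assuming data rows are Caffe-style (C, H, W) blobs with the channel means subtracted; how the original handles the single-channel gt/pred masks is not visible from this snippet:

import numpy as np

def reshapeMat(mat, means):
    # Hypothetical sketch: move a (C, H, W) blob back to (H, W, C) and
    # re-add the per-channel means that were subtracted in preprocessing.
    im = np.transpose(mat, (1, 2, 0)).astype(np.float32)
    return im + np.array(means).reshape(1, 1, -1)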
Code example #38
def compareMagInfoLayerTime(dirs,out_dirs,model_num,num_iters,file_pre_grad,file_pre_weight,out_file_html):
	img_paths=[];
	captions=[];

	for dir_curr,out_dir_curr in zip(dirs,out_dirs):
		dict_for_plotting=OrderedDict([('grads_mag',OrderedDict()),('weights_mag',OrderedDict()),('ratios',OrderedDict())]);
		for model_num_curr in model_num:
			print model_num_curr;
			file_pairs=[(os.path.join(dir_curr,model_num_curr,file_pre_grad+str(iter_curr)+'.npy'),
					os.path.join(dir_curr,model_num_curr,file_pre_weight+str(iter_curr)+'.npy')) for iter_curr in num_iters];
			means,_=getMagInfo(file_pairs,alt=True);

			dict_for_plotting['grads_mag'][model_num_curr]=means[0];
			dict_for_plotting['weights_mag'][model_num_curr]=means[1];
			dict_for_plotting['ratios'][model_num_curr]=means[2];

		img_paths_curr=[];
		captions_curr=[];
		for key_curr in dict_for_plotting.keys():
			out_file_curr=os.path.join(out_dir_curr,key_curr+'.png');
			data=dict_for_plotting[key_curr];
			xAndYs=data.values();
			legend_entries=data.keys();
			xAndYs=[(range(len(x_curr)),x_curr) for x_curr in xAndYs];
			visualize.plotSimple(xAndYs,out_file_curr,title=key_curr,xlabel='layer',ylabel='magnitude',legend_entries=legend_entries,outside=True);
			print out_file_curr.replace('/disk3','vision3.cs.ucdavis.edu:1001');
			img_paths_curr.append(util.getRelPath(out_file_curr,'/disk3'));
			# print dir_curr.split('/');
			captions_curr.append(dir_curr.split('/')[-2]+' '+dir_curr.split('/')[-1]+' '+key_curr);

		img_paths.append(img_paths_curr);
		captions.append(captions_curr);

	visualize.writeHTML(out_file_html,img_paths,captions,height=500,width=500);
	print out_file_html.replace('/disk3','vision3.cs.ucdavis.edu:1001');
Code example #39
def script_visualizeTifAsIm(tif_dir,im_dir,inc,out_tif,out_file_html,rel_path):

    tif_files=[file_curr for file_curr in os.listdir(tif_dir) if file_curr.endswith('.tif')];
    num_files=[int(file_curr[file_curr.rindex('_')+1:file_curr.rindex('.')]) for file_curr in tif_files];
    tif_files_sorted=[];
    num_files_sorted=num_files[:];
    num_files_sorted.sort();
    for idx in range(len(num_files)):
        num_curr=num_files_sorted[idx];
        file_curr=tif_files[num_files.index(num_curr)];
        tif_files_sorted.append(file_curr);

    rows_all=[];
    captions_all=[];
    for tif_file in tif_files_sorted:
        row_curr=[];
        tif_file_full=os.path.join(tif_dir,tif_file);
        file_name_only=tif_file[:tif_file.rindex('.')];
        file_pre=file_name_only[:file_name_only.rindex('_')+1];
        num_file=int(file_name_only[file_name_only.rindex('_')+1:]);
        num_match=num_file+inc;
        im_1=os.path.join(im_dir,file_name_only+'.jpg');
        im_2=os.path.join(im_dir,file_pre+str(num_match)+'.jpg');
        row_curr.append(im_1);
        row_curr.append(im_2);
        tif=scipy.misc.imread(tif_file_full);
        out_x=os.path.join(out_tif,file_name_only+'_x.png');
        out_y=os.path.join(out_tif,file_name_only+'_y.png');
        visualize.visualizeFlo(tif,out_x,out_y);
        row_curr.append(out_x);
        row_curr.append(out_y);
        row_curr=[path.replace(rel_path[0],rel_path[1]) for path in row_curr];
        rows_all.append(row_curr);
        captions_all.append([str(num_file),str(num_match),'x','y']);
    
    visualize.writeHTML(out_file_html,rows_all,captions_all);
Code example #40
def script_visualizeLossesFromExperiment():

    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayerAlexNet';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';
    layers=['conv1','conv2','conv3','conv4','conv5','fc6'];
    # ,'fc7'];

    layers_str=[];
    for idx in range(len(layers)):
        # if idx==0:
        #     fix_layers=layers[0];
        #     layer_str=str(fix_layers);
        # else:
        fix_layers=layers[:idx+1];
        layer_str='_'.join(fix_layers);
        layers_str.append(layer_str);

    log_files=[os.path.join(out_dir,'log_'+layer_str+'.log') for layer_str in layers_str];
    str_match=' solver.cpp:209] Iteration ';
    xAndYs=[svl.getIterationsAndLosses(log_file,str_match) for log_file in log_files];

    out_files=[];
    for layer_str,log_file in zip(layers_str,log_files):
        xAndY=svl.getIterationsAndLosses(log_file,str_match);
        print xAndY
        out_file=os.path.join(out_dir,'loss_'+layer_str+'.png');
        visualize.plotSimple([xAndY],out_file,title=layer_str);
        out_files.append(out_file);

    out_file_html=os.path.join(out_dir,'losses_all.html');
    img_paths=[[util.getRelPath(out_file,'/disk3')] for out_file in out_files];
    captions=[['']]*len(out_files);
    print img_paths
    print captions
    visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=300);
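
svl.getIterationsAndLosses is a log scraper whose source is not included here. A minimal sketch, under the assumption that the Caffe log contains lines like '... solver.cpp:209] Iteration 100, loss = 0.460173' and that it returns an (iterations, losses) pair, which is how plotSimple consumes it above:

def getIterationsAndLosses(log_file, str_match):
    # Sketch: collect (iteration, loss) pairs from lines containing str_match.
    iterations = []
    losses = []
    for line in open(log_file):
        if str_match in line:
            rest = line[line.index(str_match) + len(str_match):]
            iter_str, loss_part = rest.split(',', 1)
            iterations.append(int(iter_str.strip()))
            losses.append(float(loss_part.split('=')[1].strip()))
    return (iterations, losses)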
Code example #41
def script_visualizeFlos(params):
    out_file_html = params.out_file_html
    out_dir_flo_im = params.out_dir_flo_im
    flo_dir = params.flo_dir
    im_file_1 = params.im_file_1
    im_file_2 = params.im_file_2
    height = params.height
    width = params.width
    rel_path = params.rel_path

    list_1=util.readLinesFromFile(im_file_1);
    list_2=util.readLinesFromFile(im_file_2);
    flo_files=[os.path.join(flo_dir,file_curr) for file_curr in os.listdir(flo_dir) if file_curr.endswith('.flo')];
    flo_nums=[int(file_curr[file_curr.rindex('(')+1:file_curr.rindex(')')]) for file_curr in flo_files];
    print len(list_1),len(list_2),len(flo_files);
    flo_files_sorted=[];
    for idx_flo in range(len(flo_nums)):
        idx_curr=flo_nums.index(idx_flo);
        flo_files_sorted.append(flo_files[idx_curr]);
    
    im_list=[];
    caption_list=[];
    # for idx_flo,flo_file_curr in enumerate(flo_files_sorted):
    for idx_flo in range(len(list_1)):
        flo_file_curr=flo_files_sorted[idx_flo];
        out_flo_x=os.path.join(out_dir_flo_im,str(idx_flo)+'_x.png');
        out_flo_y=os.path.join(out_dir_flo_im,str(idx_flo)+'_y.png');
        flo=util.readFlowFile(flo_file_curr);
        visualize.visualizeFlo(flo,out_flo_x,out_flo_y);
        im_file_rel_1=list_1[idx_flo].replace(rel_path[0],rel_path[1]);
        im_file_rel_2=list_2[idx_flo].replace(rel_path[0],rel_path[1]);
        flo_file_rel_1=out_flo_x.replace(rel_path[0],rel_path[1]);
        flo_file_rel_2=out_flo_y.replace(rel_path[0],rel_path[1]);
        im_list.append([im_file_rel_1,im_file_rel_2,flo_file_rel_1,flo_file_rel_2]);
        caption_list.append(['Image 1','Image 2','Flow x','Flow y']);
    visualize.writeHTML(out_file_html,im_list,caption_list,height=height,width=width)
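
util.readFlowFile presumably reads Middlebury .flo files. A self-contained sketch of that reader (the .flo layout is: float32 magic 202021.25, int32 width, int32 height, then width*height interleaved (u, v) float32 pairs in row-major order):

import numpy as np

def readFlowFile(file_name):
    # Sketch: parse a Middlebury .flo file into an (H, W, 2) float32 array.
    with open(file_name, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == 202021.25, 'invalid .flo file'
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * w * h)
    return data.reshape((h, w, 2))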
Code example #42
def script_visualizeNNComparisonWithHash(params):
    in_file = params.in_file
    in_file_hash = params.in_file_hash
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    topn = params.topn
    img_size = params.img_size

    [_,_,labels_test,labels_train,img_paths_test,img_paths_train,indices,_]=pickle.load(open(in_file,'rb'));
    [indices_hash,_,_]=pickle.load(open(in_file_hash,'rb'));

    img_paths_nn,captions_nn=getImgPathsAndCaptionsNN(indices,img_paths_test,img_paths_train,labels_test,labels_train,rel_path)
    img_paths_hash,captions_hash=getImgPathsAndCaptionsNN(indices_hash,img_paths_test,img_paths_train,labels_test,labels_train,rel_path)

    
    img_paths_all=[];
    captions_all=[];
    for idx in range(len(img_paths_nn)):
        img_paths_all.append(img_paths_nn[idx][:topn]);
        img_paths_all.append(img_paths_hash[idx][:topn]);
        captions_all.append([x+' nn' for x in captions_nn[idx][:topn]]);
        captions_all.append([x+' hash' for x in captions_hash[idx][:topn]]);

    visualize.writeHTML(out_file_html,img_paths_all,captions_all,img_size[0],img_size[1]);
Code example #43
def main():

    # data='/home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt'
    # # /home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt
    # to_search=\
    # ['/home/SSD3/maheen-data/horse_project/data_check/horse/im/horses_pascal_selected/2009_004662.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/horses_pascal_selected/2009_004662.npy',
    # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_11539.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_11539.npy',
    # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_16786.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_16786.npy',
    # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_4338.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_4338.npy']
    # data=util.readLinesFromFile(data);
    # print data[0];

    # to_search=[file_curr.replace('data_check','data_resize') for file_curr in to_search];
    # idx_lines=[data.index(line_curr) for line_curr in to_search if line_curr in data];
    # print idx_lines;
    # for idx_line_curr in idx_lines:
    #     print 'batch_no',(idx_line_curr)/64

    # # npy_files=[file_curr[file_curr.index(' ')+1:] for file_curr in data];
    # # print npy_files[0];
    # # print len(npy_files);
    # # p=multiprocessing.Pool(multiprocessing.cpu_count());
    # # problem_files=p.map(findProblemNPYMP,npy_files);
    # # problem_files=[file_curr for file_curr in problem_files if file_curr is not None];
    # # print (len(problem_files));

    # return

    # data='/home/laoreja/new-deep-landmark/train/vanilla/aflw_224/aflw_vanilla_val_224.txt';
    # data='/home/laoreja/new-deep-landmark/train/vanilla/aflw_224/aflw_vanilla_train_224_weight.txt';
    # data=util.readLinesFromFile(data);
    # print data;
    # total=0;
    # for h5_file_curr in data:
    #     with h5py.File(h5_file_curr,'r') as hf:
    #         print('List of arrays in this file: ', hf.keys())
    #         data = hf.get('confidence')
    #         np_data = np.array(data)
    #         total=total+np_data.shape[0];
    #         print('Shape of the array dataset_1: ', np_data.shape)
    # print total;

    # return
    # horse_path='/home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt'
    # human_path_noIm='/home/SSD3/maheen-data/horse_project/data_resize/aflw/matches_5_train_allKP_noIm.txt'
    # human_path='/home/SSD3/maheen-data/horse_project/data_resize/aflw/matches_5_train_allKP.txt'
    # paths=[horse_path,human_path_noIm,human_path];
    # out_files=[file_curr[:file_curr.rindex('.')]+'_dummy.txt' for file_curr in paths];
    # for file_curr,out_file_curr in zip(paths,out_files):
    #     data_curr=util.readLinesFromFile(file_curr);
    #     data_curr=data_curr[0:50:5];
    #     # print data_curr;
    #     print len(data_curr);
    #     util.writeFile(out_file_curr,data_curr);
    #     print out_file_curr;

    # return
    # im_path= "/home/SSD3/maheen-data/horse_project/data_resize/horse/im/_04_Aug16_png/horse+head12.jpg"
    #   # 2 : "/home/SSD3/maheen-data/horse_project/data_resize/horse/npy/_04_Aug16_png/horse+head12.npy"
    # # "/home/SSD3/maheen-data/horse_project/data_resize/aflw/im/0/image67102_20650.jpg"
    # np_path="/home/SSD3/maheen-data/horse_project/data_resize/horse/npy/_04_Aug16_png/horse+head12.npy"
    # # "/home/SSD3/maheen-data/horse_project/data_resize/aflw/npy/0/image67102_20650.npy"

    # # im=scipy.misc.read(im_path);
    # im=cv2.imread(im_path);

    # labels=np.load(np_path);
    # print labels
    # for i in xrange(labels.shape[0]):
    #     cv2.circle(im, (labels[i][0], labels[i][1]), 2, (0,0,255), -1)
    # cv2.imwrite('/home/SSD3/maheen-data/temp/check.png', im)

    # return

    # path_to_th='/home/maheenrashid/Downloads/horses/torch/test_tps_cl.th';
    # iterations=10;
    # out_dir_models='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam'
    # model_pre=os.path.join(out_dir_models,'intermediate','model_all_');
    # model_post='.dat';
    # range_models=range(450,4500+1,450);
    # out_dir_meta=os.path.join(out_dir_models,'test_overtime');
    # batch_size=60;

    # # commands=generateTPSTestCommands(path_to_th,batch_size,iterations,model_pre,model_post,range_models,out_dir_meta)
    # # print len(commands);
    # # print commands[0];

    # # out_file_commands=os.path.join(out_dir_meta+'.sh');
    # # util.writeFile(out_file_commands,commands);

    # dir_server='/home/SSD3/maheen-data';
    # range_batches=range(1,10);
    # # batch_size=60;
    # range_images=range(1,61,5);
    # img_dir_meta='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_overtime'
    # img_dir=[os.path.join(img_dir_meta,'model_all_'+str(range_model_curr)) for range_model_curr in range_models]
    # out_file_html='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz.html'
    # file_post=['_horse.jpg','_human.jpg','_gtwarp.jpg','_predwarp.jpg']
    # loss_post='_loss.npy';
    # out_file_html=img_dir_meta+'.html';
    # img_caption_pre=[str(model_num) for model_num in range_models];
    # comparativeLossViz(img_dir,file_post,loss_post,range_batches,range_images,out_file_html,dir_server,img_caption_pre)

    # return
    dir_server = '/home/SSD3/maheen-data'
    range_batches = range(1, 9)
    # batch_size=60;
    range_images = range(1, 129, 5)
    img_dir = ['/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz/']
    # out_file_html='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz.html'

    img_dir = [
        '/home/SSD3/maheen-data/horse_project/tps_train_allKP_adam/test_viz'
    ]
    out_file_html = '/home/SSD3/maheen-data/horse_project/tps_train_allKP_adam/test_viz.html'

    file_post = ['_horse.jpg', '_human.jpg', '_gtwarp.jpg', '_predwarp.jpg']
    loss_post = '_loss.npy'
    comparativeLossViz(img_dir, file_post, loss_post, range_batches,
                       range_images, out_file_html, dir_server)

    return
    img_files = []
    caption_files = []
    out_dir = '/home/SSD3/maheen-data/training_kp_withWarp_test_debug_tps_adam'
    out_dir = '/home/SSD3/maheen-data/testing_5_kp_withWarp_fixed_adam_debug'
    out_dir = '/home/SSD3/maheen-data/training_5_kp_withWarp_fixed_adam__1e-05/test'
    dir_server = '/home/SSD3/maheen-data'
    out_file_html = os.path.join(out_dir, 'viz.html')

    for i in range(1, 94):
        im_file = os.path.join(out_dir,
                               str(i) + '_org.jpg')
        warp_file = os.path.join(out_dir,
                                 str(i) + '_warp.jpg')
        im_file_small = os.path.join(out_dir,
                                     str(i) + '_small_org.jpg')
        warp_file_small = os.path.join(out_dir,
                                       str(i) + '_small_warp.jpg')
        im_file = util.getRelPath(im_file, dir_server)
        warp_file = util.getRelPath(warp_file, dir_server)

        im_file_small = util.getRelPath(im_file_small, dir_server)
        warp_file_small = util.getRelPath(warp_file_small, dir_server)

        img_files.append([im_file, warp_file])
        # ,im_file_small,warp_file_small]);
        caption_files.append([str(i) + ' org',
                              str(i) + ' warp'])
        # ,'small_org','small_warp']);

    visualize.writeHTML(out_file_html, img_files, caption_files, 224, 224)

    return
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    num_neighbors = 5
    out_file_human = os.path.join(
        out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt')
    out_file_human_new = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_val_fiveKP_noIm.txt')
    modifyHumanFile(out_file_human, out_file_human_new)

    # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw';
    out_file_human = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    out_file_human_new = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP_noIm.txt')
    modifyHumanFile(out_file_human, out_file_human_new)

    return
    # matches_file='/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt'
    # face_data_file='/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt';
    # # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt';
    # face_data_list_file='/home/SSD3/maheen-data/aflw_data/npy/data_list.txt';

    # out_dir_meta_horse='/home/SSD3/maheen-data/horse_project/horse';
    # out_dir_meta_horse_list=[os.path.join(out_dir_meta_horse,'im'),os.path.join(out_dir_meta_horse,'npy')];
    # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw';
    # out_dir_meta_face_list=[os.path.join(out_dir_meta_face,'im'),os.path.join(out_dir_meta_face,'npy')];

    # out_dir_meta_face_old='/home/SSD3/maheen-data/horse_project/face';
    # out_dir_meta_face_old_list=[os.path.join(out_dir_meta_face_old,'im'),os.path.join(out_dir_meta_face_old,'npy')];

    # num_neighbors=5;
    # out_file_face=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP.txt');
    # out_file_horse=os.path.join(out_dir_meta_horse,'matches_'+str(num_neighbors)+'_train_fiveKP.txt');

    # missing_files=makeMatchFile(num_neighbors,matches_file,face_data_file,out_dir_meta_horse_list,out_dir_meta_face_list,out_file_horse,out_file_face,out_dir_meta_face_old_list)

    # return

    # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw';
    # num_neighbors=5;
    # out_file_human=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_val_fiveKP.txt');
    # out_file_human_new=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_val_fiveKP_noIm.txt');
    # # modifyHumanFile(out_file_human,out_file_human_new)

    # # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw';
    # out_file_human=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP.txt');
    # out_file_human_new=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP_noIm.txt');
    # # modifyHumanFile(out_file_human,out_file_human_new)
    # print out_file_human_new;

    # return
    # img_files=[];
    # caption_files=[];
    # out_dir='/home/SSD3/maheen-data/training_kp_withWarp_test_final'
    # dir_server='/home/SSD3/maheen-data';
    # out_file_html=os.path.join(out_dir,'viz.html');

    # for i in range(1,94):
    #     im_file=os.path.join(out_dir,str(i)+'.jpg');
    #     warp_file=os.path.join(out_dir,str(i)+'_warp.jpg');
    #     im_file=util.getRelPath(im_file,dir_server);
    #     warp_file=util.getRelPath(warp_file,dir_server);
    #     img_files.append([im_file,warp_file]);
    #     caption_files.append(['org','warp']);

    # visualize.writeHTML(out_file_html,img_files,caption_files,224,224);

    # return

    file_horse = '/home/SSD3/maheen-data/horse_project/horse/matches_5_train_fiveKP.txt'
    out_file_horse = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP.txt'

    lines = util.readLinesFromFile(file_horse)
    print len(lines)

    lines = list(set(lines))

    print len(lines)

    lines = [line_curr.split(' ') for line_curr in lines]

    im_files = [line_curr[0] for line_curr in lines]
    npy_files = [line_curr[1] for line_curr in lines]

    out_dir_meta_old = '/home/SSD3/maheen-data/horse_project/horse/'
    out_dir_meta_new = '/home/SSD3/maheen-data/horse_project/horse_resize/'
    replace_paths = [out_dir_meta_old, out_dir_meta_new]

    args = []
    for idx in range(len(im_files)):
        im_file = im_files[idx]
        npy_file = npy_files[idx]
        out_im_file = im_file.replace(replace_paths[0], replace_paths[1])
        out_npy_file = npy_file.replace(replace_paths[0], replace_paths[1])
        args.append((idx, im_file, npy_file, out_im_file, out_npy_file))

    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(resizeImAndNpy224, args)

    out_dir_meta_old = '/home/SSD3/maheen-data/horse_project/horse/'
    out_dir_meta_new = '/home/SSD3/maheen-data/horse_project/horse_resize/'
    replace_paths = [out_dir_meta_old, out_dir_meta_new]
    lines = util.readLinesFromFile(file_horse)
    lines_new = [
        line.replace(replace_paths[0], replace_paths[1]) for line in lines
    ]
    util.writeFile(out_file_horse, lines_new)

    lines = util.readLinesFromFile(out_file_horse)
    print(len(lines))
    im_file = lines[90].split(' ')[0]
    im = cv2.imread(im_file, 1)

    labels = np.load(lines[90].split(' ')[1])

    for i in xrange(labels.shape[0]):
        cv2.circle(im, (labels[i][0], labels[i][1]), 2, (0, 0, 255), -1)
    cv2.imwrite('/home/SSD3/maheen-data/temp/check.png', im)

    return

    dir_out = '/home/SSD3/maheen-data/temp/horse_human/viz_transform_aflw_val'

    visualize.writeHTMLForFolder(dir_out)

    return
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    num_neighbors = 5
    out_file_human = os.path.join(
        out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt')
    out_file_human_new = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_val_fiveKP_noIm.txt')
    modifyHumanFile(out_file_human, out_file_human_new)

    # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw';
    out_file_human = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    out_file_human_new = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP_noIm.txt')
    modifyHumanFile(out_file_human, out_file_human_new)

    return
    matches_file = '/home/laoreja/data/knn_res_new/knn_5_points_val_list.txt'

    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt';
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'

    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]

    out_dir_meta_face_old = '/home/SSD3/maheen-data/horse_project/face'
    out_dir_meta_face_old_list = [
        os.path.join(out_dir_meta_face_old, 'im'),
        os.path.join(out_dir_meta_face_old, 'npy')
    ]

    num_neighbors = 5
    out_file_face = os.path.join(
        out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt')
    out_file_horse = os.path.join(
        out_dir_meta_horse,
        'matches_' + str(num_neighbors) + '_val_fiveKP.txt')

    missing_files = makeMatchFile(num_neighbors, matches_file, face_data_file,
                                  out_dir_meta_horse_list,
                                  out_dir_meta_face_list, out_file_horse,
                                  out_file_face, out_dir_meta_face_old_list)

    return
    matches_file = '/home/laoreja/data/knn_res_new/knn_5_points_train_list.txt'
    matches_file = '/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt'
    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt';
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'

    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]

    out_dir_meta_face_old = '/home/SSD3/maheen-data/horse_project/face'
    out_dir_meta_face_old_list = [
        os.path.join(out_dir_meta_face_old, 'im'),
        os.path.join(out_dir_meta_face_old, 'npy')
    ]

    num_neighbors = 5
    out_file_face = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    out_file_horse = os.path.join(
        out_dir_meta_horse,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')

    missing_files = makeMatchFile(num_neighbors, matches_file, face_data_file,
                                  out_dir_meta_horse_list,
                                  out_dir_meta_face_list, out_file_horse,
                                  out_file_face, out_dir_meta_face_old_list)

    return
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    num_neighbors = 5
    out_file_face = os.path.join(out_dir_meta_face,
                                 'matches_' + str(num_neighbors) + '.txt')
    out_file_face_new = os.path.join(
        out_dir_meta_face, 'matches_noIm_' + str(num_neighbors) + '.txt')
    # modifyHumanFile(out_file_face,out_file_face_new);

    # old_data=util.readLinesFromFile(out_file_face);
    # old_data=[line_curr.split(' ')[1] for line_curr in old_data];
    # new_data=util.readLinesFromFile(out_file_face_new);
    # new_data=[line_curr.split(' ')[0] for line_curr in new_data];
    # assert len(old_data)==len(new_data);
    # for i,old_line in enumerate(old_data):
    #     print i;
    #     assert old_line==new_data[i];

    return
    matches_file = '/home/laoreja/data/knn_res_new/5_points_list.txt'

    matches_file = '/home/laoreja/data/knn_res_new/knn_train_list.txt'
    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'
    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]
    num_neighbors = 5
    out_file_face = os.path.join(out_dir_meta_face,
                                 'matches_' + str(num_neighbors) + '.txt')
    out_file_horse = os.path.join(out_dir_meta_horse,
                                  'matches_' + str(num_neighbors) + '.txt')

    makeMatchFile(num_neighbors, matches_file, face_data_file,
                  out_dir_meta_horse_list, out_dir_meta_face_list,
                  out_file_horse, out_file_face)

    return
    # script_saveTrainTxt()
    # dir_viz='/home/SSD3/maheen-data/temp/horse_human/viz_transform_aflw';
    # visualize.writeHTMLForFolder(dir_viz,'.jpg');

    return
    out_dir_meta = '/home/SSD3/maheen-data'
    face_dir = 'aflw_data'
    horse_dir = 'horse_data'
    num_neighbors = 5

    path_replace_horse = [
        '/home/laoreja/data/horse-images/annotation',
        os.path.join(out_dir_meta, horse_dir, 'im')
    ]
    path_replace_face = ['/npy/', '/im/']
    new_match_file = os.path.join(out_dir_meta, face_dir,
                                  'match_' + str(num_neighbors) + '.txt')
    out_face_train_file = os.path.join(
        out_dir_meta, face_dir, 'match_' + str(num_neighbors) + '_train.txt')
    out_horse_train_file = os.path.join(
        out_dir_meta, horse_dir, 'match_' + str(num_neighbors) + '_train.txt')
    horse_txt_file = os.path.join(out_dir_meta, horse_dir, 'train.txt')
    face_txt_file = os.path.join(out_dir_meta, face_dir, 'train.txt')

    horse_train = util.readLinesFromFile(horse_txt_file)
    horse_train_just_beginning = [
        horse_curr.split(' ')[0] for horse_curr in horse_train
    ]
    horse_train_just_beginning = [
        horse_curr[:horse_curr.rindex('.')]
        for horse_curr in horse_train_just_beginning
    ]
    print horse_train_just_beginning[0]
    face_train = util.readLinesFromFile(face_txt_file)
    face_train_just_beginning = [
        face_curr.split(' ')[0] for face_curr in face_train
    ]
    face_train_just_beginning = [
        face_curr[:face_curr.rindex('.')]
        for face_curr in face_train_just_beginning
    ]

    print len(horse_train)
    print horse_train[0]
    print len(face_train)
    print face_train[0]
    # return
    matches = util.readLinesFromFile(new_match_file)
    print(len(matches))
    matches = [match_curr.split(' ') for match_curr in matches]

    horse_matches = []
    face_matches = []

    for match_curr in matches:
        assert len(match_curr) == num_neighbors + 1
        horse_curr = match_curr[0]

        horse_curr_path, horse_name = os.path.split(horse_curr)

        if horse_curr_path[-3:] == 'gxy':
            horse_curr_path = horse_curr_path[:-3]

        horse_curr_path = horse_curr_path.replace(path_replace_horse[0],
                                                  path_replace_horse[1])

        horse_curr = os.path.join(horse_curr_path,
                                  horse_name[:horse_name.rindex('.')])
        if horse_curr in horse_train_just_beginning:
            horse_match = horse_train[horse_train_just_beginning.index(
                horse_curr)]
        else:
            # print horse_curr
            # print match_curr[0];
            # raw_input();
            continue

        for face_curr in match_curr[1:]:
            face_curr = face_curr[:face_curr.rindex('.')]
            face_curr = face_curr.replace(path_replace_face[0],
                                          path_replace_face[1])
            face_match = face_train[face_train_just_beginning.index(face_curr)]
            horse_matches.append(horse_match)
            face_matches.append(face_match)

        # print match_curr;
        # print match_curr[0];
        # for idx,i in enumerate(match_curr[1:]):
        #   print idx,face_matches[idx],i,horse_matches[idx]
    assert len(face_matches) == len(horse_matches)
    print len(face_matches)
    util.writeFile(out_face_train_file, face_matches)
    util.writeFile(out_horse_train_file, horse_matches)

    return
    # face_dir='/home/SSD3/maheen-data/face_data';
    # train_txt=os.path.join(face_dir,'train.txt');
    # files=util.readLinesFromFile(train_txt);
    # files=[file_curr.split(' ') for file_curr in files];
    # [im_files,npy_files]=zip(*files);
    # for idx,npy_file in enumerate(npy_files):
    #   print idx,len(npy_files);
    #   assert os.path.exists(npy_file);
    #   assert np.load(npy_file).shape[1]==3;

    # print len(im_files);
    # print (im_files[0]);

    # print len(npy_files);
    # print (npy_files[0]);
    dir_viz = '/home/SSD3/maheen-data/temp/horse_human/viz_transform'
    visualize.writeHTMLForFolder(dir_viz, '.jpg')

    return
    horse_data = '/home/SSD3/maheen-data/horse_data'
    new_face_data = '/home/SSD3/maheen-data/face_data'
    old_txt = 'train.txt'
    num_to_keep = 10
    new_txt = 'train_' + str(num_to_keep) + '.txt'
    for data_type in [horse_data, new_face_data]:
        lines_new = util.readLinesFromFile(os.path.join(data_type, old_txt))
        random.shuffle(lines_new)
        lines_new = lines_new[:num_to_keep]
        file_new = os.path.join(data_type, new_txt)
        util.writeFile(file_new, lines_new)
        print len(lines_new), file_new

    return
Code example #44
def main():

    out_file_html = '/disk2/aprilExperiments/horses/frames_with_detections/visualize.html'
    out_dir_meta = '/disk2/aprilExperiments/horses/frames_with_detections'
    img_paths = []
    captions = []
    rel_path = ['/disk2', '../../../..']
    for dir_curr in os.listdir(out_dir_meta):
        dir_curr = os.path.join(out_dir_meta, dir_curr)
        if os.path.isdir(dir_curr):
            print dir_curr
            jpegs = [
                os.path.join(dir_curr, file_curr)
                for file_curr in os.listdir(dir_curr)
                if file_curr.endswith('.png')
            ]
            jpegs = [
                file_curr.replace(rel_path[0], rel_path[1])
                for file_curr in jpegs
            ]
            # print jpegs[:10];
            jpegs.sort()
            # print jpegs[:10];
            captions_curr = [''] * len(jpegs)
            print captions_curr
            img_paths.append(jpegs)
            captions.append(captions_curr)
            # raw_input();
    visualize.writeHTML(out_file_html,
                        img_paths,
                        captions,
                        height=100,
                        width=100)

    return
    dirs_meta = [
        '/disk2/aprilExperiments/horses/mediaFromPPT_frames',
        '/disk2/aprilExperiments/horses/ResearchSpring2016_frames'
    ]
    out_file = '/disk2/aprilExperiments/horses/list_of_frames.txt'
    im_list = []
    for dir_curr in dirs_meta:
        list_curr = [
            os.path.join(dir_curr, im_curr) for im_curr in os.listdir(dir_curr)
            if im_curr.endswith('.jpg')
        ]
        im_list = im_list + list_curr
    util.writeFile(out_file, im_list)

    return
    in_file = '/disk2/aprilExperiments/horses/list_of_frames.txt'
    out_dir_meta = '/disk2/aprilExperiments/horses/frames_with_detections/'
    util.mkdir(out_dir_meta)

    with open(in_file, 'rb') as f:
        im_names = f.readlines()
    im_names = [line.strip('\n') for line in im_names]

    for im_name in im_names:
        vid_name = im_name[im_name.rindex('/') + 1:im_name.rindex('_')]
        out_dir_curr = os.path.join(out_dir_meta, vid_name)
        if not os.path.exists(out_dir_curr):
            os.mkdir(out_dir_curr)

    return
    out_dir = '/disk2/temp/horses'
    arr_file = os.path.join(out_dir, 'Outside4_00011_horse_detections.npy')
    im_file = '/disk2/aprilExperiments/horses/ResearchSpring2016_frames/Outside4_00011.jpg'
    arr = np.load(arr_file)

    out_file = arr_file[:-4] + '.png'
    saveDets(im_file, 'horse', arr, out_file, 0.8)
    # plt.imshow(im);
    # plt.savefig();
    print 'done'
Code example #45
def script_visualizeRankDifferenceAsHist(params):
    out_file_pre = params.out_file_pre
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    class_ids = params.class_id
    layers = params.layer

    if not hasattr(class_ids, '__iter__'):
        class_ids = [class_ids]

    if not hasattr(layers, '__iter__'):
        layers = [layers]

    img_paths_html = []
    captions = []
    for class_id in class_ids:
        img_paths_html_row = []
        captions_row = []
        for layer in layers:
            params = params._replace(class_id=class_id)
            params = params._replace(layer=layer)
            output = getNNRankComparisonInfo(params)
            indices_difference = experiments_super.getDifferenceInRank(
                output['img_paths_nn_train'], output['img_paths_nn_no_train'],
                output['nn_rank_train'], output['nn_rank_no_train'])
            xlabel = 'Difference in Rank (Train - Untrained)'
            ylabel = 'Frequency'
            title = class_id + ' ' + layer
            indices_difference = [
                diff_curr for diffs_curr in indices_difference
                for diff_curr in diffs_curr
            ]

            if len(indices_difference) == 0:
                continue

            out_file_im = out_file_pre + '_' + str(params.angle) + '_' + str(
                params.diff) + '_' + str(
                    params.delta) + '_' + class_id + '_' + layer + '.png'
            img_paths_html_row.append(
                out_file_im.replace(rel_path[0], rel_path[1]))
            total = len(indices_difference)
            sum_less = sum(np.array(indices_difference) < 0) / float(total)
            sum_less = '%0.2f' % (sum_less, )
            sum_more = sum(np.array(indices_difference) >= 0) / float(total)
            sum_more = '%0.2f' % (sum_more, )
            captions_row.append('Total ' + str(total) + ', <0: ' + sum_less +
                                ', >0: ' + sum_more)

            visualize.hist(indices_difference,
                           out_file=out_file_im,
                           bins=params.bins,
                           normed=params.normed,
                           xlabel=xlabel,
                           ylabel=ylabel,
                           title=title)

        img_paths_html.append(img_paths_html_row)
        captions.append(captions_row)

    visualize.writeHTML(out_file_html, img_paths_html, captions,
                        params.height_width[0], params.height_width[1])
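
The params object here behaves like a namedtuple (note the _replace calls). A hypothetical construction covering only the fields this snippet itself touches; getNNRankComparisonInfo may well require more, and all values below are made up:

import collections

Params = collections.namedtuple('Params', [
    'out_file_pre', 'out_file_html', 'rel_path', 'class_id', 'layer',
    'angle', 'diff', 'delta', 'bins', 'normed', 'height_width'])
params = Params(out_file_pre='/disk2/temp/rank_diff',
                out_file_html='/disk2/temp/rank_diff.html',
                rel_path=['/disk2', '../..'],
                class_id=['car', 'horse'],
                layer=['pool5', 'fc7'],
                angle=90, diff=45, delta=5,
                bins=20, normed=True,
                height_width=[400, 400])
script_visualizeRankDifferenceAsHist(params)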
Code example #46
def script_writeCommandsForSavingHashValsNpz():
    out_dir = '/disk2/decemberExperiments/hash_tables'

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    path_to_db = 'sqlite://///disk2/novemberExperiments/experiments_youtube/patches_nn_hash.db'

    out_file = os.path.join(out_dir, 'commands_list.txt')
    file_script = '/home/maheenrashid/Downloads/caffe/caffe-rc2/python/maheen_code/script_saveHashValsNpz.py'
    commands = []
    for hash_table in range(32):
        for hash_val in range(256):
            command = []
            command.append('python')
            command.append(file_script)
            command.append(path_to_db)
            command.append(out_dir)
            command.append(str(hash_table))
            command.append(str(hash_val))
            command = ' '.join(command)
            commands.append(command)

    with open(out_file, 'wb') as f:
        for command in commands:
            f.write(command + '\n')


def script_scoreRandomFrames(params):
    path_to_db = params.path_to_db
    class_labels_map = params.class_labels_map
    npz_path = params.npz_path
    numberOfFrames = params.numberOfFrames
    max_idx = params.max_idx
    n_jobs = params.n_jobs
    table_idx_all = params.table_idx_all
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    width_height = params.width_height
    out_file_frames = params.out_file_frames

    [class_labels, class_idx] = zip(*class_labels_map)

    if not os.path.exists(out_file_frames):
        frames_all = []
        for table_idx in table_idx_all:
            scores, class_idx_curr, frame_path = getScoreForIdx(
                table_idx,
                path_to_db,
                class_idx_pascal=class_idx,
                npz_path=npz_path,
                n_jobs=n_jobs)

            frames_all.append([frame_path, class_idx_curr, scores])

        pickle.dump(frames_all, open(out_file_frames, 'wb'))

    frames_all = pickle.load(open(out_file_frames, 'rb'))

    img_paths = []
    captions = []
    for frame_path, class_idx_curr, scores in frames_all:

        scores = np.array(scores)
        avg_scores = np.mean(scores, axis=0)
        gt_idx = class_idx.index(class_idx_curr)
        gt_score = avg_scores[gt_idx]
        sort_idx = np.argsort(avg_scores)[::-1]
        max_idx = sort_idx[0]
        max_score = avg_scores[max_idx]
        max_class_idx = class_idx[max_idx]
        gt_rank = np.where(sort_idx == gt_idx)[0][0]

        caption_curr = []
        caption_curr.append('GT')
        caption_curr.append(class_labels[class_idx.index(class_idx_curr)])
        caption_curr.append(str(round(gt_score, 4)))
        caption_curr.append(str(gt_rank + 1))
        caption_curr.append('Best')
        caption_curr.append(class_labels[max_idx])
        caption_curr.append(str(round(max_score, 4)))

        caption_curr = ' '.join(caption_curr)

        img_paths.append([frame_path.replace(rel_path[0], rel_path[1])])
        captions.append([caption_curr])

    visualize.writeHTML(out_file_html, img_paths, captions, width_height[0],
                        width_height[1])
Code example #47
def saveHTML(out_us, us_test, batch_size=50, num_iter=2, justHTML=False):
    dir_server = './'
    post_us = ['_gt_pts.npy', '_pred_pts.npy']

    im_paths, gt_pt_files, pred_pt_files = us_getFilePres(
        us_test, out_us, post_us, num_iter, batch_size)
    if justHTML:
        post_ims_us = [
            '_org_nokp.jpg',
            '_gt.jpg',
            '_warp_nokp.jpg',
            '_warp.jpg',
            '_org.jpg',
        ]
        captions_for_row = [
            'Input', 'Ground Truth', 'Warped Image', 'Prediction Warped',
            'Prediction'
        ]
        out_file_html = os.path.join(out_us, 'results.html')

        idx_sort = range(len(gt_pt_files))
        ims = []
        captions = []
        for idx_idx, idx_curr in enumerate(idx_sort):
            file_curr = gt_pt_files[idx_curr]
            file_curr = os.path.split(file_curr)[1]
            file_curr = file_curr[:file_curr.index('_gt')]
            files_us = [
                os.path.join(dir_server, file_curr + post_im_curr)
                for post_im_curr in post_ims_us
            ]
            captions_us = [
                str(idx_idx) + ' ' + caption_curr
                for caption_curr in captions_for_row
            ]
            ims.append(files_us)
            captions.append(captions_us)

        visualize.writeHTML(out_file_html, ims, captions)
        print out_file_html
    else:
        errors_curr = us_getErrorsAll(us_test, out_us, post_us, num_iter,
                                      batch_size)
        err = np.array(errors_curr)
        bin_keep = err >= 0
        err[err < 0] = 0
        div = np.sum(bin_keep, 1)
        sum_val = np.sum(err, 1).astype(np.float)
        avg = sum_val / div

        post_ims_us = [
            '_org_nokp.jpg',
            '_gt.jpg',
            '_warp_nokp.jpg',
            '_warp.jpg',
            '_org.jpg',
        ]
        captions_for_row = [
            'Input', 'Ground Truth', 'Warped Image', 'Prediction Warped',
            'Prediction'
        ]
        out_file_html = os.path.join(out_us, 'results.html')
        idx_sort = np.argsort(avg)
        ims = []
        captions = []
        for idx_idx, idx_curr in enumerate(idx_sort):
            file_curr = gt_pt_files[idx_curr]
            file_curr = os.path.split(file_curr)[1]
            file_curr = file_curr[:file_curr.index('_gt')]
            files_us = [
                os.path.join(dir_server, file_curr + post_im_curr)
                for post_im_curr in post_ims_us
            ]
            captions_us = [
                str(idx_idx) + ' ' + caption_curr
                for caption_curr in captions_for_row
            ]
            ims.append(files_us)
            captions.append(captions_us)

        visualize.writeHTML(out_file_html, ims, captions)
        print out_file_html

        labels = ['Ours']
        # ,'thems'];
        ticks = ['LE', 'RE', 'N', 'LM', 'RM', 'ALL']
        colors = ['b']
        # ,'g'];
        ylim = None
        errors_all = []

        errors_curr = us_getErrorsAll(us_test, out_us, post_us, num_iter,
                                      batch_size)
        failures, failures_kp = getErrRates(errors_curr, 0.1)
        errors_all.append(errors_curr)
        # errors_all.append(errors_curr[:])

        out_file_kp_err = os.path.join(out_us, 'bar.pdf')
        err_rates_all = plotComparisonKpError(errors_all,
                                              out_file_kp_err,
                                              ticks,
                                              labels,
                                              colors=colors,
                                              ylim=ylim)
        out_file_stats = os.path.join(out_us, 'stats.txt')
        # print err_rates_all;
        string = [
            str(ticks[idx_num_curr]) + ' ' + str(num_curr)
            for idx_num_curr, num_curr in enumerate(err_rates_all[0])
        ]
        print string
        # print failures,failures_kp
        # print errors_all
        # string=' '.join(string);
        util.writeFile(out_file_stats, string)
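
A hypothetical invocation of saveHTML with made-up paths, where out_us is the directory holding the *_gt_pts.npy / *_pred_pts.npy dumps and the rendered jpgs, and us_test is whatever test spec us_getFilePres expects:

saveHTML('/home/SSD3/maheen-data/us_results',
         '/home/SSD3/maheen-data/us_test.txt',
         batch_size=50, num_iter=2, justHTML=True)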
Code example #48
def script_createHistComparative():
    out_dir_meta = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d'
    train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_pre = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    dirs = [
        dir[:-7] for dir in os.listdir(
            '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations')
        if dir.endswith('_pascal')
    ]
    layers = ['pool5', 'fc6', 'fc7']
    delta = 5
    caption_text = ['Trained', 'Not Trained']
    replace = [out_dir_meta + '/', '']
    degree = 90
    deg_to_see = 0
    # train_files=[os.path.join(train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # non_train_files=[os.path.join(non_train_pre+'_'+layer+'_all_azimuths',dir+'_'+str(degree)+'_'+str(delta)+'_compress_data.p') for layer in layers for dir in dirs];
    # for idx in range(len(train_files)):

    combos = [(dir, layer) for dir in dirs for layer in layers]
    out_file_html = os.path.join(
        out_dir_meta,
        'hist_by_degree_' + str(degree) + '_comparisons_compress.html')
    img_paths = []
    captions = []

    for dir, layer in combos:

        file_train = os.path.join(
            train_pre + '_' + layer + '_all_azimuths',
            dir + '_' + str(degree) + '_' + str(delta) + '_compress_data.p')
        # train_files[idx];
        file_non_train = os.path.join(
            non_train_pre + '_' + layer + '_all_azimuths',
            dir + '_' + str(degree) + '_' + str(delta) + '_compress_data.p')
        # non_train_files[idx];

        hists_train, bins_train = pickle.load(open(file_train, 'rb'))
        hists_non_train, bins_non_train = pickle.load(
            open(file_non_train, 'rb'))

        mid_points_train = [
            (bins_train[i] + bins_train[i + 1]) / 2.0
            for i in range(len(bins_train) - 1)
        ]
        mid_points_non_train = [
            (bins_non_train[i] + bins_non_train[i + 1]) / 2.0
            for i in range(len(bins_non_train) - 1)
        ]

        # dir=file_train[file_train.rindex('/')+1:];
        # dir=dir[:dir.index('_')];
        out_file_just_file = layer + '_' + dir + '_' + str(degree) + '_' + str(
            delta) + '.png'
        out_file = os.path.join(out_dir_meta, out_file_just_file)
        title = dir + ' Comparison'
        xlabel = 'Distance Rank'
        ylabel = 'Frequency'

        # print out_file
        img_paths.append([out_file_just_file])
        captions.append([dir + ' ' + layer])

        visualize.plotSimple(zip([mid_points_train, mid_points_non_train],
                                 [hists_train, hists_non_train]),
                             out_file,
                             title=title,
                             xlabel=xlabel,
                             ylabel=ylabel,
                             legend_entries=['Trained', 'Non Trained'],
                             loc=0)
    print out_file_html
    visualize.writeHTML(out_file_html,
                        img_paths,
                        captions,
                        width=400,
                        height=400)
Code example #49
def main():
    out_file_crude = '/disk2/januaryExperiments/tube_scoring/iou_crude_info.p'
    out_file_bestOfBoth = '/disk2/januaryExperiments/tube_scoring/iou_bestOfBoth.p'
    out_file_bestOfBoth_perShot = '/disk2/januaryExperiments/tube_scoring/iou_bestOfBoth_perShot.p'
    out_file_file = '/disk2/januaryExperiments/tube_scoring/iou_crude_info.p'

    class_labels_map = [('boat', 2), ('train', 9), ('dog', 6), ('cow', 5),
                        ('aeroplane', 0), ('motorbike', 8), ('horse', 7),
                        ('bird', 1), ('car', 3), ('cat', 4)]

    avg_iou_pred, avg_iou_best = getAverageIOUPerClassBOB(
        out_file_bestOfBoth, class_labels_map)
    avg_iou_pred_perShot, _ = getAverageIOUPerClassBOB(
        out_file_bestOfBoth_perShot, class_labels_map)
    avg_iou_fanyi = getAverageIOUBestFanyi(out_file_crude, class_labels_map)

    dict_vals = {}
    label_keys = ['Fanyi', 'Shot', 'Frame', 'Best']
    avg_ious = [
        avg_iou_fanyi, avg_iou_pred_perShot, avg_iou_pred, avg_iou_best
    ]
    xtick_labels = avg_iou_pred.keys()
    print xtick_labels

    for k in avg_iou_pred:
        for idx in range(len(label_keys)):
            if label_keys[idx] in dict_vals:
                dict_vals[label_keys[idx]].append(avg_ious[idx][k])
            else:
                dict_vals[label_keys[idx]] = [avg_ious[idx][k]]
        # dict_vals['Shot']=avg_iou_fanyi[k]
        # dict_vals['Frame']=avg_iou_fanyi[k]
        # dict_vals['Best']=avg_iou_fanyi[k]
        print k, avg_iou_fanyi[k], avg_iou_pred_perShot[k], avg_iou_pred[
            k], avg_iou_best[k]

    out_file = '/disk2/januaryExperiments/tube_scoring/avg_iou_comparison.png'

    colors = ['r', 'g', 'b', 'y']
    visualize.plotGroupBar(out_file,
                           dict_vals,
                           xtick_labels,
                           label_keys,
                           colors,
                           ylabel='Average IOU',
                           title='Average IOU Comparison',
                           width=0.25,
                           ylim=[0.2, 0.9])

    return
    # path_to_txt='/disk2/youtube/categories/gt_list.txt';
    params_dict = {}
    params_dict['class_labels_map'] = [('boat', 2), ('train', 9), ('dog', 6),
                                       ('cow', 5), ('aeroplane', 0),
                                       ('motorbike', 8), ('horse', 7),
                                       ('bird', 1), ('car', 3), ('cat', 4)]
    params_dict[
        'out_file'] = '/disk2/januaryExperiments/tube_scoring/gt_data.p'

    params_dict[
        'path_to_tube_ranks'] = '/disk2/januaryExperiments/tube_scoring/scores_perShot'
    params_dict['path_to_mats'] = '/disk2/res11'
    params_dict[
        'out_file_crude_info'] = '/disk2/januaryExperiments/tube_scoring/iou_crude_info_perShot.p'
    params_dict[
        'out_file_bestOfBoth'] = '/disk2/januaryExperiments/tube_scoring/iou_bestOfBoth_perShot.p'
    params = createParams('saveIOUInfo')
    params = params(**params_dict)

    script_saveIOUInfo(params)

    return
    # path_to_data = '/disk2/youtube/categories'
    # saveGTData(path_to_txt,class_labels_map,out_file)

    # return

    [class_labels, class_idx_all] = zip(*class_labels_map)
    [meta_info, coords] = pickle.load(open(out_file, 'rb'))

    meta_curr = meta_info[0]
    gt_box = coords[0]
    meta_curr_str = [str(curr) for curr in meta_curr]
    class_label = class_labels[class_idx_all.index(meta_curr[0])]
    tube_file = os.path.join(path_to_tube_ranks,
                             '_'.join(meta_curr_str[:3]) + '.p')
    mat_file = os.path.join(
        path_to_mats, '_'.join([class_label] + meta_curr_str[1:3]) + '.mat')

    res = scipy.io.loadmat(mat_file, squeeze_me=True,
                           struct_as_record=False)['res']
    tube_coords = getTubeCoordsForFrame(res, meta_curr[-1])

    ious, tubes_gt = getTubeGTOverlap(gt_box, tube_coords)
    tubes_ranked, tubes_scores = pickle.load(open(tube_file, 'rb'))
    tubes_ranked = list(tubes_ranked)

    gt_idx_best_rank, iou_best_rank, rank_idx_best_gt, iou_best_gt = getBestOfBothRank(
        ious, tubes_gt, tubes_ranked)
    print gt_idx_best_rank, iou_best_rank, rank_idx_best_gt, iou_best_gt
    print tubes_ranked

    return
    frame_path = youtube.getFramePath(path_to_data, class_label, meta_curr[1],
                                      meta_curr[2], meta_curr[3])

    im = Image.open(frame_path)
    draw = ImageDraw.Draw(im)

    for idx_tube, tube in enumerate(tubes_ranked):
        draw.line(getLineCoords(tube_coords[tube]),
                  width=2,
                  fill=(255, 255, 0))
    draw.line(getLineCoords(tube_coords[tubes_ranked[0]]),
              width=2,
              fill=(255, 0, 0))

    # for gt_box in gt_coords:
    # print gt_box
    draw.line(getLineCoords(gt_box), width=2, fill=(0, 255, 0))

    out_file = '/disk2/temp/temp.png'
    misc.imsave(out_file, np.array(im))

    # gt_boxes=coords[0];

    # break
    # video_id=;
    # shot_id=;
    # frame_id=;

    #

    # print res['coor']
    # break
    # print sticks[0];
    # videos=[vid for vid in os.listdir(path) if os.path.isdir(os.path.join(path,vid))];
    # for vid in videos:
    #     path_vid = os.path.join(path,vid);
    #     shots=[shot for shot in os.listdir(path_vid) if os.path.isdir(os.path.join(path_vid,shot))];
    #     for shot in shots:
    #         path_shot = os.path.join(path_vid,shot);

    return
    meta_dir = '/disk2/januaryExperiments/tube_scoring'
    out_file_html_pre = '/disk2/januaryExperiments/tube_scoring/best_tubes_comparison'
    rel_path = ['/disk2', '../../..']
    paths_to_im = [
        os.path.join(meta_dir, 'images'),
        os.path.join(meta_dir, 'images_perShot')
    ]
    height_width = [500, 800]
    columns = ['Frame Level', 'Shot Level']
    for class_idx in range(10):
        out_file_html = out_file_html_pre + '_' + str(class_idx) + '.html'
        files = [
            file_curr for file_curr in os.listdir(paths_to_im[0])
            if file_curr.endswith('.png')
            and file_curr.startswith(str(class_idx) + '_')
        ]
        img_paths = []
        captions = []
        for file_curr in files:
            img_paths_row = []
            captions_row = []
            for idx_path_to_im, path_to_im in enumerate(paths_to_im):
                im_curr = os.path.join(path_to_im, file_curr).replace(
                    rel_path[0], rel_path[1])
                img_paths_row.append(im_curr)
                captions_row.append(columns[idx_path_to_im] + ' ' + file_curr)
            img_paths.append(img_paths_row)
            captions.append(captions_row)
        visualize.writeHTML(out_file_html, img_paths, captions,
                            height_width[0], height_width[1])

    return
    params_dict = {}
    params_dict['path_to_data'] = '/disk2/youtube/categories'
    params_dict[
        'score_dir'] = '/disk2/januaryExperiments/tube_scoring/scores_perShot'
    params_dict['mat_dir'] = '/disk2/res11'
    params_dict[
        'out_dir'] = '/disk2/januaryExperiments/tube_scoring/images_perShot'
    if not os.path.exists(params_dict['out_dir']):
        os.mkdir(params_dict['out_dir'])
    params_dict['to_exclude'] = [
        '/disk2/res11/horse_7_16.mat', '/disk2/res11/horse_7_14.mat',
        '/disk2/res11/horse_4_49.mat'
    ]
    params_dict['class_labels_map'] = [('boat', 2), ('train', 9), ('dog', 6),
                                       ('cow', 5), ('aeroplane', 0),
                                       ('motorbike', 8), ('horse', 7),
                                       ('bird', 1), ('car', 3), ('cat', 4)]
    params_dict['n_jobs'] = 12

    params = createParams('drawTubeBB')
    params = params(**params_dict)

    script_drawTubeBB(params)

    return
    shot_dir = '/disk2/januaryExperiments/shot_score_normalized_perShot'
    out_dir = '/disk2/januaryExperiments/tube_scoring/scores_perShot'

    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    class_labels_map = [('boat', 2), ('train', 9), ('dog', 6), ('cow', 5),
                        ('aeroplane', 0), ('motorbike', 8), ('horse', 7),
                        ('bird', 1), ('car', 3), ('cat', 4)]
    n_jobs = 12

    [class_labels, class_idx_all] = zip(*class_labels_map)

    check_list = [str(class_idx) for class_idx in class_idx_all]
    # print check_list

    file_list = [
        file_curr for file_curr in os.listdir(shot_dir)
        if file_curr.endswith('.p') and file_curr[0] in check_list
    ]
    args = [(os.path.join(shot_dir,
                          file_curr), os.path.join(out_dir, file_curr), idx)
            for idx, file_curr in enumerate(file_list)]

    print len(args)

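    # Score each shot in parallel; saveBestTubeAvgScore writes one pickle of
    # [best_tube_rank, tubes_ranked, scores_ranked] per shot file.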
    p = multiprocessing.Pool(n_jobs)
    p.map(saveBestTubeAvgScore, args)

    # for class_idx in class_idx_all:
    #     rel_files=[file_curr for file_curr in file_list if file_curr.startswith(str(class_idx)+'_') and file_curr.endswith('.p')];
    #     for rel_file in rel_files:
    #         score_file=os.path.join(shot_dir,rel_file);
    #         out_file=os.path.join(out_dir,rel_file);
    #         best_tube_rank,tubes_ranked,scores_ranked = getBestTubeAvgScore(score_file)
    #         pickle.dump([best_tube_rank,tubes_ranked,scores_ranked],open(out_file,'wb'));

    #     out_files=[os.path.join(shot_dir,file_curr) for file_curr in file_list if file_curr.startswith(str(class_idx)+'_') and file_curr.endswith('.p')];

    #     score_file=os.path.join(shot_dir,str(class_idx)+'_'+str(video_id)+'_'+str(shot_id)+'.p');
    #     best_tube_rank,tubes_ranked,scores_ranked = getBestTubeAvgScore(os.path.join(shot_dir,score_file))

    return

    path_to_data = '/disk2/youtube/categories'
    to_exclude = [
        '/disk2/res11/horse_7_16.mat', '/disk2/res11/horse_7_14.mat',
        '/disk2/res11/horse_4_49.mat'
    ]

    meta_dir = '/disk2/res11'
    mat_files = pickle.load(open('/disk2/temp/horse_problem.p', 'rb'))
    mat_files = [
        os.path.join(meta_dir, file_curr[file_curr.rindex('/') + 1:] + '.mat')
        for file_curr in mat_files
    ]
    # for mat_file in mat_files:

    mat_file = '/disk2/res11/horse_7_11.mat'
    out_file = '/disk2/temp.png'

    drawTubeBB(mat_file, path_to_data, out_file)

    return
    path_to_data = '/disk2/youtube/categories'
    out_dir_patches = '/disk2/res11/tubePatches'
    path_to_mat = '/disk2/res11'

    if not os.path.exists(out_dir_patches):
        os.mkdir(out_dir_patches)

    mat_files = [
        os.path.join(path_to_mat, file_curr)
        for file_curr in os.listdir(path_to_mat) if file_curr.endswith('.mat')
    ]
    mat_file = mat_files[0]
    print mat_file
    # drawTubeBB takes (mat_file, path_to_data, out_file) at its other call
    # site above; reuse the temp output path from that block.
    out_file = '/disk2/temp.png'
    drawTubeBB(mat_file, path_to_data, out_file)
    # script_saveTubePatches(mat_files,path_to_data,out_dir_patches,numThreads=8)

    return

    shot_dir = '/disk2/januaryExperiments/shot_score_normalized'
    class_idx = 0
    video_id = 1
    shot_id = 1
    score_file = os.path.join(
        shot_dir,
        str(class_idx) + '_' + str(video_id) + '_' + str(shot_id) + '.p')
    # score_file is already a full path; joining it with shot_dir again is
    # redundant.
    best_tube_rank, tubes_ranked, scores_ranked = getBestTubeAvgScore(
        score_file)
    print best_tube_rank, tubes_ranked, scores_ranked
Code example #50
0
def main():
    train_file = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_file = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    layers = ['pool5', 'fc6', 'fc7']
    out_dir = '/disk2/octoberExperiments/nn_pascal3d'
    db_file = os.path.join(out_dir, 'nn_pascal3d_new.db')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    path_to_anno = '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations'
    class_ids = [
        dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')
    ]

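    # The first (commented) pass populates the DB with the pascal-trained
    # model; the active pass below uses the model trained without pascal.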
    file_pre = train_file
    trainFlag = True
    caffe_model = '/home/maheenrashid/Downloads/caffe/caffe-rc2/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'

    # exceptions=script_populateDB(db_file,file_pre,trainFlag,layers,class_ids,caffe_model)
    # for ex in exceptions:
    #     print len(ex)
    # pickle.dump([exceptions],open(file_pre+'_db_exceptions.p','wb'));

    file_pre = non_train_file
    trainFlag = False
    caffe_model = '/disk2/octoberExperiments/nn_performance_without_pascal/snapshot_iter_450000.caffemodel'
    script_populateDB(db_file, file_pre, trainFlag, layers, class_ids,
                      caffe_model)

    return
    script_saveIndicesAll(train_file, layers)
    script_saveIndicesAll(non_train_file, layers)
    # script_testingDoNN();

    return
    script_createHistComparative()
    # script_createHistsWithSpecificAngle()
    return
    script_createHistComparative()
    # script_createHistDifferenceHTML()
    # script_savePerClassPerDegreeHistograms()
    return
    script_visualizePerClassAzimuthPerformance()
    return
    train_file = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/trained/20151027204114'
    non_train_file = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547'
    layers = ['pool5', 'fc6', 'fc7']
    path_to_anno = '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations'

    for file_name in [train_file, non_train_file]:
        [img_paths, gt_labels,
         azimuths] = pickle.load(open(file_name + '_azimuths.p', 'rb'))
        for layer in layers:
            print layer
            file_name_l = file_name + '_' + layer + '_all'
            out_file = file_name_l + '_azimuths.p'

            t = time.time()
            [img_paths, gt_labels, indices,
             _] = pickle.load(open(file_name_l + '.p', 'rb'))
            t = time.time() - t
            print t
            # raw_input();

            diffs_all, dists_all = getAzimuthInfo(img_paths, gt_labels,
                                                  indices, azimuths)
            pickle.dump([img_paths, gt_labels, azimuths, diffs_all, dists_all],
                        open(out_file, 'wb'))

    return
    text_labels = [
        dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')
    ]
    for file_name in [train_file, non_train_file]:
        [img_paths, gt_labels,
         azimuths] = pickle.load(open(file_name + '_azimuths.p', 'rb'))
        for layer in layers:
            print layer
            file_name_l = file_name + '_' + layer + '_all'
            out_dir = file_name_l + '_azimuths'
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            t = time.time()
            [img_paths, gt_labels, indices,
             _] = pickle.load(open(file_name_l + '.p', 'rb'))
            t = time.time() - t
            print t
            # raw_input();
            createAzimuthGraphs(img_paths, gt_labels, indices, azimuths,
                                out_dir, text_labels)

    for layer in layers:
        print layer
        out_file = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/azimuths_' + layer + '_all' + '_comparison.html'
        rel_train = 'trained/20151027204114_' + layer + '_all' + '_azimuths'
        rel_notrain = 'no_trained/20151027203547_' + layer + '_all' + '_azimuths'
        out_dir = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203547_' + layer + '_all' + '_azimuths'

        im_paths = [[
            os.path.join(rel_train, file_curr),
            os.path.join(rel_notrain, file_curr)
        ] for file_curr in os.listdir(out_dir) if file_curr.endswith('.jpg')]

        captions = [['train', 'no_train']] * len(im_paths)
        visualize.writeHTML(out_file,
                            im_paths,
                            captions,
                            height=500,
                            width=500)

    # script_saveAzimuthInfo(train_file,path_to_anno);
    # script_saveAzimuthInfo(non_train_file,path_to_anno);
    # script_saveIndicesAll(train_file,layers)
    # script_saveIndicesAll(non_train_file,layers)

    return
    out_dir = '/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # out_dir=os.path.join(out_dir,'no_trained');
    out_dir = os.path.join(out_dir, 'trained')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    path_to_anno = '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations'
    file_dir = '/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB'
    dirs = [
        dir[:-7] for dir in os.listdir(path_to_anno) if dir.endswith('pascal')
    ]
    test_set = []
    for dir_idx, dir in enumerate(dirs):
        ims = [
            filename for filename in glob.glob(file_dir + '/*' + dir + '*.jpg')
        ]
        test_set.extend(zip(ims, [dir_idx] * len(ims)))

    print len(test_set)

    layers = ['pool5', 'fc6', 'fc7']
    gpu_no = 1
    path_to_classify = '..'
    numberOfN = 5
    relativePaths = ['/disk2', '../../../../..']
    deployFile = '/disk2/octoberExperiments/nn_performance_without_pascal/deploy.prototxt'
    meanFile = '/disk2/octoberExperiments/nn_performance_without_pascal/mean.npy'
    modelFile = '/disk2/octoberExperiments/nn_performance_without_pascal/snapshot_iter_450000.caffemodel'
    # file_name=script_nearestNeigbourExperiment.runClassificationTestSet(test_set,out_dir,path_to_classify,gpu_no,layers,deployFile=deployFile,meanFile=meanFile,modelFile=modelFile,ext='jpg')
    # file_name=script_nearestNeigbourExperiment.runClassificationTestSet(test_set,out_dir,path_to_classify,gpu_no,layers,ext='jpg')
    file_name = os.path.join(out_dir, '20151027204114')
    test_set, _ = pickle.load(open(file_name + '.p', 'rb'))
    vals = np.load(file_name + '.npz')

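    # Sort the test set by image path, then split it into parallel lists of
    # paths and ground-truth labels.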
    test_set = sorted(test_set, key=lambda x: x[0])
    test_set = zip(*test_set)

    img_paths = list(test_set[0])
    gt_labels = list(test_set[1])

    numberOfN = 5

    # file_name_alt='/disk2/octoberExperiments/nn_performance_without_pascal/pascal_3d/no_trained/20151027203004'
    for layer in layers:
        print layer
        file_name_l = file_name + '_' + layer
        # indices,conf_matrix=script_nearestNeigbourExperiment.doNN(img_paths,gt_labels,vals[layer],numberOfN=numberOfN,distance='cosine',algo='brute')
        # pickle.dump([img_paths,gt_labels,indices,conf_matrix],open(file_name_l+'.p','wb'));
        [img_paths, gt_labels, indices,
         _] = pickle.load(open(file_name_l + '.p', 'rb'))

        idx_sort_binned = script_nearestNeigbourExperiment.sortByPerformance(
            indices, gt_labels, 0, perClass=True)

        img_paths = [x.replace('/disk2', '../../../../..') for x in img_paths]
        im_paths, captions = visualize.createImageAndCaptionGrid(
            img_paths, gt_labels, indices, dirs)
        im_paths = [im_paths[idx] for idx in idx_sort_binned]
        captions = [captions[idx] for idx in idx_sort_binned]

        visualize.writeHTML(file_name_l + '_sorted.html', im_paths, captions)

    return
    path_to_anno = '/disk2/pascal_3d/PASCAL3D+_release1.0/Annotations'
    path_to_im = '/disk2/pascal_3d/PASCAL3D+_release1.0/Images'
    dirs = [dir for dir in os.listdir(path_to_anno) if dir.endswith('pascal')]

    out_dir = '/disk2/pascal_3d/PASCAL3D+_release1.0/Images_BB'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    for dir in dirs:
        file_names = [
            os.path.join(dir, file_name)[:-4]
            for file_name in os.listdir(os.path.join(path_to_im, dir))
            if file_name.endswith('.jpg')
        ]
        saveBBImages(path_to_im, path_to_anno, file_names, out_dir)
Code example #51
0
def script_scoreRandomFrames(params):
    path_to_db = params.path_to_db
    class_labels_map = params.class_labels_map
    npz_path = params.npz_path
    numberOfFrames = params.numberOfFrames
    max_idx = params.max_idx
    n_jobs = params.n_jobs
    table_idx_all = params.table_idx_all
    out_file_html = params.out_file_html
    rel_path = params.rel_path
    width_height = params.width_height
    out_file_frames = params.out_file_frames
    frameCountNorm = params.frameCountNorm

    [class_labels, class_idx] = zip(*class_labels_map)

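    # Cache the sampled frame scores so reruns skip straight to the HTML.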
    if not os.path.exists(out_file_frames):
        if frameCountNorm:
            total_counts = getTotalCountsPerClass(path_to_db, list(class_idx))
        else:
            total_counts = None

        frames_all = []
        for table_idx in table_idx_all:
            scores, class_idx_curr, frame_path = getScoreForIdx(
                table_idx,
                path_to_db,
                class_idx_pascal=class_idx,
                npz_path=npz_path,
                n_jobs=n_jobs,
                total_counts=total_counts)

            frames_all.append([frame_path, class_idx_curr, scores])

        pickle.dump(frames_all, open(out_file_frames, 'wb'))

    frames_all = pickle.load(open(out_file_frames, 'rb'))

    img_paths = []
    captions = []
    for frame_path, class_idx_curr, scores in frames_all:

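        # Average each class's score over the tubes, rank classes by that
        # average, and record the ground-truth class's rank (0-based).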
        scores = np.array(scores)
        avg_scores = np.mean(scores, axis=0)
        gt_idx = class_idx.index(class_idx_curr)
        gt_score = avg_scores[gt_idx]
        sort_idx = np.argsort(avg_scores)[::-1]
        max_idx = sort_idx[0]
        max_score = avg_scores[max_idx]
        max_class_idx = class_idx[max_idx]
        gt_rank = np.where(sort_idx == gt_idx)[0][0]

        caption_curr = []
        caption_curr.append('GT')
        caption_curr.append(class_labels[class_idx.index(class_idx_curr)])
        caption_curr.append(str(round(gt_score, 4)))
        caption_curr.append(str(gt_rank + 1))
        caption_curr.append('Best')
        caption_curr.append(class_labels[max_idx])
        caption_curr.append(str(round(max_score, 4)))

        caption_curr = ' '.join(caption_curr)

        img_paths.append([frame_path.replace(rel_path[0], rel_path[1])])
        captions.append([caption_curr])

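    # NOTE: writeHTML's positional args after captions are (height, width),
    # so width_height[0] is passed as the height here.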
    visualize.writeHTML(out_file_html, img_paths, captions, width_height[0],
                        width_height[1])
Code example #52
0
def main():
    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    # util.mkdir(out_dir);

    # train_file=os.path.join(out_dir,'train.txt');

    # template_deploy_file='trainval_noFix_withRandom_diffForConv.prototxt';
    # template_solver_file='solver_debug.prototxt';

    # base_lr=0.000001;
    # snapshot=1000;
    # layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    # gpu=1;
    # commands=[];
    # idx=len(layers)-4;
    # fix_layers=layers[1:idx+1];

    # layer_str='_'.join(fix_layers);
    # print layer_str;
    # # return
    # model_file_curr=model_file
    # snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    # out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    # out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    # log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    # replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    # replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    # command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    # util.writeFile(os.path.join(out_dir,'train.sh'),[command]);


    # return
    # yjConfig checkpoint (overridden by the assignments below)
    model_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/opt_noFix_conv1_conv2_conv3_conv4_conv5_llr__iter_50000.caffemodel'
    solver_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/solver_conv1_conv2_conv3_conv4_conv5.prototxt'
    deploy_file = '/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/deploy_conv1_conv2_conv3_conv4_conv5.prototxt'

    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel'
    solver_file = '/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters/solver_conv1_conv2_conv3_conv4_conv5.prototxt'
    deploy_file = '/disk3/maheen_data/ft_youtube_40_noFix_diffLR_sZclusters/deploy_conv1_conv2_conv3_conv4_conv5.prototxt'
    justCheckGradients(solver_file, deploy_file, model_file)


    return
    out_dir='/disk3/maheen_data/debug_networks/sanityCheckDebug';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    util.mkdir(out_dir);

    train_file=os.path.join(out_dir,'train.txt');

    template_deploy_file='deploy_withRandom.prototxt';
    template_solver_file='solver_debug.prototxt';

    base_lr=0.000001;
    snapshot=1000;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    gpu=1;
    commands=[];
    idx=len(layers)-1;
    fix_layers=layers[1:idx+1];

    layer_str='_'.join(fix_layers);
    print layer_str;
    
    model_file_curr=model_file
    snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    util.writeFile(os.path.join(out_dir,'train.sh'),[command]);


    return

    out_dir='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig/';
    out_dir='/disk3/maheen_data/ft_youtube_40_images_cluster_suppress_yjConfig_llr_diff/';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    util.mkdir(out_dir);

    train_file=os.path.join(out_dir,'train.txt');

    template_deploy_file='deploy_withRandom_yjConfig.prototxt';
    template_solver_file='solver_debug.prototxt';

    base_lr=0.00001;
    snapshot=500;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    gpu=1;
    commands=[];
    idx=len(layers)-4;
    fix_layers=layers[1:idx+1];

    layer_str='_'.join(fix_layers);
    print layer_str;
    # return
    model_file_curr=model_file
    snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    util.writeFile(os.path.join(out_dir,'train.sh'),[command]);

    return
    out_dir='/disk3/maheen_data/ft_youtube_40_noFix_noCopyFC8_FC7';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';

    # out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';

    util.mkdir(out_dir);
    train_txt_orig_path='/disk3/maheen_data/ft_youtube_40/train.txt';

    
    template_deploy_file='deploy_withRandom.prototxt';
    template_solver_file='solver_debug.prototxt';

    train_file=os.path.join(out_dir,'train.txt');
    
    data=util.readLinesFromFile(train_txt_orig_path);
    random.shuffle(data);
    # data[:100];
    util.writeFile(train_file,data);


    # shutil.copyfile(train_txt_orig_path,train_file);

    # out_dir='/disk3/maheen_data/ft_youtube_40_ucf_permute';
    # train_file=os.path.join(out_dir,'train_permute.txt');

    base_lr=0.0001;
    snapshot=2000;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];
    gpu=0;
    # command_file=os.path.join(out_dir,'debug_0.sh');
    commands=[];

    # for idx in range(4,len(layers)):
    #     if idx==0:
    #         fix_layers=layers[0];
    #         layer_str=str(fix_layers);
    #         model_file_curr=None;
    #     else:

    idx=len(layers)-3;
    fix_layers=layers[1:idx+1];

    layer_str='_'.join(fix_layers);
    print layer_str;

    return
    model_file_curr=model_file
    # print fix_layers
    snapshot_prefix=os.path.join(out_dir,'opt_noFix_'+layer_str+'_');
    out_deploy_file=os.path.join(out_dir,'deploy_'+layer_str+'.prototxt');
    out_solver_file=os.path.join(out_dir,'solver_'+layer_str+'.prototxt');
    log_file=os.path.join(out_dir,'log_'+layer_str+'.log');
    replaceSolverFile(out_solver_file,template_solver_file,out_deploy_file,base_lr,snapshot,snapshot_prefix,gpu=gpu);
    replaceDeployFile(out_deploy_file,template_deploy_file,train_file,fix_layers);
    command=printTrainingCommand(out_solver_file,log_file,model_file_curr);
    util.writeFile(os.path.join(out_dir,'train.sh'),[command]);

    # commands.append(command);

    # util.writeFile(command_file,commands);

    return
    # out_dir='/disk3/maheen_data/debug_networks/noFix';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    # '/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/train.txt'
    # data=util.readLinesFromFile(train_txt_orig_path);
    # random.shuffle(data);
    # # data[:100];
    # util.writeFile(train_file,data[:100]);

    # out_dir='/disk3/maheen_data/debug_networks/noFixNoCopy';
    # model_file=None;


    out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayer';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';


    util.mkdir(out_dir);
    train_txt_orig_path='/disk3/maheen_data/debug_networks/noFix/train.txt';

    deploy_file='/disk3/maheen_data/debug_networks/noFix/deploy.prototxt';
    solver_file='/disk3/maheen_data/debug_networks/noFix/solver.prototxt';

    # template_deploy_file='deploy_debug_noFix.prototxt';
    template_deploy_file='deploy_fc8NoCopy.prototxt';
    template_solver_file='solver_debug.prototxt';

    train_file=os.path.join(out_dir,'train.txt');

    # shutil.copyfile(train_txt_orig_path,train_file);

    base_lr=0.0001;
    snapshot=100;
    layers=[None,'conv1','conv2','conv3','conv4','conv5','fc6','fc7','fc8'];

    return
    out_dir='/disk3/maheen_data/ft_youtube_40_noFix_alexnet';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel';
    layers=['conv1','conv2','conv3','conv4','conv5','fc6','fc7'];

    layers_str=[];
    for idx in range(len(layers)):
        if idx==0:
            fix_layers=layers[0];
            layer_str=str(fix_layers);
        else:
            fix_layers=layers[1:idx+1];
            layer_str='_'.join(fix_layers);
        layers_str.append(layer_str);

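    # Parse each config's caffe log for iteration/loss pairs and plot one
    # loss curve per frozen-layer configuration.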
    log_files=[os.path.join(out_dir,'log_'+layer_str+'.log') for layer_str in layers_str];
    str_match=' solver.cpp:209] Iteration ';
    xAndYs=[svl.getIterationsAndLosses(log_file,str_match) for log_file in log_files];

    out_files=[];
    for layer_str,log_file in zip(layers_str,log_files):
        xAndY=svl.getIterationsAndLosses(log_file,str_match);
        print xAndY
        out_file=os.path.join(out_dir,'loss_'+layer_str+'.png');
        visualize.plotSimple([xAndY],out_file,title=layer_str);
        out_files.append(out_file);

    out_file_html=os.path.join(out_dir,'losses_all.html');
    img_paths=[[util.getRelPath(out_file,'/disk3')] for out_file in out_files];
    captions=[['']]*len(out_files);
    print img_paths
    print captions
    visualize.writeHTML(out_file_html,img_paths,captions,height=300,width=300);

    # out_file=os.path.join(out_dir,'losses_all.png');

    # print len(xAndYs);
    # print xAndYs[-2][1]

    # visualize.plotSimple(xAndYs,out_file,legend_entries=layers_str,loc=0,outside=True)

    
    return
    # mean_standard_proto_file='/home/maheenrashid/Downloads/debugging_jacob/opticalflow/standard.binaryproto';
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    layers_to_copy=['conv1','conv2','conv3','conv4','conv5']

    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/OptFlow_youtube_hmdb__iter_5000.caffemodel';
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']

    # model_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic_llr/OptFlow_youtube_hmdb__iter_65000.caffemodel';
    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']


    # deploy_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/opt_train_coarse_xavier.prototxt';
    # solver_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/train.prototxt';
    deploy_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/deploy_debug.prototxt';
    solver_file='/disk2/mayExperiments/ft_youtube_hmdb_newClusters_layerMagic/solver_debug.prototxt';

    # layers_to_copy=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
    # layers_to_explore=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']

    
    # ,'fc6','fc7','fc8']
    layers_to_explore=['conv1','conv2','conv3','conv4','conv5','fc6_fix','fc7_fix','fc8_fix']
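    # (weight_lr_mult, bias_lr_mult) per layer: conv1-3 frozen, conv4-5
    # scaled down, fc layers at the full rate.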
    blobs_lr=[(0,0),(0,0),(0,0),
            # (10,20),(10,20),
            (0.1,0.2),(0.1,0.2),
            (1,2),(1,2),(1,2)]

    iterations=400;
    momentum=0.9;
    lr=0.000001;

    caffe.set_device(1)
    caffe.set_mode_gpu()



    solver=caffe.SGDSolver(solver_file);

    
    net_org=caffe.Net(deploy_file,model_file);
        
    # copy weights
    for layer_name in layers_to_copy:
        solver.net.params[layer_name][0].data[...]=net_org.params[layer_name][0].data;
        solver.net.params[layer_name][1].data[...]=net_org.params[layer_name][1].data;

    layer_names=list(solver.net._layer_names);

    ratios={};
    for key in layers_to_explore:
        ratios[key]=[];


    dict_layers={};
    for idx_curr,layer_name in enumerate(layer_names):
        print idx_curr,layer_name,
        if layer_name in solver.net.params.keys():
            print len(solver.net.params[layer_name])
            update_prev=[np.zeros(solver.net.layers[idx_curr].blobs[0].diff.shape),
                        np.zeros(solver.net.layers[idx_curr].blobs[1].diff.shape)];
            blob_lr=list(blobs_lr[layers_to_explore.index(layer_name)]);
            dict_layers[idx_curr]=[layer_name,update_prev,blob_lr];
        else:
            print 0;

    for idx_curr in dict_layers.keys():
        print idx_curr,len(dict_layers[idx_curr]),dict_layers[idx_curr][0],dict_layers[idx_curr][1][0].shape,dict_layers[idx_curr][1][1].shape,dict_layers[idx_curr][2]

    
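    # Manual training loop: after each forward/backward pass, compute the
    # update/weight norm ratio per layer (the inline note below flags ~1e-3
    # as healthy), then apply the momentum update to every blob by hand and
    # zero the diffs.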
    for iteration in range(iterations):
        print iteration
    


        solver.net.forward();
        solver.net.backward();
        
        for idx_curr in dict_layers.keys():

            rel_row=dict_layers[idx_curr]
            layer_name=rel_row[0];
            update_prev=rel_row[1][0];
            print rel_row[2][0]
            lr_curr=rel_row[2][0]*lr;
            
            diffs_curr=solver.net.params[layer_name][0].diff;
            weights_curr=solver.net.params[layer_name][0].data;

            param_scale = np.linalg.norm(weights_curr.ravel())

            update = update_prev*momentum-lr_curr*diffs_curr;
            
            update_scale = np.linalg.norm(update.ravel())
            ratio= update_scale / param_scale # want ~1e-3
            print layer_name,ratio,update_scale,param_scale
            ratios[layer_name].append(ratio);
        
        for idx_curr,layer in enumerate(solver.net.layers):
            for idx_blob,blob in enumerate(layer.blobs):
                rel_row=dict_layers[idx_curr]
                layer_name=rel_row[0];
                update_prev=rel_row[1][idx_blob];
                lr_curr=rel_row[2][idx_blob]*lr;
                
                diffs_curr=blob.diff;
                update_curr=momentum*update_prev-(lr_curr*diffs_curr);
                blob.data[...] -= update_curr
                blob.diff[...] = np.zeros(blob.diff.shape);
                
                dict_layers[idx_curr][1][idx_blob]=update_curr;
Code example #53
0
def main():

    old_file = '/home/laoreja/data/knn_res_new/knn_5_points_train_list.txt'
    new_file = '/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt'
    match_str = 'n02374451_4338.JPEG'
    lines = util.readLinesFromFile(old_file)
    lines_to_keep = []
    for line in lines:
        if match_str not in line:
            lines_to_keep.append(line)
    assert len(lines_to_keep) == len(lines) - 1
    util.writeFile(new_file, lines_to_keep)

    return
    file_curr = '/home/laoreja/finetune-deep-landmark/dataset/train/trainImageList_2.txt'
    out_file = '/home/maheenrashid/Downloads/trainImageList_2_clean.txt'
    lines = util.readLinesFromFile(file_curr)
    lines_to_keep = []
    for line in lines:
        if line == '/home/laoreja/data/horse-images/annotation/imagenet_n02374451/gxy/n02374451_4338.JPEG 156 169 79 99 161 88 1 43 46 1 167 95 1 164 95 1 43 56 1':
            print 'found!'
        else:
            lines_to_keep.append(line)

    print len(lines_to_keep), len(lines)
    assert len(lines_to_keep) + 1 == len(lines)
    util.writeFile(out_file, lines_to_keep)

    return
    horse_file = '/home/SSD3/maheen-data/horse_project/horse/matches_5_val_fiveKP.txt'
    human_file = '/home/SSD3/maheen-data/horse_project/aflw/matches_5_val_fiveKP_noIm.txt'
    horse_data = util.readLinesFromFile(horse_file)
    human_data = util.readLinesFromFile(human_file)

    # horse_data=[horse_data[41]];
    # human_data=[human_data[41]];
    # print horse_data[0];

    horse_im = [line_curr.split(' ')[0] for line_curr in horse_data]
    human_im = [
        line_curr.split(' ')[0].replace('/npy/',
                                        '/im/').replace('.npy', '.jpg')
        for line_curr in human_data
    ]

    horse_npy = [line_curr.split(' ')[1] for line_curr in horse_data]
    human_npy = [line_curr.split(' ')[0] for line_curr in human_data]

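    # Flag any horse keypoint file that contains a negative value (likely a
    # bad annotation).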
    problem_cases = []
    for horse_npy_curr in horse_npy:
        labels = np.load(horse_npy_curr)
        if np.any(labels < 0):
            problem_cases.append(horse_npy_curr)

    print len(problem_cases), len(set(problem_cases))

    return

    dir_server = '/home/SSD3/maheen-data'
    out_dir_debug = os.path.join(dir_server, 'temp',
                                 'debug_problem_batch/rerun')

    im_file = '/home/laoreja/data/horse-images/annotation/imagenet_n02374451/gxy/n02374451_4338.JPEG'
    npy_file = '/home/SSD3/maheen-data/temp/debug_problem_batch/rerun/npy/imagenet_n02374451/n02374451_4338.npy'

    out_file = os.path.join(out_dir_debug, 'check.png')
    saveImWithAnno((1, im_file, npy_file, out_file))

    # arg=([156, 169, 79, 99], [[161, 88, 1], [43, 46, 1], [167, 95, 1], [164, 95, 1], [43, 56, 1]], '/home/SSD3/maheen-data/temp/debug_problem_batch/rerun/npy/imagenet_n02374451/n02374451_4338.npy', 0);
    # # print np.load(arg[2]);
    # saveBBoxNpy(arg);
    # # print np.load(arg[2]);

    return
    dir_server = '/home/SSD3/maheen-data'
    out_dir_debug = os.path.join(dir_server, 'temp',
                                 'debug_problem_batch/rerun')
    util.mkdir(out_dir_debug)
    params_dict = {}
    params_dict[
        'path_txt'] = '/home/SSD3/maheen-data/temp/debug_problem_batch/train_dummy.txt'
    # '/home/laoreja/finetune-deep-landmark/dataset/train/trainImageList_2.txt';
    params_dict['path_pre'] = None
    params_dict['type_data'] = 'horse'
    params_dict['out_dir_meta'] = out_dir_debug
    util.mkdir(params_dict['out_dir_meta'])
    params_dict['out_dir_im'] = os.path.join(params_dict['out_dir_meta'], 'im')
    params_dict['out_dir_npy'] = os.path.join(params_dict['out_dir_meta'],
                                              'npy')
    params_dict['out_file_list_npy'] = os.path.join(params_dict['out_dir_npy'],
                                                    'data_list.txt')
    params_dict['out_file_list_im'] = os.path.join(params_dict['out_dir_im'],
                                                   'data_list.txt')
    params_dict['out_file_pairs'] = os.path.join(params_dict['out_dir_meta'],
                                                 'pairs.txt')
    params_dict['overwrite'] = True
    local_script_makeBboxPairFiles(params_dict)

    return
    npy_file = '/home/SSD3/maheen-data/horse_project/horse/npy/imagenet_n02374451/n02374451_4338.npy'
    labels = np.load(npy_file)
    print labels

    return
    dir_server = '/home/SSD3/maheen-data'
    out_dir_debug = os.path.join(dir_server, 'temp', 'debug_problem_batch')
    util.mkdir(out_dir_debug)

    out_horse_im_dir = os.path.join(out_dir_debug, 'horse_im')
    out_human_im_dir = os.path.join(out_dir_debug, 'human_im')
    util.mkdir(out_horse_im_dir)
    util.mkdir(out_human_im_dir)

    horse_file = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP_debug.txt'
    human_file = '/home/SSD3/maheen-data/horse_project/aflw/matches_5_train_fiveKP_noIm_debug.txt'
    horse_data = util.readLinesFromFile(horse_file)
    human_data = util.readLinesFromFile(human_file)
    horse_data = [horse_data[41]]
    human_data = [human_data[41]]
    print horse_data[0]

    horse_im = [line_curr.split(' ')[0] for line_curr in horse_data]
    human_im = [
        line_curr.split(' ')[0].replace('/npy/',
                                        '/im/').replace('.npy', '.jpg')
        for line_curr in human_data
    ]

    horse_npy = [line_curr.split(' ')[1] for line_curr in horse_data]
    human_npy = [line_curr.split(' ')[0] for line_curr in human_data]

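    # Render each horse/human image with its keypoints overlaid, in parallel.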
    args = []
    for idx, horse_im_curr in enumerate(horse_im):
        args.append((idx, horse_im_curr, horse_npy[idx],
                     os.path.join(out_horse_im_dir,
                                  str(idx) + '.jpg')))
    for idx, horse_im_curr in enumerate(human_im):
        args.append((idx, horse_im_curr, human_npy[idx],
                     os.path.join(out_human_im_dir,
                                  str(idx) + '.jpg')))

    # saveImWithAnno(args[-1]);
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(saveImWithAnno, args)

    out_file_html = os.path.join(out_dir_debug, 'viz_matches.html')
    img_paths = []
    captions = []

    for idx in range(len(horse_im)):
        horse_im_curr = os.path.join(out_horse_im_dir,
                                     str(idx) + '.jpg')
        horse_im_curr = util.getRelPath(horse_im_curr, dir_server)
        human_im_curr = os.path.join(out_human_im_dir,
                                     str(idx) + '.jpg')
        human_im_curr = util.getRelPath(human_im_curr, dir_server)
        img_paths.append([horse_im_curr, human_im_curr])
        captions.append(['horse ' + str(idx), 'human'])

    # for idx,horse_im_curr in enumerate(horse_im):
    #   human_im_curr=util.getRelPath(human_im[idx],dir_server);
    #   horse_im_curr=util.getRelPath(horse_im_curr,dir_server);
    #   img_paths.append([horse_im_curr,human_im_curr]);
    #   captions.append(['horse','human']);

    visualize.writeHTML(out_file_html, img_paths, captions, 224, 224)