Example #1
def writeSmallDatasetFile(out_file_pre,horse_data,num_neighbor,
                          num_data,in_file_horse,in_file_face,in_file_face_noIm,post_tags=None):
    if post_tags is None:
        post_tags=['_horse.txt','_face.txt','_face_noIm.txt'];
        
    in_files=[in_file_horse,in_file_face,in_file_face_noIm];
    
    data_org=util.readLinesFromFile(in_file_horse);
    data_org=np.array(data_org);
    idx_keep_all=[];
    print horse_data.shape
    horse_data=horse_data[:num_data];
    for horse_curr in horse_data:
        idx_curr=np.where(data_org==horse_curr)[0];
        idx_curr=np.sort(idx_curr)
        idx_keep=idx_curr[:num_neighbor];
        idx_keep_all=idx_keep_all+list(idx_keep);
#         print num_data,idx_keep
        
    idx_keep_all=np.array(idx_keep_all);
    print idx_keep_all.shape
    files_to_return=[];
    for idx_in_file,in_file in enumerate(in_files):
        out_file_curr=out_file_pre+post_tags[idx_in_file];
        if idx_in_file==0:
            data_keep=data_org[idx_keep_all];
        else:
            data_curr=util.readLinesFromFile(in_file);
            data_curr=np.array(data_curr);
            data_keep=data_curr[idx_keep_all];
        util.writeFile(out_file_curr,data_keep);
        files_to_return.append(out_file_curr);
    
    return files_to_return;
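
The examples in this listing all lean on a util helper module that is never shown. As a point of reference, here is a minimal sketch of the two helpers used throughout, readLinesFromFile and writeFile, assuming they simply read and write newline-delimited text (the real module clearly carries more, e.g. mkdir, getFileNames, escapeString):

def readLinesFromFile(file_path):
    # return the file's lines with trailing newlines stripped
    with open(file_path, 'r') as f:
        return [line.rstrip('\n') for line in f]

def writeFile(file_path, lines):
    # write an iterable of strings, one string per line
    with open(file_path, 'w') as f:
        for line in lines:
            f.write(str(line) + '\n')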
Example #2
def makeCulpritFile():

    out_dir = '/home/SSD3/maheen-data/temp/debug_problem_batch'
    file_human = '/home/SSD3/maheen-data/horse_project/aflw/matches_5_train_fiveKP_noIm.txt'
    file_horse = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP.txt'
    new_file_human = file_human[:file_human.rindex('.')] + '_debug.txt'
    new_file_horse = file_horse[:file_horse.rindex('.')] + '_debug.txt'
    batch_no = 3
    batch_size = 64
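    # note: batch_no is reassigned to 71 below for the debug batch,
    # so the value above is unused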

    data_horse = util.readLinesFromFile(file_horse)
    data_human = util.readLinesFromFile(file_human)

    assert len(data_horse) == len(data_human)
    print(len(data_horse) / batch_size)
    # for batch_no in range(71,72):
    batch_no = 71
    line_idx = (batch_size * (batch_no - 1)) % len(data_horse)

    print('____')
    print(batch_no)
    print(line_idx)
    print data_horse[line_idx]
    print data_human[line_idx]
    data_horse_rel = data_horse[line_idx:line_idx + batch_size]
    data_human_rel = data_human[line_idx:line_idx + batch_size]
    assert len(data_horse_rel) == batch_size
    assert len(data_human_rel) == batch_size

    util.writeFile(new_file_horse, data_horse_rel)
    util.writeFile(new_file_human, data_human_rel)
    print new_file_human
    print new_file_horse
Example #3
def script_vizForHMDB():

	out_dir='/disk2/mayExperiments/debug_finetuning/hmdb';
	clusters_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat';
	vid_list=os.path.join(out_dir,'video_list.txt');
	out_dir_viz=os.path.join(out_dir,'im');
	util.mkdir(out_dir_viz);
	out_file_html=out_dir_viz+'.html';
	
	path_to_hmdb='/disk2/marchExperiments/hmdb_try_2/hmdb'

	dirs=util.readLinesFromFile(vid_list);
	dirs=[os.path.join(path_to_hmdb,dir_curr) for dir_curr in dirs[2:]];
	random.shuffle(dirs);
	num_to_evaluate=100;
	out_file_tif=os.path.join(out_dir,'tif_list.txt');

	# recordContainingFiles(dirs,num_to_evaluate,out_file_flo,post_dir='images',ext='.flo');
	tif_files=util.readLinesFromFile(out_file_tif);
	tif_files=tif_files[:100];
	img_files=[file_curr.replace('.tif','.jpg') for file_curr in tif_files];
	flo_files=[file_curr.replace('.tif','.flo') for file_curr in tif_files];
	clusters=po.readClustersFile(clusters_file);

	script_writeFloVizHTML(out_file_html,out_dir_viz,flo_files,img_files,tif_files,clusters,True)
Example #4
def writeMinLossFileLossData(out_file_pre,post_tags,minloss_post,loss_file):
    new_files=[out_file_pre+post_tag_curr for post_tag_curr in post_tags];
    horse_data=util.readLinesFromFile(new_files[0]);
    horse_data=np.array(horse_data);
    horse_data_uni=np.unique(horse_data);
    face_data=util.readLinesFromFile(new_files[1]);
    face_data_noIm=util.readLinesFromFile(new_files[2]);
    assert len(face_data)==len(face_data_noIm);
    
    loss_all=np.load(loss_file);
    loss_all=loss_all[:len(face_data)];
    assert loss_all.shape[0]==len(face_data);
    
    new_data=[[],[],[]];
    for idx_curr,horse_curr in enumerate(horse_data_uni):
        idx_rel=np.where(horse_data==horse_curr)[0];
        loss_rel=loss_all[idx_rel];
        min_idx=np.argmin(loss_rel);
        min_idx_big=idx_rel[min_idx];
        assert loss_rel[min_idx]==loss_all[min_idx_big];
        new_data[0].append(horse_curr);
        new_data[1].append(face_data[min_idx_big]);
        new_data[2].append(face_data_noIm[min_idx_big]);
  
    new_files_out=[new_file_curr[:new_file_curr.rindex('.')]+minloss_post for new_file_curr in new_files];
    for new_file_to_write,data_to_write in zip(new_files_out,new_data):
        print new_file_to_write,len(data_to_write);
        util.writeFile(new_file_to_write,data_to_write);
Example #5
def main():

    # note: returns immediately; everything below is disabled scratch code
    return
    out_dir='/group/leegrp/maheen_data/flo_all_predictions_finetuned_model/results'
    dir_flo_im='/group/leegrp/maheen_data/flo_all_predictions_finetuned_model/flo_npy';
    util.mkdir(dir_flo_im);
    h5_file_list='/group/leegrp/maheen_data/flo_all_predictions_finetuned_model/h5_file_list.txt';

    util.mkdir(dir_flo_im);
    clusters_file='/group/leegrp/maheen_data/flo_all_predictions_finetuned_model/clusters.mat';
    vision3_path='/disk2/marchExperiments';
    hpc_path='/group/leegrp/maheen_data';


    # replace=['flo_all_predictions','flo_all_predictions_finetuned_model']
    h5_files=util.readLinesFromFile(h5_file_list);
    # h5_files=[file_curr.replace(replace[0],replace[1]) for file_curr in h5_files];
    print h5_files[0];
    # print len(h5_files);
    # print os.path.exists(h5_files[0]);
    # util.writeFile(h5_file_list,h5_files);

    script_saveFloAsNpPred(clusters_file,h5_files,dir_flo_im,[vision3_path,hpc_path])        

    return

    dir_meta_images='/group/leegrp/maheen_data/youtube';
    flo_files_list='/group/leegrp/maheen_data/youtube_list_flo_paths.txt';
    out_dir_numpy='/group/leegrp/maheen_data/youtube_resized_flo_npy_new';
    util.mkdir(out_dir_numpy);

    flo_files=util.readLinesFromFile(flo_files_list);
    img_files=[];
    args=[];
    
    for idx,flo_file in enumerate(flo_files):
        flo_name=flo_file[flo_file.rindex('/')+1:];
        video_name=flo_name[:flo_name.index('.')];
        just_name=flo_name[:flo_name.rindex('.')]
        img_name=just_name+'.jpg';
        
        img_path=os.path.join(dir_meta_images,video_name,'images_transfer',img_name);
        out_file_num=os.path.join(out_dir_numpy,just_name+'.npy');
        args.append((idx,flo_file,img_path,out_file_num));

    print len(args)
    print args[0];
    # print multiprocessing.cpu_count()
    p=multiprocessing.Pool(multiprocessing.cpu_count());
    p.map(saveResizeFlo,args);
Example #6
def parseAnnoFile(path_txt, path_pre=None, face=False):
    face_data = util.readLinesFromFile(path_txt)

    path_im = []
    bbox = []
    anno_points = []

    for line_curr in face_data:
        line_split = line_curr.split(' ')
        pts = [float(str_curr) for str_curr in line_split[1:]]
        pts = [int(str_curr) for str_curr in pts]
        if path_pre is not None:
            path_im.append(
                os.path.join(path_pre, line_split[0].replace('\\', '/')))
        else:
            path_im.append(line_split[0])

        bbox.append(pts[:4])

        if face:
            increment = 2
        else:
            increment = 3

        anno_points_curr = [
            pts[start:start + increment]
            for start in range(4, len(pts), increment)
        ]
        anno_points.append(anno_points_curr)

    return path_im, bbox, anno_points
Example #7
def main():
	text_list='/disk2/aprilExperiments/dual_flow/list_of_dats_to_move.txt';
	text_mv='/disk2/aprilExperiments/dual_flow/list_of_dats_to_move_commands.sh';
	models=util.readLinesFromFile(text_list);
	path_to_storage='/media/maheenrashid/Seagate\ Backup\ Plus\ Drive/maheen_data';
	path_to_replace='/disk2';
	
	mv_commands=[];

	for model in models:
		if not os.path.exists(model):
			continue;
		dir_curr=model[:model.rindex('/')];
		dir_new=dir_curr.replace(path_to_replace,path_to_storage);

		# print dir_new;
		
		command='mkdir -p '+dir_new;
		# print command;
		mv_command='mv -v '+model+' '+dir_new+'/';
		# print mv_command
		mv_commands.append(mv_command);
		subprocess.call(command,shell=True);
		# raw_input();

	util.writeFile(text_mv,mv_commands);
	print text_mv
Example #8
def script_saveHumanOnlyNeg():
    out_file='/disk2/aprilExperiments/positives_160_human.txt'
    out_dir='/disk2/aprilExperiments/negatives_npy_onlyHuman';
    util.mkdir(out_dir);
    im_pre='COCO_train2014_'

    lines=util.readLinesFromFile(out_file);
    img_files=[line[:line.index(' ')] for line in lines];

    img_names=util.getFileNames(img_files,ext=False);
    img_name=img_names[0];
    
    print img_name
    
    img_name_split=img_name.split('_');
    idx_all=[int(img_name.split('_')[-1]) for img_name in img_names];

    print len(img_names),len(idx_all),idx_all[0];
    cat_id=1;

    path_to_anno='/disk2/ms_coco/annotations';
    anno_file='instances_train2014.json';
    anno=json.load(open(os.path.join(path_to_anno,anno_file),'rb'))['annotations'];
    
    script_saveBboxFiles(anno,out_dir,im_pre,idx_all,cat_id)
Example #9
def shortenTrainingData(train_txt,train_txt_new,ratio_txt,val_txt_new=None):
    # pos_human='/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow.txt';
    # neg_human='/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow.txt';

    # pos_human_small='/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow_oneHundreth.txt';
    # neg_human_small='/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow_oneHundreth.txt';

    # ratio_txt=100;
    # shortenTrainingData(pos_human,pos_human_small,ratio_txt);
    # shortenTrainingData(neg_human,neg_human_small,ratio_txt);

    train_data=util.readLinesFromFile(train_txt);
    # print ratio_txt
    if ratio_txt<1:
        ratio_txt=int(len(train_data)*ratio_txt);
        # print ratio_txt;

    random.shuffle(train_data);
    train_data_new=train_data[:ratio_txt];
    print len(train_data),len(train_data_new);
    util.writeFile(train_txt_new,train_data_new);

    if val_txt_new is not None:
        val_data=train_data[ratio_txt:];
        print len(val_data);
        util.writeFile(val_txt_new,val_data);
Example #10
def script_writeCommandsForPreprocessing(all_dirs_file,
                                         command_file_pre,
                                         num_proc,
                                         check_file=None):
    all_dirs = util.readLinesFromFile(all_dirs_file)
    all_dirs = [dir_curr[:-1] for dir_curr in all_dirs]

    if check_file is not None:
        all_dirs = getRemainingDirs(all_dirs, check_file)

    command_pre = 'echo '
    command_middle_1 = ';cd ~/Downloads/opticalflow; matlab -nojvm -nodisplay -nosplash -r "out_folder=\''
    command_middle = '\';saveTrainingData" > '
    command_end = ' 2>&1'
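
    # each assembled command has the shape:
    # echo <dir>;cd ~/Downloads/opticalflow; matlab -nojvm -nodisplay -nosplash \
    #   -r "out_folder='<dir>';saveTrainingData" > <dir>/log.txt 2>&1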

    commands = []
    for dir_curr in all_dirs:
        dir_curr = util.escapeString(dir_curr)
        log_file = os.path.join(dir_curr, 'log.txt')
        command = command_pre + dir_curr + command_middle_1 + dir_curr + command_middle + log_file + command_end
        commands.append(command)

    idx_range = util.getIdxRange(len(commands), len(commands) / num_proc)
    command_files = []
    for i, start_idx in enumerate(idx_range[:-1]):
        command_file_curr = command_file_pre + str(i) + '.txt'
        end_idx = idx_range[i + 1]
        commands_rel = commands[start_idx:end_idx]
        util.writeFile(command_file_curr, commands_rel)
        command_files.append(command_file_curr)
    return command_files
Example #11
def saveGTData(path_to_txt, class_labels_map, out_file):

    [class_labels, class_idx_all] = zip(*class_labels_map)

    sticks = util.readLinesFromFile(path_to_txt)
    print len(sticks)

    meta_info = []
    coords = []

    for stick in sticks:
        stick_split = stick.split('/')
        stick_split = [curr for curr in stick_split if curr != '']
        class_label = stick_split[3]
        class_idx = class_idx_all[class_labels.index(class_label)]
        video_id = int(stick_split[5])
        shot_id = int(stick_split[7])
        frame_id = stick_split[-1]
        frame_id = int(frame_id[:frame_id.index('.')].strip('frame'))
        meta_info_curr = [class_idx, video_id, shot_id, frame_id]
        res = scipy.io.loadmat(stick, squeeze_me=True, struct_as_record=False)
        boxes = res['coor']
        if not hasattr(boxes[0], '__iter__'):
            boxes = [boxes]
        else:
            print 'found it!', boxes

        for box in boxes:
            meta_info.append(meta_info_curr)
            coords.append(list(box))

    pickle.dump([meta_info, coords], open(out_file, 'wb'))
Example #12
def addDimFaceData():
    face_data_dir = '/home/SSD3/maheen-data/face_data/npy'
    out_dir = '/home/SSD3/maheen-data/face_data/npy_dimAdd'
    util.mkdir(out_dir)

    face_data_file = os.path.join(face_data_dir, 'data_list.txt')
    in_files = util.readLinesFromFile(face_data_file)

    args = []
    for idx, in_file in enumerate(in_files):
        out_file = in_file.replace(face_data_dir, out_dir)

        if os.path.exists(out_file):
            continue

        folder_curr = out_file[:out_file.rindex('/')]
        util.mkdir(folder_curr)
        args.append((idx, in_file, out_file))

    print len(args)
    # for arg in args:
    #   addDim(arg);
    #   break;
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(addDim, args)
Example #13
def saveRecordOfCountErrorFiles(score_files, class_label, path_to_patches,
                                out_file):
    record = []
    for idx_file_curr, file_curr in enumerate(score_files):
        print idx_file_curr, len(score_files)
        file_name = file_curr[file_curr.rindex('/') + 1:file_curr.rindex('.')]
        class_idx = int(file_name[:file_name.index('_')])
        video_id = int(file_name[file_name.index('_') +
                                 1:file_name.rindex('_')])
        shot_id = int(file_name[file_name.rindex('_') + 1:])

        scores = pickle.load(open(file_curr, 'rb'))

        shot_string = class_label
        # class_labels[class_idx_all.index(class_idx)];
        shot_string = os.path.join(
            path_to_patches,
            shot_string + '_' + str(video_id) + '_' + str(shot_id))

        for tube_id in scores:
            scores_curr = scores[tube_id]
            txt_file = os.path.join(path_to_patches, shot_string, str(tube_id),
                                    str(tube_id) + '.txt')
            patch_paths = util.readLinesFromFile(txt_file)
            if scores_curr.shape[0] != len(patch_paths):
                print 'PROBLEM'
                record.append(
                    (txt_file, file_curr, tube_id, scores_curr.shape[0],
                     len(patch_paths)))
    print len(record)
    pickle.dump(record, open(out_file, 'wb'))
Example #14
def getListScoresAndPatches_multiProc((idx_file_curr, file_curr, class_label,
                                       path_to_patches)):
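    # note: the tuple-unpacking parameter list above is Python 2 only
    # (removed in Python 3 by PEP 3113)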
    list_scores = []
    list_files = []
    print idx_file_curr, file_curr
    file_name = file_curr[file_curr.rindex('/') + 1:file_curr.rindex('.')]
    class_idx = int(file_name[:file_name.index('_')])
    video_id = int(file_name[file_name.index('_') + 1:file_name.rindex('_')])
    shot_id = int(file_name[file_name.rindex('_') + 1:])

    scores = pickle.load(open(file_curr, 'rb'))

    shot_string = class_label
    shot_string = os.path.join(
        path_to_patches,
        shot_string + '_' + str(video_id) + '_' + str(shot_id))

    for tube_id in scores:
        scores_curr = scores[tube_id]
        scores_curr = np.mean(scores_curr, axis=1)
        list_scores.extend(list(scores_curr))

        txt_file = os.path.join(path_to_patches, shot_string, str(tube_id),
                                str(tube_id) + '.txt')
        patch_paths = util.readLinesFromFile(txt_file)
        assert len(patch_paths) == scores_curr.shape[0]
        list_files.extend(patch_paths)
    return (list_scores, list_files)
Example #15
def experiment_nnPatches(params):
    out_file_text=params.out_file_text;
    class_ids=params.class_id;

    img_paths=util.readLinesFromFile(out_file_text)
    class_id_idx_tuples=[];
    for img_path in img_paths:
        class_id=img_path[:img_path.rindex('_')]
        class_id=class_id[class_id.rindex('_')+1:];
        class_idx=class_ids.index(class_id);
        class_idx=params.class_idx[class_idx];
        class_id_idx_tuples.append((class_id,class_idx));

    file_names_mat,object_indices=recreateOriginalPaths(params.path_to_annotation,img_paths,returnObject_idx=True);
    print 'getting azimuths'
    azimuths=[getObjectStruct(file_name,object_idx).viewpoint.azimuth_coarse  for file_name,object_idx in zip(file_names_mat,object_indices)]
    
    if params.out_file_layers is None:
        print 'running layers part'
        out_file_layers=caffe_wrapper.saveFeaturesOfLayers(out_file_text,params.path_to_classify,params.gpu_no,params.layers,ext='jpg',out_file=params.out_file_pre,meanFile=params.caffe_mean,deployFile=params.caffe_deploy,modelFile=params.caffe_model,images_dim=params.images_dim)
        params=params._replace(out_file_layers=out_file_layers)
        
    out_file_layers=params.out_file_layers;

    print 'writing to db'
    for layer in params.layers:
        vals=np.load(out_file_layers);
        indices,distances=nearest_neighbor.doCosineDistanceNN(vals[layer],numberOfN=None);
        mani=Pascal3D_Manipulator(params.db_path_out);
        mani.openSession();
        for idx in range(len(img_paths)):
            mani.insert(idx,img_paths[idx],layer,out_file_layers,class_id_idx_tuples[idx][0],class_id_idx_tuples[idx][1],params.caffe_model, azimuth=azimuths[idx],neighbor_index=indices[idx],neighbor_distance=distances[idx],trainedClass=params.trainFlag,commitFlag=False)
        mani.closeSession();

    return params
Example #16
def script_writeCommandsForPreprocessing(all_dirs_file,command_file_pre,num_proc,check_file=None):
    all_dirs=util.readLinesFromFile(all_dirs_file);
    all_dirs=[dir_curr[:-1] for dir_curr in all_dirs];
    
    if check_file is not None:
        all_dirs=getRemainingDirs(all_dirs,check_file);

    command_pre='echo '
    command_middle_1=';cd ~/Downloads/opticalflow; matlab -nojvm -nodisplay -nosplash -r "out_folder=\''
    command_middle='\';saveTrainingData" > '
    command_end=' 2>&1';

    commands=[];
    for dir_curr in all_dirs:
        dir_curr=util.escapeString(dir_curr);
        log_file=os.path.join(dir_curr,'log.txt');
        command=command_pre+dir_curr+command_middle_1+dir_curr+command_middle+log_file+command_end;
        commands.append(command);
    
    idx_range=util.getIdxRange(len(commands),len(commands)/num_proc)
    command_files=[];
    for i,start_idx in enumerate(idx_range[:-1]):
        command_file_curr=command_file_pre+str(i)+'.txt'
        end_idx=idx_range[i+1]
        commands_rel=commands[start_idx:end_idx];
        util.writeFile(command_file_curr,commands_rel);
        command_files.append(command_file_curr);
    return command_files;
Example #17
def script_saveMatFiles(flo_dir,im_dir,out_dir,mat_file,proto_file):
    #get video name
    video_name=flo_dir[:-1];
    video_name=video_name[video_name.rindex('/')+1:];
    print video_name

    #get flo files
    flo_files=[os.path.join(flo_dir,file_curr) for file_curr in os.listdir(flo_dir) if file_curr.endswith('.flo')];
    flo_files.sort();

    #get im files
    im_files=util.readLinesFromFile(os.path.join(flo_dir,'im_1.txt'));
    old_dir=im_files[0][:im_files[0].rindex('/')+1];
    
    #if dirs have changed, replace the paths
    if im_dir!=old_dir:
        im_files=[im_curr.replace(old_dir,im_dir) for im_curr in im_files];

    #get batch size
    batch_size=getBatchSizeFromDeploy(os.path.join(flo_dir,proto_file));

    #get batch info
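    # assumes flo files are named like '<prefix>-<batchNum>(<imageNum>).flo'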
    batch_num=[int(file_curr[file_curr.rindex('-')+1:file_curr.rindex('(')]) for file_curr in flo_files];
    batch_num=np.array(batch_num);
    batch_ids=list(set(batch_num))
    batch_ids.sort();
    
    flo_files_all = [];
    im_files_all = []
    for batch_no in batch_ids:
        idx_rel=np.where(batch_num==batch_no)[0];
        
        flo_files_curr=[];
        im_files_curr=[];
        for idx_curr in idx_rel:
            flo_file=flo_files[idx_curr];
            im_no=int(flo_file[flo_file.rindex('(')+1:flo_file.rindex(')')]);
            im_corr=im_files[batch_no*batch_size+im_no];
            flo_files_curr.append(flo_file);
            im_files_curr.append(im_corr);
        
        flo_files_all.append(flo_files_curr);
        im_files_all.append(im_files_curr);

    #save as mat with flofiles, im_files, and out_dir;
    for idx_batch_no,batch_no in enumerate(batch_ids):
        flo_files=flo_files_all[idx_batch_no];
        im_files=im_files_all[idx_batch_no];

        out_dir_mat = os.path.join(out_dir,video_name+'_'+str(batch_no));
        # print out_dir_mat

        if not os.path.exists(out_dir_mat):
            os.mkdir(out_dir_mat);
        out_file=os.path.join(out_dir_mat,mat_file);
        print out_file
        mat_data={'flo_files':flo_files,'im_files':im_files}
        
        scipy.io.savemat(out_file,mat_data)
Example #18
def readPredFile(pred_file):
    lines = util.readLinesFromFile(pred_file)
    annos_all = []
    for line_curr in lines:
        annos_str = line_curr.split()
        annos = parseAnnoStr(annos_str)
        annos_all.append(annos)
    return annos_all
Example #19
def script_writeFlownetCommands(params):
    video_list_file = params.video_list_file
    path_to_video_meta = params.path_to_video_meta
    in_dir_meta = params.in_dir_meta
    out_dir_meta = params.out_dir_meta
    path_to_deploy = params.path_to_deploy
    out_file_commands = params.out_file_commands
    dir_flownet_meta = params.dir_flownet_meta
    path_to_sizer = params.path_to_sizer
    caffe_bin = params.caffe_bin
    path_to_model = params.path_to_model
    text_1_org = params.text_1
    text_2_org = params.text_2
    deploy_file = params.deploy_file
    gpu = params.gpu

    im_dirs = util.readLinesFromFile(video_list_file)
    im_dirs = [
        im_dir.replace(path_to_video_meta, in_dir_meta)[:-4]
        for im_dir in im_dirs
    ]

    commands = []
    # im_dirs=['/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data/hmdb/pick/THE_WALLET_TRICK!!!_pick_f_cm_np2_ba_med_1'];
    for idx_im_dir, im_dir in enumerate(im_dirs):
        print idx_im_dir, len(im_dirs)
        out_dir_curr = im_dir.replace(in_dir_meta, out_dir_meta)
        text_1 = os.path.join(out_dir_curr, text_1_org)
        text_2 = os.path.join(out_dir_curr, text_2_org)
        out_deploy = os.path.join(out_dir_curr, deploy_file)

        subprocess.call('mkdir -p ' + util.escapeString(out_dir_curr),
                        shell=True)

        list_1, list_2 = getImageListForFlow(im_dir)
        util.writeFile(text_1, list_1)
        util.writeFile(text_2, list_2)

        # im_test=util.escapeString(list_1[0]);
        dim_list = [
            int(dimstr) for dimstr in str(
                subprocess.check_output([path_to_sizer, list_1[0]])).split(',')
        ]
        replaceProto(path_to_deploy, out_deploy, dim_list, text_1, text_2,
                     len(list_1), out_dir_curr)

        args = [
            caffe_bin, 'test', '-model',
            util.escapeString(out_deploy), '-weights', path_to_model,
            '-iterations', '1', '-gpu',
            str(gpu)
        ]

        cmd = str.join(' ', args)
        commands.append(cmd)

    # print('Executing %s' % cmd)
    util.writeFile(out_file_commands, commands)
Example #20
def convertFileToFloOnly(neg_flo,out_file_neg):
    neg_flo=util.readLinesFromFile(neg_flo);
    neg_only_flo=[];
    for neg_flo_curr in neg_flo:
        neg_flo_curr=neg_flo_curr.split(' ');
        neg_only_flo.append(neg_flo_curr[-1]+' '+neg_flo_curr[1]);

    assert len(neg_only_flo)==len(neg_flo);
    util.writeFile(out_file_neg,neg_only_flo);
Example #21
def getNumNeighbors(file_horse):
    lines=util.readLinesFromFile(file_horse);
    lines=np.array(lines);
    uni_lines=np.unique(lines);
    counts=np.zeros(uni_lines.shape);
    for idx_uni_curr,uni_curr in enumerate(uni_lines):
        if idx_uni_curr%500==0:
            print idx_uni_curr;
        counts[idx_uni_curr]=np.sum(lines==uni_curr);
    return uni_lines,counts;
Example #22
def writeTrainFilesWithFlow(old_train_file,dir_flo_im,new_train_file,ext='.png'):
    lines=util.readLinesFromFile(old_train_file);
    img_files=[line[:line.index(' ')] for line in lines];
    file_names=util.getFileNames(img_files,ext=False);
    flo_im_files=[os.path.join(dir_flo_im,file_name+ext) for file_name in file_names];
    for flo_im_file in flo_im_files:
        assert os.path.exists(flo_im_file);

    lines_new=[line+' '+flo_im_curr for line,flo_im_curr in zip(lines,flo_im_files)];
    util.writeFile(new_train_file,lines_new);
Example #23
def main():
    data_dir = '../../../../data/ck_96/train_test_files'
    out_dir = '../../../../data/ck_96/train_test_files_tfrecords'
    im_path_prepend = '../../../'
    util.mkdir(out_dir)

    split_num = 0

    train_file = os.path.join(data_dir, 'train_' + str(split_num) + '.txt')
    test_file = os.path.join(data_dir, 'test_' + str(split_num) + '.txt')
    mean_file = os.path.join(data_dir, 'train_' + str(split_num) + '_mean.png')
    std_file = os.path.join(data_dir, 'train_' + str(split_num) + '_std.png')

    mean_im = scipy.misc.imread(mean_file).astype(np.float32)
    std_im = scipy.misc.imread(std_file).astype(np.float32)
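    # guard against divide-by-zero below, since load_image presumably
    # normalizes as (im - mean) / std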
    std_im[std_im == 0] = 1

    for in_file in [train_file, test_file]:

        out_file = os.path.join(
            out_dir,
            os.path.split(in_file)[1].replace('.txt', '.tfrecords'))
        print in_file, out_file

        lines = util.readLinesFromFile(in_file)
        random.shuffle(lines)

        writer = tf.python_io.TFRecordWriter(out_file)
        for idx_line, line in enumerate(lines):
            if idx_line % 100 == 0:
                print idx_line, line

            im_path, label = line.split(' ')
            label = int(label)
            im_path = os.path.join(im_path_prepend, im_path)

            img = load_image(im_path, mean_im, std_im)
            # print img.shape,np.min(img),np.max(img)

            feature = {
                'image_raw':
                _bytes_feature(tf.compat.as_bytes(img.tostring())),
                'label': _int64_feature(label),
                'height': _int64_feature(img.shape[0]),
                'width': _int64_feature(img.shape[1]),
                'depth': _int64_feature(img.shape[2])
            }

            example = tf.train.Example(features=tf.train.Features(
                feature=feature))

            # Serialize to string and write on the file
            writer.write(example.SerializeToString())

        writer.close()
Example #24
def main():
	pos_data_txt='/disk2/februaryExperiments/deep_proposals/positive_data.txt';
	num_to_keep=100;
	out_file_db='/disk2/februaryExperiments/deep_proposals/positive_data_'+str(num_to_keep)+'.hdf5';

	lines=util.readLinesFromFile(pos_data_txt);
	ims=[];
	masks=[];
	for line in lines:
		idx_space=line.index(' ');
		im=line[:idx_space];
		mask=line[idx_space+1:];
		ims.append(im);
		masks.append(mask);
	
	if num_to_keep is None:
		num_to_keep=len(ims);

	ims=ims[:num_to_keep];
	masks=masks[:num_to_keep];

	f=h5py.File(out_file_db,'w')
	
	canoncial_shape=(240,240);	
	dset = f.create_dataset('images', (len(ims),3,canoncial_shape[0],canoncial_shape[1]), dtype='i')
	dset = f.create_dataset('masks', (len(masks),1,canoncial_shape[0],canoncial_shape[1]), dtype='i')


	for idx in range(len(ims)):
		print idx
		img=scipy.misc.imread(ims[idx]);
		# print img.shape
		if len(img.shape)<3:
			img=np.dstack((img,img,img))
		# print img.shape
		f['/images'][idx,...]=reshapeForDB(img,canoncial_shape,addDim=False);		
		f['/masks'][idx,...]=reshapeForDB(scipy.misc.imread(masks[idx]),canoncial_shape,addDim=True);
		# print np.min(im_curr),np.max(im_curr);
		# print im_curr.shape

		# mask_curr=scipy.misc.imread(masks[idx]);
		# mask_curr=np.expand_dims(mask_curr,3)
		# mask_curr=np.transpose(mask_curr,(2,0,1));
		# print np.min(mask_curr),np.max(mask_curr);
		# print mask_curr.shape

		# f['/images'][idx,...]=im_curr;
		# f['/masks'][idx,...]=mask_curr;

	f.close();
	print len(ims);
	print ims[:3];
	print len(masks);
	print masks[:3];
Example #25
def writeTrainValTxtExcluded(train_new_text,val_new_text,out_file_text,training_data_text,percent_exclude):

    lines=util.readLinesFromFile(out_file_text);
    info=[tuple(line_curr.split(' ')) for line_curr in lines];
    class_rec={};
    for dataset,video in info:
        if dataset=='youtube':
            video_split=video.split('_');
            class_curr=video_split[0];
            if class_curr in class_rec:
                class_rec[class_curr].append(video);
            else:
                class_rec[class_curr]=[video];

    list_exclude_all=[];
    for class_curr in class_rec.keys():
        num_exclude=int(math.ceil(len(class_rec[class_curr])*percent_exclude));
        list_shuffle=class_rec[class_curr];
        random.shuffle(list_shuffle);
        list_exclude=list_shuffle[:num_exclude];
        list_exclude_all=list_exclude_all+list_exclude;


    lines=util.readLinesFromFile(training_data_text);
    # print len(lines);
    lines_to_keep=[];
    lines_to_exclude=[];
    for line in lines:
        img=line[:line.index(' ')];
        img_split=img.split('/');
        if img_split[3]=='youtube' and (img_split[4] in list_exclude_all):
            lines_to_exclude.append(line);
            # print img
            continue;
        else:
            lines_to_keep.append(line);
            
    print len(lines_to_keep),len(lines_to_exclude),len(lines),len(lines_to_keep)+len(lines_to_exclude)

    util.writeFile(train_new_text,lines_to_keep);
    util.writeFile(val_new_text,lines_to_exclude);
Example #26
def getCommandFaceTest(path_to_th,out_dir,file_curr,batch_size=100):
    command=['th',path_to_th];
    command=command+['-val_data_path',file_curr];
    command=command+['-outDir',out_dir];
    command=command+['-batchSize',str(batch_size)];
    amount_data=len(util.readLinesFromFile(file_curr));
    num_iterations=amount_data/batch_size;
    if amount_data%batch_size!=0:
        num_iterations=num_iterations+1;
    command=command+['-iterations',str(num_iterations)];
    command=' '.join(command);
    return command;
Example #27
def writeh5ImgFile(dir_neg,out_file_match):

    lines=[];
    h5_files=[os.path.join(dir_neg,file_curr) for file_curr in os.listdir(dir_neg) if file_curr.endswith('.h5')];
    print len(h5_files)
    for idx_file_curr,file_curr in enumerate(h5_files):
        if idx_file_curr%100==0:
            print idx_file_curr
        img_file=util.readLinesFromFile(file_curr.replace('.h5','.txt'))[0].strip();
        # print file_curr,img_file
        lines.append(file_curr+' '+img_file);

    util.writeFile(out_file_match,lines);
Example #28
def script_writeNegFile():

    dir_flow='/disk2/aprilExperiments/deep_proposals/flow/results_neg'
    out_text='/disk2/aprilExperiments/deep_proposals/flow/test_neg.txt';
    # util.mkdir(dir_flow);

    neg_text='/disk2/marchExperiments/deep_proposals/negatives.txt';
    lines=util.readLinesFromFile(neg_text);
    neg_images=[line_curr[:line_curr.index(' ')] for line_curr in lines];
    neg_images=neg_images[:100];
    to_write=[neg_image+' 1' for neg_image in neg_images]
    util.writeFile(out_text,to_write);
Example #29
def getDataAndLabels(data_path, batchSize, numBatches, fc6_dir):
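    # stitches the per-batch fc6 feature files (1.npy .. <numBatches>.npy)
    # into one (num_lines x 4096) array aligned with the lines in data_path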
    lines = util.readLinesFromFile(data_path)
    out_data = np.zeros((len(lines), 4096))
    start_curr = 0
    for i in range(numBatches):
        file_curr = os.path.join(fc6_dir,
                                 str(i + 1) + '.npy')
        fc6_curr = np.load(file_curr)
        end_curr = min(start_curr + fc6_curr.shape[0], len(lines))
        len_curr = end_curr - start_curr
        out_data[start_curr:end_curr, :] = fc6_curr[:len_curr]
        start_curr = end_curr
    return out_data, lines
Example #30
def main():

	# out_dir_meta='/disk2/mayExperiments/validation';
	# val_file=os.path.join(out_dir_meta,'val.txt');
	out_dir_meta='/disk2/mayExperiments/train_data';
	val_file=os.path.join(out_dir_meta,'train.txt');
	out_dir=os.path.join(out_dir_meta,'rescaled_images');
	# util.mkdir(out_dir);

	img_paths=util.readLinesFromFile(val_file);
	# print len(img_paths);
	# img_paths=img_paths[:3];
	rescaleImAndSaveMeta(img_paths,out_dir);
Example #31
def saveFloAsNpPred((h5_file,dir_flo_im,replace_paths,C,idx)):
    print idx;
    img_file=util.readLinesFromFile(h5_file.replace('.h5','.txt'))[0].strip();
    img_file=img_file.replace(replace_paths[0],replace_paths[1]);
    img_name=img_file[img_file.rindex('/')+1:img_file.rindex('.')];
    out_file_flo=os.path.join(dir_flo_im,img_name+'.npy');
        
    if os.path.exists(out_file_flo):
        return;

    flo=getFlowMat(h5_file,C);
    im=scipy.misc.imread(img_file);
    flo_resize=resizeSP(flo,im.shape);
    np.save(out_file_flo,flo_resize);
Example #32
def main():
    dir_server='/home/SSD3/maheen-data';
    click_str='http://vision1.idav.ucdavis.edu:1000';
    
    out_dir_meta_data='/home/SSD3/maheen-data/horse_project/data_check';
    dir_neighbors='/home/SSD3/maheen-data/horse_project/neighbor_data';
    matches_file=os.path.join(dir_neighbors,'horse_trainImageList_2_data_100_neigbors.txt');
    
    out_dir_debug=os.path.join(dir_neighbors,'debug');
    out_dir_breakdowns=os.path.join(dir_neighbors,'small_datasets');
    file_pre='matches';
    util.mkdir(out_dir_breakdowns);
    util.mkdir(out_dir_debug);
    
    out_file_counts=os.path.join(out_dir_debug,'counts.npz');
    out_file_dist=os.path.join(out_dir_debug,'counts_dist.png');
    
    out_dir_meta_horse = os.path.join(out_dir_meta_data,'horse');
    out_dir_meta_face = os.path.join(out_dir_meta_data,'aflw');
    
    num_neighbors=100;
    out_file_face=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_allKP.txt');
    out_file_face_noIm=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_allKP_noIm.txt');
    out_file_horse=os.path.join(out_dir_meta_horse,'matches_'+str(num_neighbors)+'_train_allKP.txt');
    
    old_horse_file='/home/SSD3/maheen-data/horse_project/data_check/horse/matches_5_train_allKP_minLoss_clean_full.txt';
    old_data=util.readLinesFromFile(old_horse_file);
    old_data=np.array(old_data);
    old_data=np.unique(old_data);

#     new_data,counts=getNumNeighbors(out_file_horse);
#     np.savez(out_file_counts,new_data=new_data,counts=counts);
    
    data=np.load(out_file_counts);
    print data.files;
    new_data=data['new_data'];
    counts=data['counts'];
    num_data=range(500,len(old_data),500);
    num_data[-1]=len(old_data);
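    # (range returns a list in Python 2, so the item assignment above is valid)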
    
    num_neighbors=range(5,25,5);
    np.random.shuffle(old_data);
    for num_neighbor in num_neighbors:
        for num_data_curr in num_data:
            file_curr=file_pre+'_'+str(num_neighbor)+'_'+str(num_data_curr);
            out_file_pre=os.path.join(out_dir_breakdowns,file_curr);
            files=writeSmallDatasetFile(out_file_pre,old_data,num_neighbor,num_data_curr,out_file_horse,\
                                        out_file_face,out_file_face_noIm);
            for file_curr in files:
                print file_curr;
Example #33
def script_testOnYoutube():
    val_file='/disk2/mayExperiments/finetuning_youtube_hmdb_llr/val_eq.txt'
    out_dir='/disk2/mayExperiments/eval_ucf_finetune';
    clusters_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat';
    gpu=0;

    util.mkdir(out_dir);
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel';
    # out_dir_model=os.path.join(out_dir,'original_model');

    model_file='/disk2/mayExperiments/ft_youtube_hmdb_ucfClusters/OptFlow_youtube_hmdb__iter_55000.caffemodel';
    out_dir_model=os.path.join(out_dir,'ft_ucf_model');

    util.mkdir(out_dir_model);
    out_dir_flo=os.path.join(out_dir_model,'flo');
    out_dir_flo_viz=os.path.join(out_dir_model,'flo_viz');
    util.mkdir(out_dir_flo);util.mkdir(out_dir_flo_viz)

    num_to_pick=20;

    img_paths=util.readLinesFromFile(val_file);
    img_paths=[img_path[:img_path.index(' ')] for img_path in img_paths];
    class_names=[file_curr[:file_curr.index('_')] for file_curr in util.getFileNames(img_paths)];
    classes=list(set(class_names));
    class_names=np.array(class_names);
    
    img_paths_test=[];
    for class_curr in classes:
        idx_rel=np.where(class_names==class_curr)[0];
        idx_rel=idx_rel[:num_to_pick];
        img_paths_test.extend([img_paths[idx_curr] for idx_curr in idx_rel]);

    # po.script_saveFlosAndViz(img_paths_test,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file);

    out_file_html=os.path.join(out_dir,'model_comparison.html');
    out_dirs_flo_viz=[os.path.join(out_dir,'original_model','flo_viz'),os.path.join(out_dir,'ft_ucf_model','flo_viz')];
    out_dirs_flo_viz_captions=['original_model','ft_ucf_model'];
    img_paths_html=[];
    captions_html=[];
    img_names=util.getFileNames(img_paths_test,ext=False);
    for img_path_test,img_name in zip(img_paths_test,img_names):
        row_curr=[];
        row_curr.append(util.getRelPath(img_path_test));
        for out_dir_curr in out_dirs_flo_viz:
            file_curr=os.path.join(out_dir_curr,img_name+'.png');
            row_curr.append(util.getRelPath(file_curr));
        captions_curr=[img_name]+out_dirs_flo_viz_captions;
        img_paths_html.append(row_curr)
        captions_html.append(captions_curr);
    visualize.writeHTML(out_file_html,img_paths_html,captions_html);
Example #34
def getMovedDirPath(moved_dir, orig_dir, sub_dirs_file_name):
    sub_dirs = [
        util.readLinesFromFile(os.path.join(dir_curr, sub_dirs_file_name))
        for dir_curr in [moved_dir, orig_dir]
    ]
    full_paths = [
        os.path.join(orig_dir, dir_curr) for dir_curr in sub_dirs[1]
        if dir_curr not in sub_dirs[0]
    ]
    full_paths = full_paths + [
        os.path.join(moved_dir, dir_curr) for dir_curr in sub_dirs[0]
    ]
    return full_paths
Example #35
def saveMinEqualFrames(train_new_text,out_file_idx,out_file_eq,includeHuman=True):
    lines=util.readLinesFromFile(train_new_text);
    img_paths=[line[:line.index(' ')] for line in lines];
    p=multiprocessing.Pool(multiprocessing.cpu_count());
    vals=p.map(getDataSetAndVideoName,img_paths);
    [dataset,video]=zip(*vals)
    dataset=np.array(dataset);
    print np.unique(dataset);

    frame_idx_rec={};
    if includeHuman:
        frame_idx_rec['human']=list(np.where(dataset=='hmdb_try_2')[0]);

    for idx,video_curr in enumerate(video):
        if dataset[idx]=='youtube':
            class_curr=video_curr[:video_curr.index('_')];
            if class_curr in frame_idx_rec:
                frame_idx_rec[class_curr].append(idx);
            else:
                frame_idx_rec[class_curr]=[idx];

    for class_curr in frame_idx_rec.keys():
        print class_curr,len(frame_idx_rec[class_curr]);


    min_frames=min([len(val_curr) for val_curr in frame_idx_rec.values()]);
    print 'min_frames',min_frames

    idx_to_pick=[];

    for class_curr in frame_idx_rec.keys():
        idx_curr=frame_idx_rec[class_curr];
        random.shuffle(idx_curr);
        idx_to_pick.extend(idx_curr[:min_frames]);

        # print class_curr,len(frame_idx_rec[class_curr]);

    idx_all=[idx_curr for idx_curr_all in frame_idx_rec.values() for idx_curr in idx_curr_all];
    print len(idx_all),len(lines);
    assert len(idx_all)==len(lines);

    idx_all.sort();
    print  idx_all==list(range(len(lines)));
    assert idx_all==list(range(len(lines)));
    lines_to_keep=[lines[idx_curr] for idx_curr in idx_to_pick];
    print len(lines_to_keep);

    np.save(out_file_idx,np.array(idx_to_pick))
    util.writeFile(out_file_eq,lines_to_keep);
Example #36
def script_writeHumanOnlyNegFile():
    neg_file_old='/disk2/marchExperiments/deep_proposals/negatives.txt'
    neg_file_new='/disk2/marchExperiments/deep_proposals/negatives_onlyHuman.txt'

    npy_dir_old='/disk2/marchExperiments/deep_proposals/negatives'
    npy_dir_new='/disk2/aprilExperiments/negatives_npy_onlyHuman'

    lines=util.readLinesFromFile(neg_file_old);
    lines_new=[line.replace(npy_dir_old,npy_dir_new) for line in lines];
    for line in lines_new:
        assert npy_dir_new in line;

    print len(lines),len(lines_new);
    print lines_new[0];
    util.writeFile(neg_file_new,lines_new);
Example #37
def getOutputInfoMP((list_file,out_files_test)):
    img_file = util.readLinesFromFile(list_file.replace('.h5','.txt'))[0].strip();
    if out_files_test is not None and img_file not in out_files_test:
        line_curr=None;
    else: 
        im=scipy.misc.imread(img_file);
        if len(im.shape)>2:
            str_size=[im.shape[0],im.shape[1],im.shape[2]];
        else:
            str_size=[im.shape[0],im.shape[1],1];
        str_size=[str(i) for i in str_size]
        line_curr=[list_file,img_file]+str_size;
        line_curr=' '.join(line_curr);
    
    return line_curr;        
Example #38
def script_visualizeFlos(params):
    out_file_html = params.out_file_html
    out_dir_flo_im = params.out_dir_flo_im
    flo_dir = params.flo_dir
    im_file_1 = params.im_file_1
    im_file_2 = params.im_file_2
    height = params.height
    width = params.width
    rel_path = params.rel_path

    list_1=util.readLinesFromFile(im_file_1);
    list_2=util.readLinesFromFile(im_file_2);
    flo_files=[os.path.join(flo_dir,file_curr) for file_curr in os.listdir(flo_dir) if file_curr.endswith('.flo')];
    flo_nums=[int(file_curr[file_curr.rindex('(')+1:file_curr.rindex(')')]) for file_curr in flo_files];
    print len(list_1),len(list_2),len(flo_files);
    flo_files_sorted=[];
    for idx_flo in range(len(flo_nums)):
        idx_curr=flo_nums.index(idx_flo);
        flo_files_sorted.append(flo_files[idx_curr]);
    
    im_list=[];
    caption_list=[];
    # for idx_flo,flo_file_curr in enumerate(flo_files_sorted):
    for idx_flo in range(len(list_1)):
        flo_file_curr=flo_files_sorted[idx_flo];
        out_flo_x=os.path.join(out_dir_flo_im,str(idx_flo)+'_x.png');
        out_flo_y=os.path.join(out_dir_flo_im,str(idx_flo)+'_y.png');
        flo=util.readFlowFile(flo_file_curr);
        visualize.visualizeFlo(flo,out_flo_x,out_flo_y);
        im_file_rel_1=list_1[idx_flo].replace(rel_path[0],rel_path[1]);
        im_file_rel_2=list_2[idx_flo].replace(rel_path[0],rel_path[1]);
        flo_file_rel_1=out_flo_x.replace(rel_path[0],rel_path[1]);
        flo_file_rel_2=out_flo_y.replace(rel_path[0],rel_path[1]);
        im_list.append([im_file_rel_1,im_file_rel_2,flo_file_rel_1,flo_file_rel_2]);
        caption_list.append(['Image 1','Image 2','Flow x','Flow y']);
    visualize.writeHTML(out_file_html,im_list,caption_list,height=height,width=width)
Example #39
def writeTrainingDataFiles(dir_content_file,pre_dir,img_dir,out_file_text,ignore_amount=-2,postfix='.jpg'):
    start_idx=len(pre_dir);
    files=util.readLinesFromFile(dir_content_file);
    lines_to_write=[];

    for idx_file_curr,file_curr in enumerate(files):
        if idx_file_curr%1000==0:
            print idx_file_curr
        file_name=file_curr[start_idx+1:];
        file_name=file_name.split('_');
        file_name='_'.join(file_name[:ignore_amount]);
        file_name=file_name+postfix;
        file_name=os.path.join(img_dir,file_name);
        lines_to_write.append(file_name+' '+file_curr);
    util.writeFile(out_file_text,lines_to_write);
Example #40
def writeMinLossFile(out_file_pre,post_tags,minloss_post,old_horse_file,old_human_file,old_human_file_noIm):
    new_files=[out_file_pre+post_tag_curr for post_tag_curr in post_tags];
    
    old_data=util.readLinesFromFile(old_horse_file);
    old_data=np.array(old_data);
    
    new_data=util.readLinesFromFile(new_files[0]);
    new_data=np.array(new_data);
    new_data_uni=np.unique(new_data);
    bin_keep=np.in1d(old_data,new_data_uni);
#     print bin_keep.shape,sum(bin_keep);
    old_files=[old_horse_file,old_human_file,old_human_file_noIm];
    new_files_write=[file_curr[:file_curr.rindex('.')]+minloss_post for file_curr in new_files];
    for old_file_curr,new_file_curr,new_file_org in zip(old_files,new_files_write,new_files):
        data_curr=util.readLinesFromFile(old_file_curr);
        data_curr=np.array(data_curr);
        data_keep=data_curr[bin_keep];
        print old_file_curr,new_file_curr,len(data_keep);
        print data_keep[0];
        new_file_org_data=util.readLinesFromFile(new_file_org);
        new_file_org_data=np.array(new_file_org_data);
        bin_check=np.in1d(data_keep,new_file_org_data);
        print sum(bin_check),data_keep.shape[0];
        assert sum(bin_check)==data_keep.shape[0];
Example #41
def script_writeFlownetCommands(params):
    video_list_file= params.video_list_file;
    path_to_video_meta= params.path_to_video_meta;
    in_dir_meta= params.in_dir_meta;
    out_dir_meta= params.out_dir_meta;
    path_to_deploy= params.path_to_deploy;
    out_file_commands= params.out_file_commands;
    dir_flownet_meta= params.dir_flownet_meta;
    path_to_sizer= params.path_to_sizer;
    caffe_bin = params.caffe_bin;
    path_to_model = params.path_to_model;
    text_1_org= params.text_1;
    text_2_org= params.text_2;
    deploy_file= params.deploy_file;
    gpu= params.gpu;

    im_dirs=util.readLinesFromFile(video_list_file);
    im_dirs=[im_dir.replace(path_to_video_meta,in_dir_meta)[:-4] for im_dir in im_dirs];
    
    commands=[];
    # im_dirs=['/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data/hmdb/pick/THE_WALLET_TRICK!!!_pick_f_cm_np2_ba_med_1'];
    for idx_im_dir,im_dir in enumerate(im_dirs):
        print idx_im_dir,len(im_dirs);
        out_dir_curr=im_dir.replace(in_dir_meta,out_dir_meta);
        text_1=os.path.join(out_dir_curr,text_1_org);
        text_2=os.path.join(out_dir_curr,text_2_org);
        out_deploy=os.path.join(out_dir_curr,deploy_file);

        subprocess.call('mkdir -p '+util.escapeString(out_dir_curr), shell=True)
        
        list_1,list_2 = getImageListForFlow(im_dir)
        util.writeFile(text_1,list_1);
        util.writeFile(text_2,list_2);
        
        # im_test=util.escapeString(list_1[0]);
        dim_list = [int(dimstr) for dimstr in str(subprocess.check_output([path_to_sizer, list_1[0]])).split(',')]
        replaceProto(path_to_deploy,out_deploy,dim_list,text_1,text_2,len(list_1),out_dir_curr)
        
        args = [caffe_bin, 'test', '-model', util.escapeString(out_deploy),
            '-weights', path_to_model,
            '-iterations', '1',
            '-gpu', str(gpu)]

        cmd = str.join(' ', args)
        commands.append(cmd);

    # print('Executing %s' % cmd)
    util.writeFile(out_file_commands,commands);
Example #42
def parseInfoFile(out_file_text,lim=None):
    lines=util.readLinesFromFile(out_file_text);
    if lim is not None:
        lines=lines[:lim];

    h5_files=[];
    img_files=[];
    img_sizes=[];
    
    for line_curr in lines:
        str_split=line_curr.split(' ');
        h5_files.append(str_split[0]);
        img_files.append(str_split[1]);
        img_sizes.append(tuple([int(i) for i in str_split[2:]]));

    return h5_files,img_files,img_sizes
Example #43
def writeClassTextFile(train_val_txt, path_to_im, out_file):
    lines = util.readLinesFromFile(train_val_txt)
    lines_split = [line.split(' ', 1) for line in lines]
    pos_im = [
        line_split[0] for line_split in lines_split if int(line_split[1]) >= 0
    ]
    ims = [
        os.path.join(path_to_im, pos_im_curr + '.jpg')
        for pos_im_curr in pos_im
    ]

    util.writeFile(out_file, ims)
Example #44
def script_saveFloAndVizFromTestFile(neg_file,out_dir,gpu=0):
    pos_lines=util.readLinesFromFile(neg_file);
    pos_img_files=[line[:line.index(' ')] for line in pos_lines];
    
    print len(pos_img_files);
    print pos_img_files[0];

    # note: this hard-coded value overrides the gpu argument passed in
    gpu=1;
    model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/final.caffemodel'
    clusters_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction_test/examples/opticalflow/clusters.mat';

    dir_flo=os.path.join(out_dir,'flo');
    util.mkdir(dir_flo);
    dir_flo_viz=os.path.join(out_dir,'flo_viz');
    util.mkdir(dir_flo_viz);

    po.script_saveFlosAndViz(pos_img_files,dir_flo,dir_flo_viz,gpu,model_file,clusters_file,overwrite=True);
Example #45
def us_getFilePres(gt_file, out_dir_us, post_us, num_iter, batch_us):
    files_gt = []
    files_pred = []
    im_paths = util.readLinesFromFile(gt_file)
    im_paths = [im_path[:im_path.index(' ')] for im_path in im_paths]
    num_gt = len(im_paths)
    for batch_num in range(num_iter):
        for im_num in range(batch_us):
            file_pre = str(batch_num + 1) + '_' + str(im_num + 1)
            file_gt = file_pre + post_us[0]
            file_pred = file_pre + post_us[1]
            files_gt.append(os.path.join(out_dir_us, file_gt))
            files_pred.append(os.path.join(out_dir_us, file_pred))
    files_gt = files_gt[:num_gt]
    files_pred = files_pred[:num_gt]
    return im_paths, files_gt, files_pred
Example #46
def modifyHumanFile(orig_file, new_file):
    data = util.readLinesFromFile(orig_file)
    data = [
        tuple([idx] + data_curr.split(' '))
        for idx, data_curr in enumerate(data)
    ]
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    new_lines = p.map(modifyHumanFileMultiProc, data)
    # new_lines=[];
    # for idx,(im_file,npy_file) in enumerate(data):
    #     print idx,len(data);
    #     im=scipy.misc.imread(im_file);
    #     im_size=im.shape;
    #     line_curr=npy_file+' '+str(im.shape[0])+' '+str(im.shape[1]);
    #     new_lines.append(line_curr);
    print len(new_lines)
    print new_lines[0]
    util.writeFile(new_file, new_lines)
Example #47
def writeMetaInfoToDb(path_to_db,out_files,idx_global,class_ids_all,path_to_data):
    mani=Tube_Manipulator(path_to_db);
    
    mani.openSession();
    for out_file_idx,out_file in enumerate(out_files):
        if out_file_idx%100==0:
            print out_file_idx,len(out_files)
        in_file_text=out_file.replace('.npz','.txt');
        patch_files=util.readLinesFromFile(in_file_text);
        # print out_file,in_file_text,len(patch_files);
        
        
        for idx_img_file,img_file in enumerate(patch_files):
            img_path=img_file;
            
            img_path_split=img_path.split('/');
            img_path_split=[segment for segment in img_path_split if segment!=''];
            mat_name=img_path_split[-3];
            
            class_id_pascal=mat_name[:mat_name.index('_')];
            
            video_id=int(mat_name[mat_name.index('_')+1:mat_name.rindex('_')]);
            shot_id=int(mat_name[mat_name.rindex('_')+1:]);
            tube_id=int(img_path_split[-2]);

            frame_id=img_path_split[-1];
            frame_id=int(frame_id[:frame_id.index('.')]);
            # frame_id+=1

            class_idx_pascal=class_ids_all.index(class_id_pascal);
            deep_features_path=out_file;
            deep_features_idx=idx_img_file;
            layer='fc7';

            frame_path=getFramePath(path_to_data,class_id_pascal,video_id,shot_id,frame_id+1)
            assert os.path.exists(frame_path);
            
            

            mani.insert(idx_global, img_path, frame_id, video_id, tube_id, shot_id, frame_path=frame_path, layer=layer, deep_features_path=deep_features_path, deep_features_idx=deep_features_idx, class_id_pascal=class_id_pascal, class_idx_pascal=class_idx_pascal,commit=False);
            idx_global+=1;
    mani.session.commit();
    mani.closeSession();
    return idx_global;
Example #48
def readGTFile(file_curr):
    lines = util.readLinesFromFile(file_curr)
    im_paths = []
    ims_size = []
    annos_all = []
    for line_curr in lines:
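        # rsplit(None, 14) splits off the last 14 whitespace-separated fields,
        # so an image path containing spaces stays intact in line_split[0]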
        line_split = line_curr.rsplit(None, 14)
        im_paths.append(line_split[0])

        im_size = line_split[1:1 + 4]
        im_size = [int(num) for num in im_size]
        im_size = [im_size[2] - im_size[0], im_size[3] - im_size[1]]
        ims_size.append(im_size)

        annos = line_split[1 + 4:]
        annos = parseAnnoStr(annos)
        annos_all.append(annos)

    return im_paths, ims_size, annos_all
Example #49
def script_writeHTMLStitchedFlos(out_file_html,out_file,out_dir,grid_sizes=[1,2,4,8],grid_dir_pre='grid_flo_viz_'):
    img_paths=util.readLinesFromFile(out_file);
    
    viz_dirs=[os.path.join(out_dir,grid_dir_pre+str(num)) for num in grid_sizes];
    img_paths_html=[];
    captions=[];

    for img_path in img_paths:
        img_name=img_path[img_path.rindex('/')+1:img_path.rindex('.')];
        img_paths_html_curr=[util.getRelPath(img_path)];
        captions_curr=['im']
        for viz_dir in viz_dirs:
            print viz_dir,img_path
            img_path_curr=[os.path.join(viz_dir,file_curr) for file_curr in os.listdir(viz_dir) if file_curr.startswith(img_name)][0];
            img_paths_html_curr.append(util.getRelPath(img_path_curr));
            captions_curr.append(viz_dir[viz_dir.rindex('/')+1:]);
        img_paths_html.append(img_paths_html_curr);
        captions.append(captions_curr)
    
    visualize.writeHTML(out_file_html,img_paths_html,captions);
Example #50
def writeUniqueTrainingDataInfo(training_data_text,out_file_text):

    lines=util.readLinesFromFile(training_data_text);
    img_paths=[line[:line.index(' ')] for line in lines];
    p=multiprocessing.Pool(multiprocessing.cpu_count());
    vals=p.map(getDataSetAndVideoName,img_paths);
    vals_uz=zip(*vals);
    datasets=np.array(vals_uz[0]);
    videos=np.array(vals_uz[1]);
    new_tuples=[];
    for dataset_curr in np.unique(datasets):
        idx_rel=np.where(datasets==dataset_curr)[0];
        videos_rel=videos[idx_rel];
        videos_rel=np.unique(videos_rel);
        for video_curr in videos_rel:
            tuple_curr=(dataset_curr,video_curr)
            new_tuples.append(tuple_curr);

    vals_uni=[' '.join(val_curr) for val_curr in new_tuples];
    util.writeFile(out_file_text,vals_uni);
Example #51
def script_saveDetectedImages():
    out_file = '/disk2/aprilExperiments/horses/list_of_frames.txt'
    out_dir_dets = '/disk2/aprilExperiments/horses/frames_with_detections'
    post_pend = '_horse_detections.npy'
    post_pend_im = '_horse_detections.png'
    class_name = 'horse'
    thresh = 0.5

    frames = util.readLinesFromFile(out_file)
    # frames=['/disk2/aprilExperiments/horses/mediaFromPPT_frames/media7_00014.jpg']
    for idx_frame_curr, frame_curr in enumerate(frames):
        if idx_frame_curr % 100 == 0:
            print idx_frame_curr, len(frames)

        frame_name = frame_curr[frame_curr.rindex('/') + 1:]
        frame_name = frame_name[:frame_name.rindex('.')]
        video_name = frame_name[:frame_name.index('_')]
        dets_file = os.path.join(out_dir_dets, video_name,
                                 frame_name + post_pend)
        out_file_im = dets_file.replace('.npy', '.png')
        saveDets(frame_curr, class_name, dets_file, out_file_im, thresh)
Example #52
def script_saveNonHumanOnlyNeg():
    neg_file='/disk2/marchExperiments/deep_proposals/negatives.txt';
    out_dir='/disk2/aprilExperiments/negatives_npy_onlyHuman'

    lines=util.readLinesFromFile(neg_file);
    npy_files=[line[line.index(' ')+1:] for line in lines];
    npy_file_names=util.getFileNames(npy_files);

    exists=0;
    for idx_npy_file_name,npy_file_name in enumerate(npy_file_names):
        if idx_npy_file_name%100==0:
            print idx_npy_file_name;

        file_curr=os.path.join(out_dir,npy_file_name);
        if os.path.exists(file_curr):
            exists+=1;
        else:
            zeros=np.zeros((0,4));
            np.save(file_curr,zeros);

    print exists,len(npy_file_names);
Example #53
def saveBBoxImage(out_file, path_to_anno, out_dir_im, class_name='horse'):
    files = util.readLinesFromFile(out_file)
    just_names = util.getFileNames(files, ext=False)
    annotations = [
        os.path.join(path_to_anno, just_name + '.xml')
        for just_name in just_names
    ]

    print len(annotations)
    for im_file, anno, just_name in zip(files, annotations, just_names):

        out_file_pre = os.path.join(out_dir_im, just_name + '_')
        obj = untangle.parse(anno)

        for idx_object_curr, object_curr in enumerate(obj.annotation.object):
            if object_curr.name.cdata == class_name:
                out_file = out_file_pre + str(idx_object_curr) + '.jpg'
                # if os.path.exists(out_file):
                # 	continue;

                bnd_box = [
                    object_curr.bndbox.xmin.cdata,
                    object_curr.bndbox.ymin.cdata,
                    object_curr.bndbox.xmax.cdata,
                    object_curr.bndbox.ymax.cdata
                ]
                bnd_box = [int(coord) for coord in bnd_box]

                # print bnd_box;

                im = scipy.misc.imread(im_file)
                if len(im.shape) < 3:
                    crop = im[bnd_box[1]:bnd_box[3], bnd_box[0]:bnd_box[2]]
                else:
                    crop = im[bnd_box[1]:bnd_box[3], bnd_box[0]:bnd_box[2], :]

                print out_file
                scipy.misc.imsave(out_file, crop)
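A minimal usage sketch for saveBBoxImage, assuming a Pascal-VOC-style annotation layout; every path below is hypothetical:

# crops each 'horse' bounding box out of the listed images
saveBBoxImage('/disk2/example/horse_images.txt',
              '/disk2/example/VOC/Annotations',
              '/disk2/example/horse_crops',
              class_name='horse');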
Example #54
0
def saveFlosFromValFile(out_dir_model,val_file,num_to_pick,model_file,clusters_file,gpu,train_val_file=None,overwrite=False):

    out_dir_flo=os.path.join(out_dir_model,'flo');
    out_dir_flo_viz=os.path.join(out_dir_model,'flo_viz');
    util.mkdir(out_dir_flo);util.mkdir(out_dir_flo_viz);

    img_paths=util.readLinesFromFile(val_file);
    img_paths=[img_path[:img_path.index(' ')] for img_path in img_paths];
    class_names=[file_curr[:file_curr.index('_')] for file_curr in util.getFileNames(img_paths)];
    classes=list(set(class_names));
    class_names=np.array(class_names);
    
    img_paths_test=[];
    for class_curr in classes:
        idx_rel=np.where(class_names==class_curr)[0];
        idx_rel=idx_rel[:num_to_pick];
        img_paths_test.extend([img_paths[idx_curr] for idx_curr in idx_rel]);

    po.script_saveFlosAndViz(img_paths_test,out_dir_flo,out_dir_flo_viz,gpu,model_file,clusters_file,train_val_file=train_val_file,overwrite=overwrite);
    return img_paths_test;
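A minimal usage sketch, with hypothetical arguments: pick the first five validation images per class, then predict and visualize their flows under the model directory:

img_paths_test=saveFlosFromValFile('/disk2/example/model_run',
                                   '/disk2/example/val.txt',5,
                                   '/disk2/example/model.caffemodel',
                                   '/disk2/example/clusters.mat',0);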
Example #55
0
def main():

    all_dirs_file = '/disk2/februaryExperiments/training_jacob/all_dirs.txt'
    command_file_pre = '/disk2/februaryExperiments/training_jacob/commands_training_data_'
    train_data_file = '/disk2/februaryExperiments/training_jacob/caffe_files/train.txt'
    check_file = 'done.mat'
    num_proc = 12
    # command_files = script_writeCommandsForPreprocessing(all_dirs_file,command_file_pre,num_proc,check_file);

    all_dirs = util.readLinesFromFile(all_dirs_file)
    # all_dirs=all_dirs[:10];
    random.shuffle(all_dirs)

    strings = []
    for no_dir_curr, dir_curr in enumerate(all_dirs):
        print no_dir_curr, dir_curr
        dir_curr = dir_curr[:-1]
        curr_flos = [
            os.path.join(dir_curr, curr_flo)
            for curr_flo in os.listdir(dir_curr) if curr_flo.endswith('.tif')
        ]
        for curr_flo in curr_flos:
            curr_im = curr_flo.replace('.tif', '.jpg')
            assert os.path.exists(curr_im)
            string_curr = curr_im + '  ' + curr_flo + ' '
            strings.append(string_curr)
    print len(strings)
    # print strings[:3];

    # random.shuffle(strings);
    util.writeFile(train_data_file, strings)
    # with open (train_data_file,'wb') as f:
    #     for im_curr,flo_curr in zip(ims,flos):
    #         string_curr=im_curr+' '+flo_curr+'\n';
    #         f.write(string_curr);

    return
    dirs = getRemainingDirs(util.readLinesFromFile(all_dirs_file), check_file)
    last_lines = []
    for dir_curr in dirs:
        last_lines.append(
            util.readLinesFromFile(os.path.join(dir_curr, 'log.txt'))[-2])
    print set(last_lines)

    return
    meta_dirs_image = [
        '/disk2/image_data_moved',
        '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    ]
    meta_dirs_flo = [
        '/disk2/flow_data',
        '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data'
    ]
    sub_dirs_file = 'all_sub_dirs.txt'
    out_dir = '/disk2/februaryExperiments/training_jacob'
    out_file_correspondences = os.path.join(out_dir,
                                            'im_flo_correspondences.p')
    proto_file = 'deploy.prototxt'
    out_file = os.path.join(out_dir, 'im_flo_files.p')

    out_dir = '/disk2/februaryExperiments/training_jacob/training_data'
    mat_file = 'im_flo_files.mat'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    im_flo_dirs = pickle.load(open(out_file_correspondences, 'rb'))
    [im_dirs, flo_dirs] = zip(*im_flo_dirs)

    for im_dir, flo_dir in im_flo_dirs:
        script_saveMatFiles(flo_dir, im_dir, out_dir, mat_file, proto_file)
Example #56
0
def script_saveMatFiles(flo_dir, im_dir, out_dir, mat_file, proto_file):
    #get video name
    video_name = flo_dir[:-1]
    video_name = video_name[video_name.rindex('/') + 1:]
    print video_name

    #get flo files
    flo_files = [
        os.path.join(flo_dir, file_curr) for file_curr in os.listdir(flo_dir)
        if file_curr.endswith('.flo')
    ]
    flo_files.sort()

    #get im files
    im_files = util.readLinesFromFile(os.path.join(flo_dir, 'im_1.txt'))
    old_dir = im_files[0][:im_files[0].rindex('/') + 1]

    #if dirs have changed, replace the paths
    if im_dir != old_dir:
        im_files = [im_curr.replace(old_dir, im_dir) for im_curr in im_files]

    #get batch size
    batch_size = getBatchSizeFromDeploy(os.path.join(flo_dir, proto_file))

    #get batch info
    batch_num = [
        int(file_curr[file_curr.rindex('-') + 1:file_curr.rindex('(')])
        for file_curr in flo_files
    ]
    batch_num = np.array(batch_num)
    batch_ids = list(set(batch_num))
    batch_ids.sort()

    flo_files_all = []
    im_files_all = []
    for batch_no in batch_ids:
        idx_rel = np.where(batch_num == batch_no)[0]

        flo_files_curr = []
        im_files_curr = []
        for idx_curr in idx_rel:
            flo_file = flo_files[idx_curr]
            im_no = int(flo_file[flo_file.rindex('(') +
                                 1:flo_file.rindex(')')])
            im_corr = im_files[batch_no * batch_size + im_no]
            flo_files_curr.append(flo_file)
            im_files_curr.append(im_corr)

        flo_files_all.append(flo_files_curr)
        im_files_all.append(im_files_curr)

    #save as mat with flofiles, im_files, and out_dir;
    for idx_batch_no, batch_no in enumerate(batch_ids):
        flo_files = flo_files_all[idx_batch_no]
        im_files = im_files_all[idx_batch_no]

        out_dir_mat = os.path.join(out_dir, video_name + '_' + str(batch_no))
        # print out_dir_mat

        if not os.path.exists(out_dir_mat):
            os.mkdir(out_dir_mat)
        out_file = os.path.join(out_dir_mat, mat_file)
        print out_file
        mat_data = {'flo_files': flo_files, 'im_files': im_files}

        scipy.io.savemat(out_file, mat_data)
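The batch bookkeeping above relies on a '<name>-<batch>(<im_no>).flo' filename convention; a small sketch of the parse on a made-up file name:

flo_file='/disk2/example/flow-3(12).flo';  # hypothetical, follows the convention
batch_no=int(flo_file[flo_file.rindex('-')+1:flo_file.rindex('(')]);
im_no=int(flo_file[flo_file.rindex('(')+1:flo_file.rindex(')')]);
print batch_no,im_no;  # 3 12 -> pairs with im_files[3*batch_size+12]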
Example #57
0
import util
import visualize
import os
import preprocessing_data
import scipy.misc
import cv2

# from IPython.display import Image

val_list_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_valImageList.txt'
out_dir = '/home/SSD3/maheen-data/temp'
dir_server = '/home/SSD3/maheen-data'
val_list = util.readLinesFromFile(val_list_file)

path_im, bbox, anno_points = preprocessing_data.parseAnnoFile(val_list_file)

print len(path_im), len(bbox), len(anno_points)
print path_im[0], bbox[0], anno_points[0]

print len(val_list)

# plt.ion();
for idx, (im_path, bbox_curr,
          anno_points_curr) in enumerate(zip(path_im, bbox, anno_points)):
    print im_path
    out_file_curr = os.path.join(out_dir,
                                 str(idx) + '.jpg')
    im = cv2.imread(im_path)
    cv2.rectangle(im, (bbox_curr[0], bbox_curr[2]),
                  (bbox_curr[1], bbox_curr[3]), (255, 0, 0))
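    # Assumed continuation (not in the original snippet): presumably the
    # annotated image is written out to the path prepared above.
    cv2.imwrite(out_file_curr, im)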
Example #58
0
def experiment_nnPatches(params):
    out_file_text = params.out_file_text
    class_ids = params.class_id

    img_paths = util.readLinesFromFile(out_file_text)
    class_id_idx_tuples = []
    for img_path in img_paths:
        class_id = img_path[:img_path.rindex('_')]
        class_id = class_id[class_id.rindex('_') + 1:]
        class_idx = class_ids.index(class_id)
        class_idx = params.class_idx[class_idx]
        class_id_idx_tuples.append((class_id, class_idx))

    file_names_mat, object_indices = recreateOriginalPaths(
        params.path_to_annotation, img_paths, returnObject_idx=True)
    print 'getting azimuths'
    azimuths = [
        getObjectStruct(file_name, object_idx).viewpoint.azimuth_coarse
        for file_name, object_idx in zip(file_names_mat, object_indices)
    ]

    if params.out_file_layers is None:
        print 'running layers part'
        out_file_layers = caffe_wrapper.saveFeaturesOfLayers(
            out_file_text,
            params.path_to_classify,
            params.gpu_no,
            params.layers,
            ext='jpg',
            out_file=params.out_file_pre,
            meanFile=params.caffe_mean,
            deployFile=params.caffe_deploy,
            modelFile=params.caffe_model,
            images_dim=params.images_dim)
        params = params._replace(out_file_layers=out_file_layers)

    out_file_layers = params.out_file_layers

    print 'writing to db'
    for layer in params.layers:
        vals = np.load(out_file_layers)
        indices, distances = nearest_neighbor.doCosineDistanceNN(
            vals[layer], numberOfN=None)
        mani = Pascal3D_Manipulator(params.db_path_out)
        mani.openSession()
        for idx in range(len(img_paths)):
            mani.insert(idx,
                        img_paths[idx],
                        layer,
                        out_file_layers,
                        class_id_idx_tuples[idx][0],
                        class_id_idx_tuples[idx][1],
                        params.caffe_model,
                        azimuth=azimuths[idx],
                        neighbor_index=indices[idx],
                        neighbor_distance=distances[idx],
                        trainedClass=params.trainFlag,
                        commitFlag=False)
        mani.closeSession()

    return params
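For intuition, a minimal numpy sketch of what the cosine-distance nearest-neighbor step computes; nearest_neighbor.doCosineDistanceNN is the project's own code, so this is only an assumed equivalent:

import numpy as np

def cosine_nn_sketch(feats):
    # feats: (N, D) feature matrix; returns, per row, neighbor indices and
    # cosine distances sorted nearest-first, excluding the self-match.
    norms=np.linalg.norm(feats,axis=1,keepdims=True);
    normed=feats/np.maximum(norms,1e-8);
    dists=1-np.dot(normed,normed.T);
    indices=np.argsort(dists,axis=1)[:,1:];
    distances=np.sort(dists,axis=1)[:,1:];
    return indices,distances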
Example #59
0
def main():
    # dir_meta='/disk2/flow_data';
    # dir_meta_old='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data';
    # deploy_file='deploy.prototxt';
    # dir_mids=[os.path.join(dir_meta,dir_mid) for dir_mid in os.listdir(dir_meta) if os.path.isdir(os.path.join(dir_meta,dir_mid))]
    # dirs_left=[os.path.join(dir_mid,dir_curr) for dir_mid in dir_mids for dir_curr in os.listdir(dir_mid) if os.path.isdir(os.path.join(dir_mid,dir_curr))]

    # dirs_left=[os.path.join(dir_mid,dir_curr) for dir_mid in dirs_left for dir_curr in os.listdir(dir_mid) if os.path.isdir(os.path.join(dir_mid,dir_curr))]

    # print len(dirs_left);
    # print dirs_left[0];

    # for dir_curr in dirs_left:
    #     deploy_curr=os.path.join(dir_curr,deploy_file);
    #     print deploy_curr
    #     data=[];
    #     with open(deploy_curr,'r') as f:
    #         data = f.read()

    #     with open(deploy_curr+'_backup','w') as f:
    #         f.write(data);

    #     data = data.replace(dir_meta_old, dir_meta)
    #     with open(deploy_curr, "w") as f:
    #         f.write(data);

    # return
    # video_list_file='/disk2/video_data/video_list.txt'
    # path_to_video_meta='/disk2/video_data';

    # path_to_flo_meta='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data';
    # path_to_im_meta='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data';

    # video_files=util.readLinesFromFile(video_list_file);
    # # image_dirs=[dir_curr.replace(path_to_video_meta,path_to_im_meta)[:-4] for dir_curr in video_files];
    # # flo_dirs=[dir_curr.replace(path_to_video_meta,path_to_flo_meta)[:-4] for dir_curr in video_files];
    # flo_dirs=pickle.load(open('/disk2/temp/dirs_done.p','rb'));
    # image_dirs=[dir_curr.replace(path_to_flo_meta,path_to_im_meta) for dir_curr in flo_dirs];
    # print len(image_dirs)
    # out_dir='/disk2/image_data_moved';

    # out_file='/disk2/image_data_moved/mv_commands_2.txt'
    # commands=[];
    # image_dirs_to_move=image_dirs[5000:7000];
    # for image_dir in image_dirs_to_move:
    #     image_dir=util.escapeString(image_dir);
    #     new_dir=image_dir.replace(path_to_im_meta,out_dir);
    #     command='mkdir -p '+new_dir+';';
    #     command=command+'mv '+image_dir+'/* '+new_dir;
    #     commands.append(command);
    # util.writeFile('/disk2/image_data_moved/dirs_moved_2.txt',image_dirs_to_move);
    # util.writeFile(out_file,commands);

    # return
    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_video_meta = '/disk2/video_data'

    # path_to_flo_meta='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data';
    path_to_flo_meta = '/disk2/flow_data'
    path_to_im_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'

    video_files = util.readLinesFromFile(video_list_file)
    # image_dirs=[dir_curr.replace(path_to_video_meta,path_to_im_meta)[:-4] for dir_curr in video_files];
    # flo_dirs=[dir_curr.replace(path_to_video_meta,path_to_flo_meta)[:-4] for dir_curr in video_files];
    flo_dirs = pickle.load(open('/disk2/temp/dirs_done_disk2.p', 'rb'))
    image_dirs = [
        dir_curr.replace(path_to_flo_meta, path_to_im_meta)
        for dir_curr in flo_dirs
    ]
    print len(image_dirs)
    finished = []
    i = 0
    for image_dir, flo_dir in zip(image_dirs, flo_dirs):
        print i
        count_im_command = 'ls ' + os.path.join(util.escapeString(image_dir),
                                                '*.ppm') + '| wc -l'
        count_flo_command = 'ls ' + os.path.join(util.escapeString(flo_dir),
                                                 '*.flo') + '| wc -l'

        # im_count=int(subprocess.check_output(count_im_command,shell=True));
        # flo_count=int(subprocess.check_output(count_flo_command,shell=True));
        im_count = len([
            file_curr for file_curr in os.listdir(image_dir)
            if file_curr.endswith('.ppm')
        ])
        flo_count = len([
            file_curr for file_curr in os.listdir(flo_dir)
            if file_curr.endswith('.flo')
        ])
        print i, flo_count, im_count
        if flo_count + 1 == im_count:
            finished.append(1)
        else:
            finished.append(0)

        i += 1

    finished = np.array(finished)
    print 'done', sum(finished == 1)
    print 'not done', sum(finished == 0)

    pickle.dump([finished, image_dirs], open('/disk2/temp/to_rerun.p', 'wb'))

    return
    dir_flownet_meta = '/home/maheenrashid/Downloads/flownet/flownet-release/models/flownet'
    caffe_bin = os.path.join(dir_flownet_meta, 'bin/caffe')
    path_to_model = os.path.join(dir_flownet_meta,
                                 'model/flownet_official.caffemodel')

    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_video_meta = '/disk2/video_data'

    in_dir_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data'
    in_dir_meta = '/disk2/flow_data'
    # if not os.path.exists(new_in_dir_meta):
    #     os.mkdir(new_in_dir_meta);

    deploy_name = 'deploy.prototxt'
    gpu = 0

    dirs = [
        dir_curr.replace(path_to_video_meta, in_dir_meta)[:-4]
        for dir_curr in util.readLinesFromFile(video_list_file)
    ]
    dirs = [dir_curr for dir_curr in dirs if os.path.exists(dir_curr)]
    counts = [
        len(os.listdir(dir_curr)) for dir_curr in dirs
        if os.path.exists(dir_curr)
    ]
    dirs_left = []
    dirs_done = []
    for idx_count, count in enumerate(counts):
        if count == 4:
            dirs_left.append(dirs[idx_count])
            # dir_curr=dirs[idx_count]
            # deploy_curr=os.path.join(dir_curr,deploy_name);
            # im_file=os.path.join(dir_curr,'im_1.txt');
            # batch_size = sum(1 for line in open(im_file))

            # old_str='batch_size: '+str(int(ceil(batch_size/5)));
            # print old_str,

            # batch_size = int(ceil(batch_size/8));
            # new_str='batch_size: '+str(batch_size);
            # print new_str

            # data=[];
            # with open(deploy_curr,'r') as f:
            #     data = f.read()
            # # print data[:300];
            # assert old_str in data;
            # data = data.replace(old_str, new_str)
            # # print data[:300];
            # with open(deploy_curr, "w") as f:
            #     f.write(data);

            # out_dir_curr=dir_curr.replace(in_dir_meta,new_in_dir_meta);
            #mkdir of new location
            # mkdir_command='mkdir -p '+util.escapeString(out_dir_curr)
            # print mkdir_command
            # subprocess.call(mkdir_command, shell=True)

            #mv contents from old to new
            # mv_command='mv '+util.escapeString(dir_curr)+'/* '+util.escapeString(out_dir_curr);
            # print mv_command
            # subprocess.call(mv_command, shell=True)
            #append new to dirs_left
            # dirs_left.append(out_dir_curr);
            # raw_input();
        else:
            dirs_done.append(dirs[idx_count])

    print min(counts)
    counts = np.array(counts)
    print sum(counts == 4)
    print len(dirs_left)

    mid_point = len(dirs_left) / 2

    print mid_point, len(dirs_left) - mid_point
    out_file_commands = '/disk2/januaryExperiments/gettingFlows/flownet_commands_left_0.txt'
    gpu = 0
    # writeCommands_hacky(out_file_commands,dirs_left[:mid_point],caffe_bin,deploy_name,path_to_model,gpu)

    out_file_commands = '/disk2/januaryExperiments/gettingFlows/flownet_commands_left_1.txt'
    gpu = 1
    # writeCommands_hacky(out_file_commands,dirs_left[mid_point:],caffe_bin,deploy_name,path_to_model,gpu)

    print len(dirs_done)
    pickle.dump(dirs_done, open('/disk2/temp/dirs_done_disk2.p', 'wb'))

    return
    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_video_meta = '/disk2/video_data'

    in_dir_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    out_dir_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data'
    path_to_deploy = '/disk2/januaryExperiments/gettingFlows/deploy_template.prototxt'
    out_file_commands = '/disk2/januaryExperiments/gettingFlows/flownet_commands.txt'

    dir_flownet_meta = '/home/maheenrashid/Downloads/flownet/flownet-release/models/flownet'
    path_to_sizer = os.path.join(dir_flownet_meta, 'bin/get_image_size')
    caffe_bin = os.path.join(dir_flownet_meta, 'bin/caffe')
    path_to_model = os.path.join(dir_flownet_meta,
                                 'model/flownet_official.caffemodel')

    text_1 = 'im_1.txt'
    text_2 = 'im_2.txt'
    deploy_file = 'deploy.prototxt'
    gpu = 0

    params_dict = {}
    params_dict['video_list_file'] = video_list_file
    params_dict['path_to_video_meta'] = path_to_video_meta
    params_dict['in_dir_meta'] = in_dir_meta
    params_dict['out_dir_meta'] = out_dir_meta
    params_dict['path_to_deploy'] = path_to_deploy
    params_dict['out_file_commands'] = out_file_commands
    params_dict['dir_flownet_meta'] = dir_flownet_meta
    params_dict['path_to_sizer'] = path_to_sizer
    params_dict['caffe_bin'] = caffe_bin
    params_dict['path_to_model'] = path_to_model
    params_dict['text_1'] = text_1
    params_dict['text_2'] = text_2
    params_dict['deploy_file'] = deploy_file
    params_dict['gpu'] = gpu

    params = createParams('writeFlownetCommands')
    params = params(**params_dict)
    # script_writeFlownetCommands(params);
    commands = util.readLinesFromFile(params.out_file_commands)
    commands = [c.replace('-gpu 1', '-gpu 0') for c in commands]
    util.writeFile(params.out_file_commands, commands)
    pickle.dump(params._asdict(),
                open(params.out_file_commands + '_meta_experiment.p', 'wb'))

    return
    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_im_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    path_to_video_meta = '/disk2/video_data'
    commands_file_text = '/disk2/januaryExperiments/gettingFlows/resize_commands.txt'

    video_list = util.readLinesFromFile(video_list_file)
    print len(video_list)
    image_dirs = [
        video_curr.replace(path_to_video_meta, path_to_im_meta)[:-4]
        for video_curr in video_list
    ]
    print len(image_dirs), image_dirs[0]
    image_dirs = image_dirs[:1]

    commands = []
    command_conv = ['convert', '-resize 512x384']
    for image_dir in image_dirs:
        image_list = [
            os.path.join(image_dir, im) for im in os.listdir(image_dir)
            if im.endswith('.ppm')
        ]
        for image_curr in image_list:
            command_curr = [
                command_conv[0], image_curr, command_conv[1], image_curr
            ]
            command_curr = ' '.join(command_curr)
            commands.append(command_curr)

    print len(commands)
    print commands[0]
    util.writeFile(commands_file_text, commands)

    return
    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_im_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    path_to_video_meta = '/disk2/video_data'
    path_to_txt_1 = '/disk2/januaryExperiments/gettingFlows/temp_im_1.txt'
    path_to_txt_2 = '/disk2/januaryExperiments/gettingFlows/temp_im_2.txt'

    video_list = util.readLinesFromFile(video_list_file)
    print len(video_list)
    image_dirs = [
        video_curr.replace(path_to_video_meta, path_to_im_meta)[:-4]
        for video_curr in video_list
    ]
    print len(image_dirs), image_dirs[0]

    list_1 = []
    list_2 = []
    for image_dir in image_dirs[:10]:
        list_1_curr, list_2_curr = getImageListForFlow(image_dir)
        list_1.extend(list_1_curr[:3])
        list_2.extend(list_2_curr[:3])

    assert len(list_1) == len(list_2)

    util.writeFile(path_to_txt_1, list_1)
    util.writeFile(path_to_txt_2, list_2)
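getImageListForFlow is not defined in this excerpt; presumably it pairs each frame with its successor so FlowNet sees (frame_i, frame_i+1). A minimal sketch under that assumption:

import os

def getImageListForFlow_sketch(image_dir):
    # assumed behavior: sort the .ppm frames, pair each with the next one
    frames=[os.path.join(image_dir,f) for f in os.listdir(image_dir) if f.endswith('.ppm')];
    frames.sort();
    return frames[:-1],frames[1:]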