def experiment1(datasets, numClusters):
    ###############---VECTOR CONFIGURATION---################
    # Configure data, resulting in a list of dictionaries (labels-->vectors).
    # There is a dictionary for each dataset, stored in the same order as in the datasets list.
    # dataDictionaries = randomlyConfigureActiveColumns(datasets, 5, True)
    # OR:
    dataDictionaries = util.explicitlyConfigureActiveColumns(datasets, [0,1,2,3], True)

    ###############---VECTOR NORMALIZATION---################
    # At this point, we have a list of dictionaries. Each dictionary maps labels to vectors.
    # All of the vectors have the same dimensionality, built the way we specified during configuration.
    normalizedDictionaries = []
    for d in dataDictionaries:
        # print d, "\n"
        normalizedDictionaries.append(normalize.normalize(d))
        # THERE ARE ALSO OTHER WAYS TO NORMALIZE

    ###################---CLUSTERING---#####################
    clusterResults = cluster.gonzalez(util.crunchDictionaryList(normalizedDictionaries),
                                      numClusters, distance.euclidean)

    ##################---STORE RESULTS---####################
    # Prepare to write the experiment file.
    clusteringAlgorithmInfo = "gonzalez"
    distanceMeasurementInfo = "euclidean"
    vectorConfigurationInfo = "explicitly configured, same columns used across datasets, Indices used: [0,1,2,3]"
    util.writeFile(1, numClusters, clusteringAlgorithmInfo, distanceMeasurementInfo,
                   vectorConfigurationInfo, "", clusterResults[1])
def writeTrainTxt(out_file_train, video_dirs, im_dir, tif_dir, subsample=5):
    print len(video_dirs)
    # video_dirs=video_dirs[:10];
    pairs = []
    for idx_vid_dir, vid_dir in enumerate(video_dirs):
        print idx_vid_dir, vid_dir
        tif_dir_curr = os.path.join(vid_dir, tif_dir)
        im_dir_curr = os.path.join(vid_dir, im_dir)
        tif_names = [file_curr for file_curr in os.listdir(tif_dir_curr) if file_curr.endswith('.tif')]
        tif_names = sortTifNames(tif_names)
        for tif_name in tif_names[::subsample]:
            # print tif_name
            jpg_file = os.path.join(im_dir_curr, tif_name.replace('.tif', '.jpg'))
            # print jpg_file,os.path.exists(jpg_file)
            if os.path.exists(jpg_file):
                # print jpg_file
                tif_file = os.path.join(tif_dir_curr, tif_name)
                pairs.append(jpg_file + ' ' + tif_file)
        # raw_input();
    print len(pairs)
    # print pairs[:10];
    # pair_one=[p[:p.index(' ')] for p in pairs];
    # vid_dirs=[p[:p[:p.rindex('/')].rindex('/')] for p in pair_one];
    # print len(set(vid_dirs));
    random.shuffle(pairs)
    util.writeFile(out_file_train, pairs)
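# Nearly every routine in this collection leans on a small util module for reading and
# writing newline-delimited text files. A minimal sketch of those two helpers, assuming
# they simply join/split on newlines; this is a hypothetical stand-in, not the project's
# actual util implementation.

def writeFile(file_name, lines):
    # Write an iterable of strings to file_name, one entry per line.
    with open(file_name, 'w') as f:
        for line in lines:
            f.write(str(line) + '\n')

def readLinesFromFile(file_name):
    # Return the file's contents as a list of lines with trailing newlines stripped.
    with open(file_name, 'r') as f:
        return [line.strip('\n') for line in f.readlines()]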
def writeMinLossFileLossData(out_file_pre, post_tags, minloss_post, loss_file):
    new_files = [out_file_pre + post_tag_curr for post_tag_curr in post_tags]
    horse_data = util.readLinesFromFile(new_files[0])
    horse_data = np.array(horse_data)
    horse_data_uni = np.unique(horse_data)
    face_data = util.readLinesFromFile(new_files[1])
    face_data_noIm = util.readLinesFromFile(new_files[2])
    assert len(face_data) == len(face_data_noIm)
    loss_all = np.load(loss_file)
    loss_all = loss_all[:len(face_data)]
    assert loss_all.shape[0] == len(face_data)
    new_data = [[], [], []]
    for idx_curr, horse_curr in enumerate(horse_data_uni):
        idx_rel = np.where(horse_data == horse_curr)[0]
        loss_rel = loss_all[idx_rel]
        min_idx = np.argmin(loss_rel)
        min_idx_big = idx_rel[min_idx]
        assert loss_rel[min_idx] == loss_all[min_idx_big]
        new_data[0].append(horse_curr)
        new_data[1].append(face_data[min_idx_big])
        new_data[2].append(face_data_noIm[min_idx_big])
    new_files_out = [new_file_curr[:new_file_curr.rindex('.')] + minloss_post for new_file_curr in new_files]
    for new_file_to_write, data_to_write in zip(new_files_out, new_data):
        print new_file_to_write, len(data_to_write)
        util.writeFile(new_file_to_write, data_to_write)
def makeCulpritFile():
    out_dir = '/home/SSD3/maheen-data/temp/debug_problem_batch'
    file_human = '/home/SSD3/maheen-data/horse_project/aflw/matches_5_train_fiveKP_noIm.txt'
    file_horse = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP.txt'
    new_file_human = file_human[:file_human.rindex('.')] + '_debug.txt'
    new_file_horse = file_horse[:file_horse.rindex('.')] + '_debug.txt'
    batch_no = 3
    batch_size = 64
    data_horse = util.readLinesFromFile(file_horse)
    data_human = util.readLinesFromFile(file_human)
    assert len(data_horse) == len(data_human)
    print(len(data_horse) / batch_size)
    # for batch_no in range(71,72):
    batch_no = 71
    line_idx = (batch_size * (batch_no - 1)) % len(data_horse)
    print('____')
    print(batch_no)
    print(line_idx)
    print data_horse[line_idx]
    print data_human[line_idx]
    data_horse_rel = data_horse[line_idx:line_idx + batch_size]
    data_human_rel = data_human[line_idx:line_idx + batch_size]
    assert len(data_horse_rel) == batch_size
    assert len(data_human_rel) == batch_size
    util.writeFile(new_file_horse, data_horse_rel)
    util.writeFile(new_file_human, data_human_rel)
    print new_file_human
    print new_file_horse
def saveDataTxtFiles():
    # horse_data='/home/SSD3/maheen-data/face_data/npy';
    horse_data = '/home/SSD3/maheen-data/horse_data/npy'
    # horse_data = '/home/SSD3/maheen-data/aflw_data/npy';
    folders = [
        os.path.join(horse_data, folder_curr)
        for folder_curr in os.listdir(horse_data)
        if os.path.isdir(os.path.join(horse_data, folder_curr))
    ]
    file_list = []
    to_del = []
    for folder_curr in folders:
        file_list_curr = util.getFilesInFolder(folder_curr, '.npy')
        if len(file_list_curr) == 0:
            to_del.append(folder_curr)
        file_list.extend(file_list_curr)
    for folder_curr in to_del:
        shutil.rmtree(folder_curr)
    out_file = os.path.join(horse_data, 'data_list.txt')
    util.writeFile(out_file, file_list)
def writeSmallDatasetFile(out_file_pre, horse_data, num_neighbor,
                          num_data, in_file_horse, in_file_face, in_file_face_noIm, post_tags=None):
    if post_tags is None:
        post_tags = ['_horse.txt', '_face.txt', '_face_noIm.txt']
    in_files = [in_file_horse, in_file_face, in_file_face_noIm]
    data_org = util.readLinesFromFile(in_file_horse)
    data_org = np.array(data_org)
    idx_keep_all = []
    print horse_data.shape
    horse_data = horse_data[:num_data]
    for horse_curr in horse_data:
        idx_curr = np.where(data_org == horse_curr)[0]
        idx_curr = np.sort(idx_curr)
        idx_keep = idx_curr[:num_neighbor]
        idx_keep_all = idx_keep_all + list(idx_keep)
        # print num_data,idx_keep
    idx_keep_all = np.array(idx_keep_all)
    print idx_keep_all.shape
    files_to_return = []
    for idx_in_file, in_file in enumerate(in_files):
        out_file_curr = out_file_pre + post_tags[idx_in_file]
        if idx_in_file == 0:
            data_keep = data_org[idx_keep_all]
        else:
            data_curr = util.readLinesFromFile(in_file)
            data_curr = np.array(data_curr)
            data_keep = data_curr[idx_keep_all]
        util.writeFile(out_file_curr, data_keep)
        files_to_return.append(out_file_curr)
    return files_to_return
def shortenTrainingData(train_txt, train_txt_new, ratio_txt, val_txt_new=None):
    # Example usage:
    # pos_human='/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow.txt';
    # neg_human='/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow.txt';
    # pos_human_small='/disk3/maheen_data/headC_160/noFlow_gaussian_human/pos_flos/positives_onlyHuman_withFlow_oneHundreth.txt';
    # neg_human_small='/disk3/maheen_data/headC_160/neg_flos/negatives_onlyHuman_withFlow_oneHundreth.txt';
    # ratio_txt=100;
    # shortenTrainingData(pos_human,pos_human_small,ratio_txt);
    # shortenTrainingData(neg_human,neg_human_small,ratio_txt);
    train_data = util.readLinesFromFile(train_txt)
    # print ratio_txt
    if ratio_txt < 1:
        ratio_txt = int(len(train_data) * ratio_txt)
        # print ratio_txt;
    random.shuffle(train_data)
    train_data_new = train_data[:ratio_txt]
    print len(train_data), len(train_data_new)
    util.writeFile(train_txt_new, train_data_new)
    if val_txt_new is not None:
        val_data = train_data[ratio_txt:]
        print len(val_data)
        util.writeFile(val_txt_new, val_data)
def script_writeCommandsForPreprocessing(all_dirs_file, command_file_pre, num_proc, check_file=None):
    all_dirs = util.readLinesFromFile(all_dirs_file)
    all_dirs = [dir_curr[:-1] for dir_curr in all_dirs]
    if check_file is not None:
        all_dirs = getRemainingDirs(all_dirs, check_file)
    command_pre = 'echo '
    command_middle_1 = ';cd ~/Downloads/opticalflow; matlab -nojvm -nodisplay -nosplash -r "out_folder=\''
    command_middle = '\';saveTrainingData" > '
    command_end = ' 2>&1'
    commands = []
    for dir_curr in all_dirs:
        dir_curr = util.escapeString(dir_curr)
        log_file = os.path.join(dir_curr, 'log.txt')
        command = command_pre + dir_curr + command_middle_1 + dir_curr + command_middle + log_file + command_end
        commands.append(command)
    idx_range = util.getIdxRange(len(commands), len(commands) / num_proc)
    command_files = []
    for i, start_idx in enumerate(idx_range[:-1]):
        command_file_curr = command_file_pre + str(i) + '.txt'
        end_idx = idx_range[i + 1]
        commands_rel = commands[start_idx:end_idx]
        util.writeFile(command_file_curr, commands_rel)
        command_files.append(command_file_curr)
    return command_files
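# script_writeCommandsForPreprocessing only writes the per-process command files; a small
# driver is still needed to execute them. A minimal sketch of such a driver, assuming one
# shell per command file so the num_proc splits run in parallel (the function name and use
# of subprocess here are illustrative, not part of the original scripts).
import subprocess

def runCommandFiles(command_files):
    # Launch one shell per command file, each running its commands sequentially,
    # then wait for all of them to finish.
    procs = []
    for command_file in command_files:
        with open(command_file) as f:
            script = f.read()
        procs.append(subprocess.Popen(script, shell=True))
    for proc in procs:
        proc.wait()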
def moveFilesIntoFolders(in_dir, mat_file, out_dir, out_file_commands, pad_zeros_in=8, pad_zeros_out=4):
    arr = scipy.io.loadmat(mat_file)['ranges']
    # videos=np.unique(arr);
    commands = []
    for shot_no in range(arr.shape[1]):
        print shot_no, arr.shape[1]
        start_idx = arr[0, shot_no]
        end_idx = arr[1, shot_no]
        video_idx = arr[2, shot_no]
        out_dir_video = os.path.join(out_dir, str(video_idx))
        util.mkdir(out_dir_video)
        # print
        # raw_input();
        # 1-based index of this shot within its video: the position of shot_no among all shots of video_idx.
        shot_idx = np.where(shot_no == np.where(video_idx == arr[2, :])[0])[0][0] + 1
        out_dir_shot = os.path.join(out_dir_video, str(shot_idx))
        util.mkdir(out_dir_shot)
        # print start_idx,end_idx
        for idx, frame_no in enumerate(range(start_idx, end_idx + 1)):
            in_file = os.path.join(in_dir, padZeros(frame_no, pad_zeros_in) + '.jpg')
            out_file = os.path.join(out_dir_shot, 'frame' + padZeros(idx + 1, pad_zeros_out) + '.jpg')
            command = 'mv ' + in_file + ' ' + out_file
            commands.append(command)
    print len(commands)
    util.writeFile(out_file_commands, commands)
def main():
    text_list = '/disk2/aprilExperiments/dual_flow/list_of_dats_to_move.txt'
    text_mv = '/disk2/aprilExperiments/dual_flow/list_of_dats_to_move_commands.sh'
    models = util.readLinesFromFile(text_list)
    path_to_storage = '/media/maheenrashid/Seagate\ Backup\ Plus\ Drive/maheen_data'
    path_to_replace = '/disk2'
    mv_commands = []
    for model in models:
        if not os.path.exists(model):
            continue
        dir_curr = model[:model.rindex('/')]
        dir_new = dir_curr.replace(path_to_replace, path_to_storage)
        # print dir_new;
        command = 'mkdir -p ' + dir_new
        # print command;
        mv_command = 'mv -v ' + model + ' ' + dir_new + '/'
        # print mv_command
        mv_commands.append(mv_command)
        subprocess.call(command, shell=True)
        # raw_input();
    util.writeFile(text_mv, mv_commands)
    print text_mv
def train(dataset):
    config_options = globals.config
    task_path = config_options.get("Data", dataset)
    loss = config_options.get('Train', 'loss')
    activation = config_options.get('Train', 'activation')
    if dataset == "classify":
        Xtrain = z_norm(load_mnist_X(task_path + "classf_Xtrain.txt"))
        Xtest = z_norm(load_mnist_X(task_path + "classf_Xtest.txt"))
        Xval = z_norm(load_mnist_X(task_path + "classf_XVal.txt"))
        ytrain = load_mnist_Y(task_path + "classf_ytrain.txt")
        ytest = load_mnist_Y(task_path + "classf_ytest.txt")
        yval = load_mnist_Y(task_path + "classf_yVal.txt")
    elif dataset == "regression":
        Xtrain = z_norm(load_regression_X(task_path + "regr_Xtrain.txt"))
        Xtest = z_norm(load_regression_X(task_path + "regr_Xtest.txt"))
        Xval = z_norm(load_regression_X(task_path + "regr_Xval.txt"))
        ytrain = load_regression_Y(task_path + "regr_ytrain.txt")
        ytest = load_regression_Y(task_path + "regr_ytest.txt")
        yval = load_regression_Y(task_path + "regr_yval.txt")
    else:
        logger.warning("Invalid task.")
        return
    logger.info("Load data complete.")

    # build model
    N, input_dim = Xtrain.shape
    model = Model()
    model.add(Layer(output_dim=globals.layer_dim, input_dim=input_dim))
    model.add(Activation(activation=activation))
    model.add(Layer(output_dim=globals.output_dim))
    model.compile(loss=loss)
    history = model.fit(Xtrain, ytrain, batch_size=N,
                        iterations=globals.iterations, validation_data=(Xval, yval))

    # save result
    result_dir = config_options.get('Result', 'result-dir')
    file_name = "_".join([
        dataset, activation,
        str(globals.alpha),
        str(globals.lam),
        str(globals.layer_dim),
        str(globals.iterations)
    ]) + ".txt"
    file_path = result_dir + file_name
    writeFile(file_path, "")
    for datum in history:
        datum = [str(x) for x in datum]
        line = "\t".join(datum) + "\n"
        writeFile(file_path, line, 'a')
    print model.loss.mse(Xval, yval)
    print model.loss.mse(Xtest, ytest)
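# train() first truncates the result file and then appends one tab-separated row per
# training iteration, so the writeFile it calls must accept an optional file mode. A
# minimal sketch of such a helper, matching only how train() calls it; this is an
# assumption, not the project's own implementation.

def writeFile(file_path, content, mode='w'):
    # Write (mode='w') or append (mode='a') a plain string to file_path.
    with open(file_path, mode) as f:
        f.write(content)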
def fpcCompile (text, encodedText, encoding, fileName):
    assert type(text) is unicode
    assert type(encodedText) is str
    assert encoding != None

    r = _pPas.match(text)
    if r != None:
        modName = r.group(1).encode('ascii')
        baseName = modName + '.$$$'
        if fileName == None:
            fName = baseName
            # inCurDir = True
        else:
            d = os.path.dirname(fileName)
            if (d == '') or sameFile(os.getcwd(), d):
                fName = baseName
                # inCurDir = True
            else:
                fName = os.path.join(d, baseName)
                # inCurDir = False
        if not os.path.exists(fName):
            try:
                try:
                    util.writeFile( fName, encodedText.replace('\t', ' '), sync=False )
                except Exception, e:
                    msg = tr('#File write error') + ': ' + exMsg(e)
                    return (msg, None, None)
                try:
                    e, o = cmd(["fpc", fName])
                except Exception, e:
                    msg = 'fpc: ' + exMsg(e)
                    return (msg, None, None)
                msg = e + o.decode( encoding )
                eLines = e.count('\n')
                errs = []
                warns = []
                i = eLines
                for l in o.split('\n'):
                    r = _pfpcLine.match(l + '\n')
                    if r and (r.group(1) == baseName):
                        line = int(r.group(2)) - 1
                        col = int(r.group(3)) - 1
                        pos = (line, col)
                        link = (i, pos)
                        m = r.group(4)
                        if m.startswith('Error:') or m.startswith('Fatal:'):
                            errs.append(link)
                        else:
                            warns.append(link)
                    i = i + 1
                return (msg, errs, warns)
            finally:
                # Clean up the temporary source file (the original snippet is truncated
                # at this point; this cleanup step is an assumption).
                os.remove(fName)
def convertFileToFloOnly(neg_flo, out_file_neg):
    neg_flo = util.readLinesFromFile(neg_flo)
    neg_only_flo = []
    for neg_flo_curr in neg_flo:
        neg_flo_curr = neg_flo_curr.split(' ')
        neg_only_flo.append(neg_flo_curr[-1] + ' ' + neg_flo_curr[1])
    assert len(neg_only_flo) == len(neg_flo)
    util.writeFile(out_file_neg, neg_only_flo)
def problem_examples(options):
    problem = loadProblemProperties()

    statementsFile = options.statements + "/" + problem["id"] + ".tex"
    if not os.path.exists(statementsFile):
        raise PException('Statements "{}" not found ', problem["id"], statementsFile)

    checker, interact, tests = findCheckerAndTests()

    def unescapeTex(line):
        return (line.replace("\~", "---###TILDE###---").replace(
            "~", "").replace("$\\sim$", "~").replace("{}", "").replace(
                "\\\\", "---###SLASH###---").replace("\\", "").replace(
                    "---###SLASH###---", "\\").replace("---###TILDE###---", "~"))

    tests = 0
    state = ["none"]
    input = []
    output = []
    for rline in util.readLines(statementsFile):
        line = rline.strip()
        if state[0] == "none":
            if line == "\\exmp{":
                tests += 1
                state[0] = "input"
                input[0:len(input)] = []
        elif state[0] == "input":
            if line == "}{":
                state[0] = "output"
                output[0:len(output)] = []
                if input != util.readLines("tests/" + testName(problem, tests)):
                    raise PException(
                        'EXAMPLES FAILED FOR PROBLEM {}. Example input {} is not equal to test.',
                        problem["id"], tests)
            else:
                input += [unescapeTex(rline)]
        elif state[0] == "output":
            if line == "}" or line == "}%":
                state[0] = "none"
                util.writeFile("__output", "{}", "".join(output))
                if run(
                        "Check", "tests/" + testName(problem, tests), "__output",
                        "tests/" + testName(problem, tests) + ".a".format(tests)):
                    raise PException(
                        'EXAMPLES FAILED FOR PROBLEM {}. Example output {} rejected by checker.',
                        problem["id"], tests)
                util.removeFiles("__output")
            else:
                output += [unescapeTex(rline)]

    if state[0] != "none":
        raise PException('Invalid examples markup.')
    if tests == 0:
        raise PException('No examples found.')
    return 0
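# The contest tooling (problem_examples, problem_xml, banner) calls util.writeFile with a
# format string followed by arguments, e.g. util.writeFile("__output", "{}", "".join(output)),
# so its writeFile apparently formats before writing. A minimal sketch of that variant,
# inferred from the call sites only; the tool's real util module may differ.

def writeFile(file_name, format, *args):
    # Format the given template with *args and write the result to file_name.
    with open(file_name, "w") as f:
        f.write(format.format(*args))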
def writeScriptToGetFloViz(input_files, output_files, out_file_sh, path_to_binary=None):
    if path_to_binary is None:
        path_to_binary = '/home/maheenrashid/Downloads/flow-code/color_flow'
    lines = []
    for input_file, output_file in zip(input_files, output_files):
        line = path_to_binary + ' ' + input_file + ' ' + output_file
        lines.append(line)
    util.writeFile(out_file_sh, lines)
def fetchAndSaveEmployees(company_urls_with_pages, companyDetail, driver):
    employees = []
    for company_url_with_page in company_urls_with_pages:
        print("Opening new page...")
        driver.get(company_url_with_page)
        employee_list = driver.find_elements_by_class_name("search-results__result-item")
        (dt, micro) = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f').split('.')
        dt = "%s%03d" % (dt, int(micro) / 1000)
        fileName = "output/employees/employeePages-" + dt + ".json"
        for employee in employee_list:
            try:
                employee_element = extractEmployeeElement(driver, employee)
                if employee_element is None:
                    # Retry once if the element could not be extracted the first time.
                    employee_element = extractEmployeeElement(driver, employee)
                employee_name = employee_element.text
                employee_link = employee_element.find_element_by_tag_name("a").get_attribute("href")
                employee_designation = employee.find_elements_by_tag_name("dd")[1].text.split("at")[0].strip()
                employee_location = employee.find_elements_by_tag_name("dd")[3].text
                employee_info = {}
                employee_info['company'] = companyDetail
                employee_info['name'] = employee_name
                firstname = ""
                lastname = ""
                split_content = employee_name.split(" ", 1)
                len_of_name = len(split_content)
                if len_of_name == 1:
                    firstname = split_content[0]
                elif len_of_name == 2:
                    firstname = split_content[0]
                    lastname = split_content[1]
                employee_info['firstname'] = firstname
                employee_info['lastname'] = lastname
                employee_info['link'] = employee_link
                employee_info['designation'] = employee_designation
                employee_info['location'] = employee_location
                employees.append(employee_info)
                print(employee_info)
                util.writeFile(fileName, json.dumps(employee_info))
                if len(employees) == 10:
                    break
            except Exception as e:
                print("Error occurred while fetching info for " + str(e))
        if len(employees) == 10:
            break
    return employees
def writeTrainFilesWithFlow(old_train_file, dir_flo_im, new_train_file, ext='.png'):
    lines = util.readLinesFromFile(old_train_file)
    img_files = [line[:line.index(' ')] for line in lines]
    file_names = util.getFileNames(img_files, ext=False)
    flo_im_files = [os.path.join(dir_flo_im, file_name + ext) for file_name in file_names]
    for flo_im_file in flo_im_files:
        assert os.path.exists(flo_im_file)
    lines_new = [line + ' ' + flo_im_curr for line, flo_im_curr in zip(lines, flo_im_files)]
    util.writeFile(new_train_file, lines_new)
def script_writeTrainFile():
    dir_val = '/disk2/ms_coco/train2014'
    out_dir = '/disk2/mayExperiments/train_data'
    util.mkdir(out_dir)
    imgs = util.getEndingFiles(dir_val, '.jpg')
    imgs = [os.path.join(dir_val, file_curr) for file_curr in imgs]
    imgs.sort()
    out_file = os.path.join(out_dir, 'train.txt')
    util.writeFile(out_file, imgs)
def script_writeCommandsForExperiment():
    # out_dir='/disk3/maheen_data/debug_networks/noFixCopyByLayer';
    # model_file='/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/examples/opticalflow/final.caffemodel';
    out_dir = '/disk3/maheen_data/debug_networks/noFixCopyByLayerAlexNet'
    model_file = '/home/maheenrashid/Downloads/debugging_jacob/optical_flow_prediction/models/bvlc_alexnet/bvlc_alexnet.caffemodel'
    util.mkdir(out_dir)

    train_txt_orig_path = '/disk3/maheen_data/debug_networks/noFix/train.txt'
    template_deploy_file = 'deploy_debug_noFix.prototxt'
    template_solver_file = 'solver_debug.prototxt'
    train_file = os.path.join(out_dir, 'train.txt')
    shutil.copyfile(train_txt_orig_path, train_file)

    base_lr = 0.0001
    snapshot = 100
    layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6', 'fc7']
    command_pre = os.path.join(out_dir, 'debug_')
    commands = []
    for idx in range(len(layers)):
        # if idx==0:
        #     fix_layers=layers[0];
        #     layer_str=str(fix_layers);
        #     model_file_curr=None;
        # else:
        fix_layers = layers[:idx + 1]
        layer_str = '_'.join(fix_layers)
        model_file_curr = model_file
        # print fix_layers
        if idx < len(layers) / 2:
            gpu = 0
        else:
            gpu = 1
        snapshot_prefix = os.path.join(out_dir, 'opt_noFix_' + layer_str + '_')
        out_deploy_file = os.path.join(out_dir, 'deploy_' + layer_str + '.prototxt')
        out_solver_file = os.path.join(out_dir, 'solver_' + layer_str + '.prototxt')
        log_file = os.path.join(out_dir, 'log_' + layer_str + '.log')
        replaceSolverFile(out_solver_file, template_solver_file, out_deploy_file,
                          base_lr, snapshot, snapshot_prefix, gpu)
        replaceDeployFile(out_deploy_file, template_deploy_file, train_file, fix_layers)
        command = printTrainingCommand(out_solver_file, log_file, model_file_curr)
        commands.append(command)

    command_file_1 = command_pre + '0.sh'
    util.writeFile(command_file_1, commands[:len(commands) / 2])
    command_file_2 = command_pre + '1.sh'
    util.writeFile(command_file_2, commands[len(commands) / 2:])
def script_writeValFile():
    dir_val = '/disk2/ms_coco/val2014'
    out_dir = '/disk2/mayExperiments/validation'
    util.mkdir(out_dir)
    imgs = util.getEndingFiles(dir_val, '.jpg')
    imgs = [os.path.join(dir_val, file_curr) for file_curr in imgs]
    imgs.sort()
    imgs = imgs[:5000]
    out_file = os.path.join(out_dir, 'val.txt')
    util.writeFile(out_file, imgs)
def problem_xml(options):
    problem = loadProblemProperties()
    contest = loadContestProperties()

    if options.time_limit:
        contest["timelimit"] = options.time_limit
    if options.memory_limit:
        contest["memorylimit"] = options.memory_limit
    if options.prefix:
        contest["problem-prefix"] = options.prefix

    util.removeFiles("problem.xml")
    prefix = contest["problem-prefix"]
    prefix = prefix if prefix == "" or prefix.endswith(".") else prefix + "."

    checker, interact, tests = findCheckerAndTests()
    checkerLine = (
        '<binary executable-id = "java.check" file = "check.jar"/>'
        if checker.lower() == "check.jar" else
        '<binary executable-id = "x86.exe.win32" file = "check.exe"/>')

    util.writeFile(
        "problem.xml", """
<problem id = "{id}" version = "1.0" >
    <judging>
        <script type = "%icpc">
            <testset
                test-count = "{testNumber}"
                input-href = "tests/{tests}"
                answer-href = "tests/{tests}.a"
                input-name = "{input}"
                output-name = "{output}"
                time-limit = "{timelimit}"
                memory-limit = "{memorylimit}"
            />
            <verifier type = "%testlib">
                {checkerLine}
            </verifier>
        </script>
    </judging>
</problem>""".format(id=prefix + problem["id"],
                     checkerLine=checkerLine,
                     timelimit=(problem if hasattr(problem, "timelimit") else contest)["timelimit"],
                     memorylimit=(problem if hasattr(problem, "memorylimit") else contest)["memorylimit"],
                     testNumber=len(tests),
                     tests=problem["tests"],
                     input=problem["input"],
                     output=problem["output"]))
    return 0
def main():
    path_meta = '/disk2/res11/tubePatches'
    out_commands = '/disk2/res11/commands_deleteAllImages.txt'
    dirs = [os.path.join(path_meta, dir_curr) for dir_curr in os.listdir(path_meta)
            if os.path.isdir(os.path.join(path_meta, dir_curr))]
    print len(dirs)
    commands = []
    for dir_curr in dirs:
        dirs_in = [os.path.join(dir_curr, dir_in) for dir_in in os.listdir(dir_curr)
                   if os.path.isdir(os.path.join(dir_curr, dir_in))]
        commands.extend(['rm -v ' + dir_in + '/*.jpg' for dir_in in dirs_in])
    print len(commands)
    print commands[:10]
    util.writeFile(out_commands, commands)
def recordContainingFiles(dirs, num_to_evaluate, out_file_hmdb, post_dir='images', ext='.flo'):
    random.shuffle(dirs)
    print len(dirs)
    dirs = dirs[:num_to_evaluate]
    print dirs[0]
    tifs = []
    for idx_dir_curr, dir_curr in enumerate(dirs):
        print idx_dir_curr
        tif_files = [os.path.join(dir_curr, file_curr)
                     for file_curr in util.getFilesInFolder(os.path.join(dir_curr, post_dir), ext)]
        tifs.extend(tif_files)
    print len(tifs)
    util.writeFile(out_file_hmdb, tifs)
def writeCommands_hacky(out_file_commands, dirs, caffe_bin, deploy_name, path_to_model, gpu):
    commands = []
    for dir_curr in dirs:
        out_deploy = os.path.join(dir_curr, deploy_name)
        args = [caffe_bin, 'test', '-model', util.escapeString(out_deploy),
                '-weights', path_to_model, '-iterations', '1', '-gpu', str(gpu)]
        cmd = str.join(' ', args)
        commands.append(cmd)
    util.writeFile(out_file_commands, commands)
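# A small usage sketch for writeCommands_hacky. Every path and directory below is a
# placeholder chosen for illustration, not a value taken from the original scripts.
dirs = ['/data/flow/video_0', '/data/flow/video_1']
writeCommands_hacky('/data/flow/test_commands.txt', dirs,
                    caffe_bin='/opt/caffe/build/tools/caffe',
                    deploy_name='deploy.prototxt',
                    path_to_model='/models/flownet.caffemodel',
                    gpu=0)
# Each line of test_commands.txt is then a self-contained `caffe test` invocation.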
def script_saveSegSavingInfoFiles():
    dir_overlaps = '/disk3/maheen_data/headC_160_noFlow_bbox/mat_overlaps_no_neg_1000'
    out_dir = '/disk3/maheen_data/debugging_score_and_scale'
    img_dir_meta = '/disk2/mayExperiments/validation/rescaled_images'
    out_dir_npy = os.path.join(out_dir, 'npy_for_idx')
    out_file_test_pre = os.path.join(out_dir, 'test_with_seg')
    # out_file_test_big=os.path.join(out_dir,'test_with_seg_big.txt');
    util.mkdir(out_dir_npy)
    num_to_pick = 10

    mat_overlaps = util.getFilesInFolder(dir_overlaps, '.npz')
    # mat_overlaps = mat_overlaps[:10];
    args = []
    for idx_mat_overlap_file, mat_overlap_file in enumerate(mat_overlaps):
        args.append((mat_overlap_file, num_to_pick, idx_mat_overlap_file))
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    pred_scores_all = p.map(loadAndPickN, args)
    print len(args)

    lines_to_write = {}
    # lines_to_write_big=[];
    img_names = util.getFileNames(mat_overlaps, ext=False)
    for img_name, pred_scores in zip(img_names, pred_scores_all):
        img_num_uni = np.unique(pred_scores[:, 1])
        for img_num in img_num_uni:
            img_num = int(img_num)
            curr_im = os.path.join(img_dir_meta, str(img_num), img_name + '.jpg')
            # print curr_im
            assert os.path.exists(curr_im)
            out_dir_npy_curr = os.path.join(out_dir_npy, str(img_num))
            util.mkdir(out_dir_npy_curr)
            out_file = os.path.join(out_dir_npy_curr, img_name + '.npy')
            pred_scores_rel = pred_scores[pred_scores[:, 1] == img_num, :]
            np.save(out_file, pred_scores_rel)
            if img_num in lines_to_write:
                lines_to_write[img_num].append(curr_im + ' ' + out_file)
            else:
                lines_to_write[img_num] = [curr_im + ' ' + out_file]

    for img_num in lines_to_write.keys():
        out_file_test = out_file_test_pre + '_' + str(img_num) + '.txt'
        print out_file_test, len(lines_to_write[img_num])
        util.writeFile(out_file_test, lines_to_write[img_num])
def script_writeNegFile():
    dir_flow = '/disk2/aprilExperiments/deep_proposals/flow/results_neg'
    out_text = '/disk2/aprilExperiments/deep_proposals/flow/test_neg.txt'
    # util.mkdir(dir_flow);
    neg_text = '/disk2/marchExperiments/deep_proposals/negatives.txt'
    lines = util.readLinesFromFile(neg_text)
    neg_images = [line_curr[:line_curr.index(' ')] for line_curr in lines]
    neg_images = neg_images[:100]
    to_write = [neg_image + ' 1' for neg_image in neg_images]
    util.writeFile(out_text, to_write)
def writeh5ImgFile(dir_neg, out_file_match):
    lines = []
    h5_files = [os.path.join(dir_neg, file_curr) for file_curr in os.listdir(dir_neg)
                if file_curr.endswith('.h5')]
    print len(h5_files)
    for idx_file_curr, file_curr in enumerate(h5_files):
        if idx_file_curr % 100 == 0:
            print idx_file_curr
        img_file = util.readLinesFromFile(file_curr.replace('.h5', '.txt'))[0].strip()
        # print file_curr,img_file
        lines.append(file_curr + ' ' + img_file)
    util.writeFile(out_file_match, lines)
def gpcpCompile (text, encodedText, encoding, fileName):
    assert type(text) is unicode
    assert type(encodedText) is str
    assert encoding != None

    r = _pMod.match(text)
    if r != None:
        modName = r.group(1).encode('ascii')
        baseName = modName + '.$$$'
        if not os.path.exists(baseName):
            try:
                try:
                    util.writeFile( baseName, encodedText.replace('\t', ' '), sync=False )
                except Exception, e:
                    msg = tr('#File write error') + ': ' + exMsg(e)
                    return (msg, None, None)
                try:
                    e, o = cmd(["gpcp", "/nodebug", "/hsize=32000", "/unsafe", baseName])
                except Exception, e:
                    msg = 'gpcp: ' + exMsg(e)
                    return (msg, None, None)
                msg = e + o.decode( encoding )
                eLines = e.count('\n')
                errs = []
                warns = []
                i = eLines
                state = 0  # 0 - outside, 1 - line pos matched
                for l in o.split('\n'):
                    if state == 0:
                        r = _pgpcpLine.match(l)
                        if r:
                            line = int(r.group(1)) - 1
                            state = 1  # line pos matched
                    elif state == 1:
                        if ' Warning: ' in l:
                            x = warns
                        else:
                            x = errs
                        col = l.split('^')[0].count('-')
                        pos = (line, col)
                        x.append( (i - 1, pos) )
                        x.append( (i, pos) )
                        state = 0  # outside
                    i = i + 1
                return (msg, errs, warns)
            finally:
                # Clean up the temporary source file (the original snippet is truncated
                # here; this cleanup step is an assumption).
                os.remove(baseName)
def writeNewFileWithFlow(pos_data, flow_files, im_files, out_file_pos):
    pos_data_1 = [pos_data_curr[:pos_data_curr.index(' ')] for pos_data_curr in pos_data]
    new_pos_data = []
    for idx_flow_file, flow_file in enumerate(flow_files):
        img_file_corr = im_files[idx_flow_file]
        pos_data_corr = pos_data[pos_data_1.index(img_file_corr)]
        new_pos = pos_data_corr + ' ' + flow_file
        new_pos_data.append(new_pos)
    print new_pos_data[0]
    util.writeFile(out_file_pos, new_pos_data)
def script_makeUCFTestTrainTxt():
    dir_meta = '/home/maheenrashid/Downloads/opticalflow/videos/v_BabyCrawling_g01_c01/images'
    out_dir = '/disk3/maheen_data/debug_networks/sanityCheckDebug'
    util.mkdir(out_dir)
    train_file = os.path.join(out_dir, 'train.txt')
    tifs = util.getFilesInFolder(dir_meta, '.tif')
    imgs = [file_curr.replace('.tif', '.jpg') for file_curr in tifs]
    for file_curr in imgs:
        assert os.path.exists(file_curr)
    lines = [img + ' ' + tif for img, tif in zip(imgs, tifs)]
    util.writeFile(train_file, lines)
def saveOutputInfoFileMP(folder, out_file_text, out_files_test):
    if type(folder) != type('str'):
        list_files = folder
    else:
        list_files = util.getFilesInFolder(folder, '.h5')
    args = []
    for list_file in list_files:
        args.append((list_file, out_files_test))
    p = multiprocessing.Pool(NUM_THREADS)
    lines_to_write = p.map(getOutputInfoMP, args)
    lines_to_write = [line_curr for line_curr in lines_to_write if line_curr is not None]
    util.writeFile(out_file_text, lines_to_write)
def saveMinEqualFrames(train_new_text, out_file_idx, out_file_eq, includeHuman=True):
    lines = util.readLinesFromFile(train_new_text)
    img_paths = [line[:line.index(' ')] for line in lines]
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    vals = p.map(getDataSetAndVideoName, img_paths)
    [dataset, video] = zip(*vals)
    dataset = np.array(dataset)
    print np.unique(dataset)

    frame_idx_rec = {}
    if includeHuman:
        frame_idx_rec['human'] = list(np.where(dataset == 'hmdb_try_2')[0])
    for idx, video_curr in enumerate(video):
        if dataset[idx] == 'youtube':
            class_curr = video_curr[:video_curr.index('_')]
            if class_curr in frame_idx_rec:
                frame_idx_rec[class_curr].append(idx)
            else:
                frame_idx_rec[class_curr] = [idx]

    for class_curr in frame_idx_rec.keys():
        print class_curr, len(frame_idx_rec[class_curr])

    min_frames = min([len(val_curr) for val_curr in frame_idx_rec.values()])
    print 'min_frames', min_frames

    idx_to_pick = []
    for class_curr in frame_idx_rec.keys():
        idx_curr = frame_idx_rec[class_curr]
        random.shuffle(idx_curr)
        idx_to_pick.extend(idx_curr[:min_frames])
        # print class_curr,len(frame_idx_rec[class_curr]);

    idx_all = [idx_curr for idx_curr_all in frame_idx_rec.values() for idx_curr in idx_curr_all]
    print len(idx_all), len(lines)
    assert len(idx_all) == len(lines)
    idx_all.sort()
    print idx_all == list(range(len(lines)))
    assert idx_all == list(range(len(lines)))

    lines_to_keep = [lines[idx_curr] for idx_curr in idx_to_pick]
    print len(lines_to_keep)
    np.save(out_file_idx, np.array(idx_to_pick))
    util.writeFile(out_file_eq, lines_to_keep)
def writeTrainingDataFiles(dir_content_file, pre_dir, img_dir, out_file_text, ignore_amount=-2, postfix='.jpg'):
    start_idx = len(pre_dir)
    files = util.readLinesFromFile(dir_content_file)
    lines_to_write = []
    for idx_file_curr, file_curr in enumerate(files):
        if idx_file_curr % 1000 == 0:
            print idx_file_curr
        file_name = file_curr[start_idx + 1:]
        file_name = file_name.split('_')
        file_name = '_'.join(file_name[:ignore_amount])
        file_name = file_name + postfix
        file_name = os.path.join(img_dir, file_name)
        lines_to_write.append(file_name + ' ' + file_curr)
    util.writeFile(out_file_text, lines_to_write)
def script_writeHumanOnlyNegFile():
    neg_file_old = '/disk2/marchExperiments/deep_proposals/negatives.txt'
    neg_file_new = '/disk2/marchExperiments/deep_proposals/negatives_onlyHuman.txt'
    npy_dir_old = '/disk2/marchExperiments/deep_proposals/negatives'
    npy_dir_new = '/disk2/aprilExperiments/negatives_npy_onlyHuman'
    lines = util.readLinesFromFile(neg_file_old)
    lines_new = [line.replace(npy_dir_old, npy_dir_new) for line in lines]
    for line in lines_new:
        assert npy_dir_new in line
    print len(lines), len(lines_new)
    print lines_new[0]
    util.writeFile(neg_file_new, lines_new)
def writeTrainTxt(train_data_file, all_dirs):
    strings = []
    for no_dir_curr, dir_curr in enumerate(all_dirs):
        print no_dir_curr, dir_curr
        # dir_curr=dir_curr[:-1];
        curr_flos = [os.path.join(dir_curr, curr_flo) for curr_flo in os.listdir(dir_curr)
                     if curr_flo.endswith('.tif')]
        for curr_flo in curr_flos:
            curr_im = curr_flo.replace('.tif', '.jpg')
            assert os.path.exists(curr_im)
            string_curr = curr_im + ' ' + curr_flo + ' '
            strings.append(string_curr)
    print len(strings)
    # print strings[:3];
    # random.shuffle(strings);
    util.writeFile(train_data_file, strings)
def writeClassTextFile(train_val_txt, path_to_im, out_file):
    lines = util.readLinesFromFile(train_val_txt)
    pos_im = []
    lines_split = [line.split(' ', 1) for line in lines]
    for idx, line_split in enumerate(lines_split):
        num = int(line_split[1])  # parsed label; not used below
    # Keep only images whose label is non-negative.
    pos_im = [line_split[0] for line_split in lines_split if int(line_split[1]) >= 0]
    ims = [os.path.join(path_to_im, pos_im_curr + '.jpg') for pos_im_curr in pos_im]
    util.writeFile(out_file, ims)
def translate_query(self, query_text):
    """
    Perform the actual translation.
    :param query_text:
    :return:
    """
    partial_result = "Query: " + query_text + '\n'
    # Parse query.
    logger.info("Translating query: %s." % query_text)
    start_time = time.time()
    # Parse the query.
    query = self.parse_and_identify_entities(query_text)
    # Set the relation oracle.
    query.relation_oracle = self.scorer.get_parameters().relation_oracle
    for e in query.identified_entities:
        partial_result += "Entity: " + str((e.name, e.surface_score, e.score, e.perfect_match)) + '\n'
        # logging.error((e.name, e.surface_score, e.score, e.perfect_match))
    # Identify the target type.
    target_identifier = AnswerTypeIdentifier()
    target_identifier.identify_target(query)
    # logging.error(query.target_type.as_string())
    partial_result += "TargetType: " + str(query.target_type.as_string()) + "\n"
    # Get content tokens of the query.
    query.query_content_tokens = get_content_tokens(query.query_tokens)
    # Match the patterns.
    pattern_matcher = QueryPatternMatcher(query, self.query_extender, self.sparql_backend)
    ert_matches = []
    ermrt_matches = []
    ermrert_matches = []
    ert_matches = pattern_matcher.match_ERT_pattern()
    ermrt_matches = pattern_matcher.match_ERMRT_pattern()
    ermrert_matches = pattern_matcher.match_ERMRERT_pattern()
    partial_result += "Pattern matches: ERT = %d, ERMRT = %d, ERMRERT = %d\n" % (
        len(ert_matches), len(ermrt_matches), len(ermrert_matches))
    writeFile(test_file, partial_result, "a")
    duration = (time.time() - start_time) * 1000
    logging.info("Total translation time: %.2f ms." % duration)
    return ert_matches + ermrt_matches + ermrert_matches
def banner(problemDir, comments, file, type=None, author=None):
    contest = loadContestProperties()
    problem = loadProblemProperties()

    if type and not author:
        author = file.split(".")[0].split("_")[1]

    line = comments["begin"] + comments["line"] * 64 + comments["end"]

    def text(format, *args):
        return comments["begin"] + " " + format.format(*args).ljust(63) + comments["end"]

    def multiline(name, property):
        result = []
        for author in map(expand, problem[property].split(" ")):
            result += [aligned(name, author)]
            name = ""
        return result

    def aligned(name, value):
        return text("{:<21} {}", name, value)

    def expand(author):
        if not "juror." + author in contest:
            raise PException('Cannot find juror "{}"', author)
        return contest["juror." + author]

    util.writeFile(
        problemDir + file, "{}", "\n".join(
            [
                line,
                text("{}", contest["name-1"]),
                text("{}", contest["name-2"]),
                text("{}", contest["location"] + ", " + contest["date"]),
                line,
                text("Problem {}. {}", problem["alias"], problem["name"]),
                text("")
            ] + multiline("Original idea", "idea") +
            multiline("Problem statement", "statement") +
            multiline("Test set", "testset") + ([
                line,
                text("{}", type),
                text(""),
                aligned("Author", expand(author)),
            ] if type else []) + [
                line,
                "",
                "",
            ]) + "".join(util.readLines(file)))
def experiment2(datasets, numClusters, dimensionality):
    # ------------------------------------------------------------
    # PART 1: CHOOSING DATA
    # ------------------------------------------------------------
    ###############---VECTOR CONFIGURATION---################
    ###############---REDUCTION WITH REGRESSION---################
    for d in datasets:
        reducedDictionary = regression.getReducedSpacePCA(d.getVectors(), dimensionality)
        d.setReducedDictionary(reducedDictionary, dimensionality)

    # ------------------------------------------------------------
    # PART 2: NORMALIZATION AND CHOOSING DISTANCE MEASURE
    # ------------------------------------------------------------
    ###############---VECTOR NORMALIZATION---################
    # At this point, we have a list of dictionaries of uniform dimensionality.
    # Each dictionary contains labels mapping to vectors.
    normalizedDictionaries = []
    for d in datasets:
        normalizedDictionaries.append(d.getReducedVectors())
        # THERE ARE ALSO OTHER WAYS TO NORMALIZE

    # ------------------------------------------------------------
    # PART 3: RUN
    # ------------------------------------------------------------
    ###################---CLUSTERING---#####################
    clusterResults = cluster.lloyds(util.crunchDictionaryList(normalizedDictionaries),
                                    numClusters, distance.infNorm)
    print clusterResults[1]

    # ------------------------------------------------------------
    # PART 4: WRITE RESULTS
    # ------------------------------------------------------------
    ##################---STORE RESULTS---####################
    # def writeFile(expIndex, numClusters, clusteringAlgorithmInfo, distanceMeasurementInfo, vectorConfigurationInfo, clusters):
    # Prepare to write the experiment file -- fill in the values below for this experiment.
    clusteringAlgorithmInfo = "lloyds"
    distanceMeasurementInfo = "infNorm"
    vectorConfigurationInfo = "{}, {}".format(
        "configured using regression, reduced to dimensionality:", dimensionality)
    util.writeFile("maks", numClusters, clusteringAlgorithmInfo, distanceMeasurementInfo,
                   vectorConfigurationInfo, "Trying to get this thing to work!", clusterResults[1])
def modifyHumanFile(orig_file, new_file):
    data = util.readLinesFromFile(orig_file)
    data = [tuple([idx] + data_curr.split(' ')) for idx, data_curr in enumerate(data)]
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    new_lines = p.map(modifyHumanFileMultiProc, data)
    # Single-process equivalent kept for reference:
    # new_lines=[];
    # for idx,(im_file,npy_file) in enumerate(data):
    #     print idx,len(data);
    #     im=scipy.misc.imread(im_file);
    #     im_size=im.shape;
    #     line_curr=npy_file+' '+str(im.shape[0])+' '+str(im.shape[1]);
    #     new_lines.append(line_curr);
    print len(new_lines)
    print new_lines[0]
    util.writeFile(new_file, new_lines)
def saveOutputInfoFile(folder, out_file_text):
    if type(folder) != type('str'):
        list_files = folder
    else:
        list_files = util.getFilesInFolder(folder, '.h5')
    img_files = getImgFilesFromH5s(list_files)
    lines_to_write = []
    for idx, img_file in enumerate(img_files):
        im = scipy.misc.imread(img_file)
        if len(im.shape) > 2:
            str_size = [im.shape[0], im.shape[1], im.shape[2]]
        else:
            str_size = [im.shape[0], im.shape[1], 1]
        str_size = [str(i) for i in str_size]
        line_curr = [list_files[idx], img_file] + str_size
        line_curr = ' '.join(line_curr)
        lines_to_write.append(line_curr)
    util.writeFile(out_file_text, lines_to_write)
def contest_xml(options):
    contest = loadContestProperties("contest.properties")
    body = "\n ".join([
        '<problem-ref id = "{short-id}" alias = "{alias}" problem-id = "{id}" name = "{name}"/>'
        .format(**loadProblemProperties(file, file + "/problem.properties"))
        for file in os.listdir()
        if os.path.isdir(file)
        if not file.startswith("_") and not file.startswith(".")
    ])
    util.writeFile(
        "challenge.xml", """
<challenge
    id = "{id}"
    name = "{name-1}, {name-2}"
    scoring-model = "%icpc"
    length = "5h"
    problem-id = "{problem-prefix}"
    xmlai-process = "http://neerc.ifmo.ru/develop/pcms2/xmlai/default-rules.xml"
>
    {body}
</challenge>""".format(body=body, **contest))
    return 0
async def on_message(message):
    channel = client.get_channel(int(os.getenv('CHANNEL_ID')))
    if message.author == client.user:
        return
    mess = message.content.split(' ')
    mess = mess[0].strip()
    if message.channel == channel:
        lastMsg = util.readFile()
        if not mess.isnumeric():
            await message.delete()
            prompt = f"Message sent by {message.author.mention} was deleted because it violated game rules.\nLast valid count: {lastMsg}"
            await channel.send(prompt)
        else:
            res = int(mess) - lastMsg
            if res != 1:
                await message.delete()
                prompt = f"Message sent by {message.author.mention} was deleted because it violated game rules.\nLast valid count: {lastMsg}"
                await channel.send(prompt)
            else:
                util.writeFile(str(lastMsg + 1))
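# The counting-game handler assumes a pair of tiny persistence helpers: util.readFile()
# returns the last valid count as an int and util.writeFile() stores the new one. A minimal
# sketch of that pair, assuming a single counter file; 'count.txt' is a placeholder path,
# not taken from the original bot.

def readFile():
    # Return the last valid count, defaulting to 0 if nothing has been stored yet.
    try:
        with open('count.txt') as f:
            return int(f.read().strip())
    except (IOError, ValueError):
        return 0

def writeFile(value):
    # Persist the new count (passed in as a string by the handler).
    with open('count.txt', 'w') as f:
        f.write(value)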
def writeJustTestScript(out_file_sh, val_data_path, iterations, batch_size, model_out_tups, face):
    file_th = '/home/maheenrashid/Downloads/horses/torch/justTest.th'
    commands_all = []
    for model_path_curr, out_dir_curr in model_out_tups:
        command_curr = ['th', file_th]
        command_curr = command_curr + ['-val_data_path', val_data_path]
        command_curr = command_curr + ['-iterations', str(iterations)]
        command_curr = command_curr + ['-batchSize', str(batch_size)]
        command_curr = command_curr + ['-full_model_path', model_path_curr]
        command_curr = command_curr + ['-outDirTest', out_dir_curr]
        if face:
            command_curr = command_curr + ['-face']
        command_curr = ' '.join(command_curr)
        # print command_curr;
        commands_all.append(command_curr)
    util.writeFile(out_file_sh, commands_all)
    print len(commands_all)
    print out_file_sh
def script_writeFlownetCommands(params):
    video_list_file = params.video_list_file
    path_to_video_meta = params.path_to_video_meta
    in_dir_meta = params.in_dir_meta
    out_dir_meta = params.out_dir_meta
    path_to_deploy = params.path_to_deploy
    out_file_commands = params.out_file_commands
    dir_flownet_meta = params.dir_flownet_meta
    path_to_sizer = params.path_to_sizer
    caffe_bin = params.caffe_bin
    path_to_model = params.path_to_model
    text_1_org = params.text_1
    text_2_org = params.text_2
    deploy_file = params.deploy_file
    gpu = params.gpu

    im_dirs = util.readLinesFromFile(video_list_file)
    im_dirs = [im_dir.replace(path_to_video_meta, in_dir_meta)[:-4] for im_dir in im_dirs]
    commands = []
    # im_dirs=['/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data/hmdb/pick/THE_WALLET_TRICK!!!_pick_f_cm_np2_ba_med_1'];
    for idx_im_dir, im_dir in enumerate(im_dirs):
        print idx_im_dir, len(im_dirs)
        out_dir_curr = im_dir.replace(in_dir_meta, out_dir_meta)
        text_1 = os.path.join(out_dir_curr, text_1_org)
        text_2 = os.path.join(out_dir_curr, text_2_org)
        out_deploy = os.path.join(out_dir_curr, deploy_file)
        subprocess.call('mkdir -p ' + util.escapeString(out_dir_curr), shell=True)

        list_1, list_2 = getImageListForFlow(im_dir)
        util.writeFile(text_1, list_1)
        util.writeFile(text_2, list_2)

        # im_test=util.escapeString(list_1[0]);
        dim_list = [int(dimstr) for dimstr in
                    str(subprocess.check_output([path_to_sizer, list_1[0]])).split(',')]
        replaceProto(path_to_deploy, out_deploy, dim_list, text_1, text_2, len(list_1), out_dir_curr)

        args = [caffe_bin, 'test', '-model', util.escapeString(out_deploy),
                '-weights', path_to_model, '-iterations', '1', '-gpu', str(gpu)]
        cmd = str.join(' ', args)
        commands.append(cmd)
        # print('Executing %s' % cmd)
    util.writeFile(out_file_commands, commands)
def execute(driver, pageUrl):
    companiesNameUrl = dict()
    driver.get(pageUrl)
    try:
        companies = driver.find_elements(
            By.CSS_SELECTOR, '#seo-dir > div > div:nth-child(3) > div > ul > li > a')
        for company in companies:
            try:
                companyName = company.text
                companyUrl = company.get_attribute('href')
                row = {'n': companyName, 'u': companyUrl}
                util.writeFile("resources/companyIndex.json", json.dumps(row))
                print(companyName)
            except Exception:
                print("Failed to get company detail for " + company.text)
    except TimeoutException:
        print("Loading took too much time!")
    return companiesNameUrl
def main():
    dir_vids = '/disk2/aprilExperiments/horses/mediaFromPPT'
    dir_frames = '/disk2/aprilExperiments/horses/mediaFromPPT_frames'
    out_file_commands = '/disk2/aprilExperiments/horses/extract_frames.txt'
    util.mkdir(dir_frames)
    command_template = 'ffmpeg -i VIDEONAME -vf fps=1 OUTPRE%05d.jpg'
    vids = [os.path.join(dir_vids, file_curr) for file_curr in os.listdir(dir_vids)
            if file_curr.endswith('.mp4')]
    out_pres = [os.path.join(dir_frames,
                             file_curr[file_curr.rindex('/') + 1:file_curr.rindex('.')] + '_')
                for file_curr in vids]
    commands = []
    for vid, out_pre in zip(vids, out_pres):
        command_curr = command_template.replace('VIDEONAME', vid)
        command_curr = command_curr.replace('OUTPRE', out_pre)
        commands.append(command_curr)
    util.writeFile(out_file_commands, commands)
    elif method == 'dfs':
        start_time = time.time()
        bottom = dfs(root, goal)
        dic['running_time'] = '%s' % round(time.time() - start_time, 8)
    elif method == 'ast':
        start_time = time.time()
        bottom = ast(root, goal)
        dic['running_time'] = '%s' % round(time.time() - start_time, 8)
    elif method == 'ada':
        pass

    try:
        import resource
        dic['max_ram_usage'] = '%s' % round(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000, 8)
    except:
        pass

    while bottom:
        if bottom.move:
            dic['path_to_goal'].insert(0, bottom.move)
            dic['cost_of_path'] = dic['cost_of_path'] + 1
        bottom = bottom.parent

    writeFile(dic)
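# The fragment above ends by handing the statistics dict to writeFile. A plausible minimal
# sketch of that writer, assuming it dumps each search statistic to a text file; the
# 'output.txt' path and the "key: value" layout are assumptions, not taken from the
# original driver.

def writeFile(dic):
    # Write each collected search statistic on its own line.
    with open('output.txt', 'w') as f:
        for key in ['path_to_goal', 'cost_of_path', 'running_time', 'max_ram_usage']:
            if key in dic:
                f.write('%s: %s\n' % (key, dic[key]))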
def main():
    out_file_html = '/disk2/aprilExperiments/horses/frames_with_detections/visualize.html'
    out_dir_meta = '/disk2/aprilExperiments/horses/frames_with_detections'
    img_paths = []
    captions = []
    rel_path = ['/disk2', '../../../..']
    for dir_curr in os.listdir(out_dir_meta):
        dir_curr = os.path.join(out_dir_meta, dir_curr)
        if os.path.isdir(dir_curr):
            print dir_curr
            jpegs = [os.path.join(dir_curr, file_curr) for file_curr in os.listdir(dir_curr)
                     if file_curr.endswith('.png')]
            jpegs = [file_curr.replace(rel_path[0], rel_path[1]) for file_curr in jpegs]
            # print jpegs[:10];
            jpegs.sort()
            # print jpegs[:10];
            captions_curr = [''] * len(jpegs)
            print captions_curr
            img_paths.append(jpegs)
            captions.append(captions_curr)
            # raw_input();
    visualize.writeHTML(out_file_html, img_paths, captions, height=100, width=100)
    return

    # The sections below are dead code: they follow an unconditional return and never run.
    dirs_meta = ['/disk2/aprilExperiments/horses/mediaFromPPT_frames',
                 '/disk2/aprilExperiments/horses/ResearchSpring2016_frames']
    out_file = '/disk2/aprilExperiments/horses/list_of_frames.txt'
    im_list = []
    for dir_curr in dirs_meta:
        list_curr = [os.path.join(dir_curr, im_curr) for im_curr in os.listdir(dir_curr)
                     if im_curr.endswith('.jpg')]
        im_list = im_list + list_curr
    util.writeFile(out_file, im_list)
    return

    in_file = '/disk2/aprilExperiments/horses/list_of_frames.txt'
    out_dir_meta = '/disk2/aprilExperiments/horses/frames_with_detections/'
    util.mkdir(out_dir_meta)
    with open(in_file, 'rb') as f:
        im_names = f.readlines()
    im_names = [line.strip('\n') for line in im_names]
    for im_name in im_names:
        vid_name = im_name[im_name.rindex('/') + 1:im_name.rindex('_')]
        out_dir_curr = os.path.join(out_dir_meta, vid_name)
        if not os.path.exists(out_dir_curr):
            os.mkdir(out_dir_curr)
    return

    out_dir = '/disk2/temp/horses'
    arr_file = os.path.join(out_dir, 'Outside4_00011_horse_detections.npy')
    im_file = '/disk2/aprilExperiments/horses/ResearchSpring2016_frames/Outside4_00011.jpg'
    arr = np.load(arr_file)
    out_file = arr_file[:-4] + '.png'
    saveDets(im_file, 'horse', arr, out_file, 0.8)
    # plt.imshow(im);
    # plt.savefig();
    print 'done'
def makeMatchFile(num_neighbors, matches_file, face_data_file, out_dir_meta_horse,
                  out_dir_meta_face, out_file_horse, out_file_face, out_dir_meta_face_old=None):
    face_data = util.readLinesFromFile(face_data_file)
    face_data = [' '.join(line_curr.split(' ')[:num_neighbors]) for line_curr in face_data]

    matches_list = util.readLinesFromFile(matches_file)
    matches_split = [match_curr.split(' ') for match_curr in matches_list]
    horse_list = [match_split[0] for match_split in matches_split]

    match_data = []
    missing_files = []
    for match_split in matches_split:
        match_split_new = [match_split[0]]
        horse_path, horse_file_name = os.path.split(match_split[0])
        horse_file_name = horse_file_name[:horse_file_name.rindex('.')]
        horse_path = horse_path.split('/')
        if horse_path[-1] == 'gxy':
            horse_path = horse_path[-2]
        else:
            horse_path = horse_path[-1]
        horse_file_out = os.path.join(out_dir_meta_horse[0], horse_path, horse_file_name + '.jpg')
        horse_file_npy_out = os.path.join(out_dir_meta_horse[1], horse_path, horse_file_name + '.npy')
        continue_flag = False
        for matches_idx in range(num_neighbors):
            start_idx = (matches_idx * num_neighbors) + 1
            end_idx = start_idx + num_neighbors
            match_curr = match_split[start_idx:end_idx]
            match_curr = ' '.join(match_curr)
            if match_curr in face_data:
                idx_curr = face_data.index(match_curr)
            elif ('lfw_5590/' in match_curr) or ('net_7876/' in match_curr):
                # print ('valid',match_curr)
                idx_curr = -1
            else:
                print('invalid', match_curr)
                missing_files.append((horse_file_out, horse_file_npy_out, match_curr))
                continue
            file_match_curr = match_curr.split(' ')[0]
            path_curr, file_curr = os.path.split(file_match_curr)
            path_curr = path_curr.split('/')[-1]
            file_curr = file_curr[:file_curr.rindex('.')]
            if idx_curr >= 0:
                file_curr = file_curr + '_' + str(idx_curr)
                file_match_curr = os.path.join(out_dir_meta_face[0], path_curr, file_curr + '.jpg')
                file_match_npy_curr = os.path.join(out_dir_meta_face[1], path_curr, file_curr + '.npy')
            else:
                file_match_curr = os.path.join(out_dir_meta_face_old[0], path_curr, file_curr + '.jpg')
                file_match_npy_curr = os.path.join(out_dir_meta_face_old[1], path_curr, file_curr + '.npy')
            match_data.append([horse_file_out, horse_file_npy_out,
                               file_match_curr, file_match_npy_curr])

    valid_matches = []
    not_exist = []
    for match_curr in match_data:
        keep = True
        for idx, file_curr in enumerate(match_curr):
            if not os.path.exists(file_curr):
                if idx > 0:
                    print 'not exist', match_curr, file_curr
                not_exist.append(file_curr)
                keep = False
                break
        if keep:
            valid_matches.append((match_curr[0] + ' ' + match_curr[1],
                                  match_curr[2] + ' ' + match_curr[3]))

    not_exist = set(not_exist)
    print len(not_exist)
    print len(match_data), len(valid_matches)
    util.writeFile(out_file_horse, [data_curr[0] for data_curr in valid_matches])
    util.writeFile(out_file_face, [data_curr[1] for data_curr in valid_matches])
    # NOTE: out_file_face_noIm is not a parameter of this function; it is expected to be
    # defined in the enclosing (module) scope.
    util.modifyHumanFile(out_file_face, out_file_face_noIm)
    return not_exist
if __name__ == "__main__":
    print("Started company info extractor")
    proxies = util.getProxies()
    driver = util.openChromeBrowser(proxies)
    input_file = sys.argv[1]
    with open(input_file) as f:
        data = json.load(f)

    i = 1
    records = []
    totalRecordsCompleted = 0
    for company in data:
        print("performing for company " + company['u'])
        companyDetails = execute(driver, company['u'])
        records.append(companyDetails)
        i = i + 1
        totalRecordsCompleted = totalRecordsCompleted + 1
        if (i == 50 or len(data) == totalRecordsCompleted):
            (dt, micro) = datetime.utcnow().strftime('%Y%m%d%H%M%S.%f').split('.')
            dt = "%s%03d" % (dt, int(micro) / 1000)
            fileName = "companyPages-" + dt + ".json"
            filecontent = json.dumps(records)
            util.writeFile("output/" + fileName, filecontent)
            time.sleep(10)
            driver = util.openChromeBrowser(proxies)
            i = 1
            records = []
    print("Ended company info builder")
def main(): # data='/home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt' # # /home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt # to_search=\ # ['/home/SSD3/maheen-data/horse_project/data_check/horse/im/horses_pascal_selected/2009_004662.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/horses_pascal_selected/2009_004662.npy', # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_11539.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_11539.npy', # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_16786.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_16786.npy', # '/home/SSD3/maheen-data/horse_project/data_check/horse/im/imagenet_n02374451/n02374451_4338.jpg /home/SSD3/maheen-data/horse_project/data_check/horse/npy/imagenet_n02374451/n02374451_4338.npy'] # data=util.readLinesFromFile(data); # print data[0]; # to_search=[file_curr.replace('data_check','data_resize') for file_curr in to_search]; # idx_lines=[data.index(line_curr) for line_curr in to_search if line_curr in data]; # print idx_lines; # for idx_line_curr in idx_lines: # print 'batch_no',(idx_line_curr)/64 # # npy_files=[file_curr[file_curr.index(' ')+1:] for file_curr in data]; # # print npy_files[0]; # # print len(npy_files); # # p=multiprocessing.Pool(multiprocessing.cpu_count()); # # problem_files=p.map(findProblemNPYMP,npy_files); # # problem_files=[file_curr for file_curr in problem_files if file_curr is not None]; # # print (len(problem_files)); # return # data='/home/laoreja/new-deep-landmark/train/vanilla/aflw_224/aflw_vanilla_val_224.txt'; # data='/home/laoreja/new-deep-landmark/train/vanilla/aflw_224/aflw_vanilla_train_224_weight.txt'; # data=util.readLinesFromFile(data); # print data; # total=0; # for h5_file_curr in data: # with h5py.File(h5_file_curr,'r') as hf: # print('List of arrays in this file: ', hf.keys()) # data = hf.get('confidence') # np_data = np.array(data) # total=total+np_data.shape[0]; # print('Shape of the array dataset_1: ', np_data.shape) # print total; # return # horse_path='/home/SSD3/maheen-data/horse_project/data_resize/horse/matches_5_train_allKP.txt' # human_path_noIm='/home/SSD3/maheen-data/horse_project/data_resize/aflw/matches_5_train_allKP_noIm.txt' # human_path='/home/SSD3/maheen-data/horse_project/data_resize/aflw/matches_5_train_allKP.txt' # paths=[horse_path,human_path_noIm,human_path]; # out_files=[file_curr[:file_curr.rindex('.')]+'_dummy.txt' for file_curr in paths]; # for file_curr,out_file_curr in zip(paths,out_files): # data_curr=util.readLinesFromFile(file_curr); # data_curr=data_curr[0:50:5]; # # print data_curr; # print len(data_curr); # util.writeFile(out_file_curr,data_curr); # print out_file_curr; # return # im_path= "/home/SSD3/maheen-data/horse_project/data_resize/horse/im/_04_Aug16_png/horse+head12.jpg" # # 2 : "/home/SSD3/maheen-data/horse_project/data_resize/horse/npy/_04_Aug16_png/horse+head12.npy" # # "/home/SSD3/maheen-data/horse_project/data_resize/aflw/im/0/image67102_20650.jpg" # np_path="/home/SSD3/maheen-data/horse_project/data_resize/horse/npy/_04_Aug16_png/horse+head12.npy" # # "/home/SSD3/maheen-data/horse_project/data_resize/aflw/npy/0/image67102_20650.npy" # # im=scipy.misc.read(im_path); # im=cv2.imread(im_path); # labels=np.load(np_path); # print labels # for i in xrange(labels.shape[0]): # cv2.circle(im, (labels[i][0], 
labels[i][1]), 2, (0,0,255), -1) # cv2.imwrite('/home/SSD3/maheen-data/temp/check.png', im) # return # path_to_th='/home/maheenrashid/Downloads/horses/torch/test_tps_cl.th'; # iterations=10; # out_dir_models='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam' # model_pre=os.path.join(out_dir_models,'intermediate','model_all_'); # model_post='.dat'; # range_models=range(450,4500+1,450); # out_dir_meta=os.path.join(out_dir_models,'test_overtime'); # batch_size=60; # # commands=generateTPSTestCommands(path_to_th,batch_size,iterations,model_pre,model_post,range_models,out_dir_meta) # # print len(commands); # # print commands[0]; # # out_file_commands=os.path.join(out_dir_meta+'.sh'); # # util.writeFile(out_file_commands,commands); # dir_server='/home/SSD3/maheen-data'; # range_batches=range(1,10); # # batch_size=60; # range_images=range(1,61,5); # img_dir_meta='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_overtime' # img_dir=[os.path.join(img_dir_meta,'model_all_'+str(range_model_curr)) for range_model_curr in range_models] # out_file_html='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz.html' # file_post=['_horse.jpg','_human.jpg','_gtwarp.jpg','_predwarp.jpg'] # loss_post='_loss.npy'; # out_file_html=img_dir_meta+'.html'; # img_caption_pre=[str(model_num) for model_num in range_models]; # comparativeLossViz(img_dir,file_post,loss_post,range_batches,range_images,out_file_html,dir_server,img_caption_pre) # return dir_server = '/home/SSD3/maheen-data' range_batches = range(1, 9) # batch_size=60; range_images = range(1, 129, 5) img_dir = ['/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz/'] # out_file_html='/home/SSD3/maheen-data/horse_human_fiveKP_tps_adam/test_viz.html' img_dir = [ '/home/SSD3/maheen-data/horse_project/tps_train_allKP_adam/test_viz' ] out_file_html = '/home/SSD3/maheen-data/horse_project/tps_train_allKP_adam/test_viz.html' file_post = ['_horse.jpg', '_human.jpg', '_gtwarp.jpg', '_predwarp.jpg'] loss_post = '_loss.npy' comparativeLossViz(img_dir, file_post, loss_post, range_batches, range_images, out_file_html, dir_server) return img_files = [] caption_files = [] out_dir = '/home/SSD3/maheen-data/training_kp_withWarp_test_debug_tps_adam' out_dir = '/home/SSD3/maheen-data/testing_5_kp_withWarp_fixed_adam_debug' out_dir = '/home/SSD3/maheen-data/training_5_kp_withWarp_fixed_adam__1e-05/test' dir_server = '/home/SSD3/maheen-data' out_file_html = os.path.join(out_dir, 'viz.html') for i in range(1, 94): im_file = os.path.join(out_dir, str(i) + '_org.jpg') warp_file = os.path.join(out_dir, str(i) + '_warp.jpg') im_file_small = os.path.join(out_dir, str(i) + '_small_org.jpg') warp_file_small = os.path.join(out_dir, str(i) + '_small_warp.jpg') im_file = util.getRelPath(im_file, dir_server) warp_file = util.getRelPath(warp_file, dir_server) im_file_small = util.getRelPath(im_file_small, dir_server) warp_file_small = util.getRelPath(warp_file_small, dir_server) img_files.append([im_file, warp_file]) # ,im_file_small,warp_file_small]); caption_files.append([str(i) + ' org', str(i) + ' warp']) # ,'small_org','small_warp']); visualize.writeHTML(out_file_html, img_files, caption_files, 224, 224) return out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw' num_neighbors = 5 out_file_human = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt') out_file_human_new = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP_noIm.txt') modifyHumanFile(out_file_human, out_file_human_new) # 
out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; out_file_human = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP.txt') out_file_human_new = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP_noIm.txt') modifyHumanFile(out_file_human, out_file_human_new) return # matches_file='/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt' # face_data_file='/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'; # # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt'; # face_data_list_file='/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'; # out_dir_meta_horse='/home/SSD3/maheen-data/horse_project/horse'; # out_dir_meta_horse_list=[os.path.join(out_dir_meta_horse,'im'),os.path.join(out_dir_meta_horse,'npy')]; # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; # out_dir_meta_face_list=[os.path.join(out_dir_meta_face,'im'),os.path.join(out_dir_meta_face,'npy')]; # out_dir_meta_face_old='/home/SSD3/maheen-data/horse_project/face'; # out_dir_meta_face_old_list=[os.path.join(out_dir_meta_face_old,'im'),os.path.join(out_dir_meta_face_old,'npy')]; # num_neighbors=5; # out_file_face=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP.txt'); # out_file_horse=os.path.join(out_dir_meta_horse,'matches_'+str(num_neighbors)+'_train_fiveKP.txt'); # missing_files=makeMatchFile(num_neighbors,matches_file,face_data_file,out_dir_meta_horse_list,out_dir_meta_face_list,out_file_horse,out_file_face,out_dir_meta_face_old_list) # return # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; # num_neighbors=5; # out_file_human=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_val_fiveKP.txt'); # out_file_human_new=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_val_fiveKP_noIm.txt'); # # modifyHumanFile(out_file_human,out_file_human_new) # # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; # out_file_human=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP.txt'); # out_file_human_new=os.path.join(out_dir_meta_face,'matches_'+str(num_neighbors)+'_train_fiveKP_noIm.txt'); # # modifyHumanFile(out_file_human,out_file_human_new) # print out_file_human_new; # return # img_files=[]; # caption_files=[]; # out_dir='/home/SSD3/maheen-data/training_kp_withWarp_test_final' # dir_server='/home/SSD3/maheen-data'; # out_file_html=os.path.join(out_dir,'viz.html'); # for i in range(1,94): # im_file=os.path.join(out_dir,str(i)+'.jpg'); # warp_file=os.path.join(out_dir,str(i)+'_warp.jpg'); # im_file=util.getRelPath(im_file,dir_server); # warp_file=util.getRelPath(warp_file,dir_server); # img_files.append([im_file,warp_file]); # caption_files.append(['org','warp']); # visualize.writeHTML(out_file_html,img_files,caption_files,224,224); # return file_horse = '/home/SSD3/maheen-data/horse_project/horse/matches_5_train_fiveKP.txt' out_file_horse = '/home/SSD3/maheen-data/horse_project/horse_resize/matches_5_train_fiveKP.txt' lines = util.readLinesFromFile(file_horse) print len(lines) lines = list(set(lines)) print len(lines) lines = [line_curr.split(' ') for line_curr in lines] im_files = [line_curr[0] for line_curr in lines] npy_files = [line_curr[1] for line_curr in lines] out_dir_meta_old = '/home/SSD3/maheen-data/horse_project/horse/' out_dir_meta_new = '/home/SSD3/maheen-data/horse_project/horse_resize/' replace_paths = [out_dir_meta_old, out_dir_meta_new] args = [] for idx in 
range(len(im_files)): im_file = im_files[idx] npy_file = npy_files[idx] out_im_file = im_file.replace(replace_paths[0], replace_paths[1]) out_npy_file = npy_file.replace(replace_paths[0], replace_paths[1]) args.append((idx, im_file, npy_file, out_im_file, out_npy_file)) p = multiprocessing.Pool(multiprocessing.cpu_count()) p.map(resizeImAndNpy224, args) out_dir_meta_old = '/home/SSD3/maheen-data/horse_project/horse/' out_dir_meta_new = '/home/SSD3/maheen-data/horse_project/horse_resize/' replace_paths = [out_dir_meta_old, out_dir_meta_new] lines = util.readLinesFromFile(file_horse) lines_new = [ line.replace(replace_paths[0], replace_paths[1]) for line in lines ] util.writeFile(out_file_horse, lines_new) lines = util.readLinesFromFile(out_file_horse) print(len(lines)) im_file = lines[90].split(' ')[0] im = cv2.imread(im_file, 1) labels = np.load(lines[90].split(' ')[1]) for i in xrange(labels.shape[0]): cv2.circle(im, (labels[i][0], labels[i][1]), 2, (0, 0, 255), -1) cv2.imwrite('/home/SSD3/maheen-data/temp/check.png', im) return dir_out = '/home/SSD3/maheen-data/temp/horse_human/viz_transform_aflw_val' visualize.writeHTMLForFolder(dir_out) return out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw' num_neighbors = 5 out_file_human = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt') out_file_human_new = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP_noIm.txt') modifyHumanFile(out_file_human, out_file_human_new) # out_dir_meta_face='/home/SSD3/maheen-data/horse_project/aflw'; out_file_human = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP.txt') out_file_human_new = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_train_fiveKP_noIm.txt') modifyHumanFile(out_file_human, out_file_human_new) return matches_file = '/home/laoreja/data/knn_res_new/knn_5_points_val_list.txt' face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt' # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt'; face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt' out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse' out_dir_meta_horse_list = [ os.path.join(out_dir_meta_horse, 'im'), os.path.join(out_dir_meta_horse, 'npy') ] out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw' out_dir_meta_face_list = [ os.path.join(out_dir_meta_face, 'im'), os.path.join(out_dir_meta_face, 'npy') ] out_dir_meta_face_old = '/home/SSD3/maheen-data/horse_project/face' out_dir_meta_face_old_list = [ os.path.join(out_dir_meta_face_old, 'im'), os.path.join(out_dir_meta_face_old, 'npy') ] num_neighbors = 5 out_file_face = os.path.join( out_dir_meta_face, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt') out_file_horse = os.path.join( out_dir_meta_horse, 'matches_' + str(num_neighbors) + '_val_fiveKP.txt') missing_files = makeMatchFile(num_neighbors, matches_file, face_data_file, out_dir_meta_horse_list, out_dir_meta_face_list, out_file_horse, out_file_face, out_dir_meta_face_old_list) return matches_file = '/home/laoreja/data/knn_res_new/knn_5_points_train_list.txt' matches_file = '/home/maheenrashid/Downloads/knn_5_points_train_list_clean.txt' face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt' # face_data_file_old='/home/laoreja/deep-landmark-master/dataset/train/trainImageList.txt'; face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt' 
    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]
    out_dir_meta_face_old = '/home/SSD3/maheen-data/horse_project/face'
    out_dir_meta_face_old_list = [
        os.path.join(out_dir_meta_face_old, 'im'),
        os.path.join(out_dir_meta_face_old, 'npy')
    ]
    num_neighbors = 5
    out_file_face = os.path.join(
        out_dir_meta_face,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    out_file_horse = os.path.join(
        out_dir_meta_horse,
        'matches_' + str(num_neighbors) + '_train_fiveKP.txt')
    missing_files = makeMatchFile(num_neighbors, matches_file, face_data_file,
                                  out_dir_meta_horse_list,
                                  out_dir_meta_face_list, out_file_horse,
                                  out_file_face, out_dir_meta_face_old_list)
    return

    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    num_neighbors = 5
    out_file_face = os.path.join(out_dir_meta_face,
                                 'matches_' + str(num_neighbors) + '.txt')
    out_file_face_new = os.path.join(
        out_dir_meta_face, 'matches_noIm_' + str(num_neighbors) + '.txt')
    # modifyHumanFile(out_file_face,out_file_face_new);
    # old_data=util.readLinesFromFile(out_file_face);
    # old_data=[line_curr.split(' ')[1] for line_curr in old_data];
    # new_data=util.readLinesFromFile(out_file_face_new);
    # new_data=[line_curr.split(' ')[0] for line_curr in new_data];
    # assert len(old_data)==len(new_data);
    # for i,old_line in enumerate(old_data):
    #     print i;
    #     assert old_line==new_data[i];
    return

    matches_file = '/home/laoreja/data/knn_res_new/5_points_list.txt'
    matches_file = '/home/laoreja/data/knn_res_new/knn_train_list.txt'
    face_data_file = '/home/laoreja/new-deep-landmark/dataset/train/aflw_trainImageList.txt'
    face_data_list_file = '/home/SSD3/maheen-data/aflw_data/npy/data_list.txt'
    out_dir_meta_horse = '/home/SSD3/maheen-data/horse_project/horse'
    out_dir_meta_horse_list = [
        os.path.join(out_dir_meta_horse, 'im'),
        os.path.join(out_dir_meta_horse, 'npy')
    ]
    out_dir_meta_face = '/home/SSD3/maheen-data/horse_project/aflw'
    out_dir_meta_face_list = [
        os.path.join(out_dir_meta_face, 'im'),
        os.path.join(out_dir_meta_face, 'npy')
    ]
    num_neighbors = 5
    out_file_face = os.path.join(out_dir_meta_face,
                                 'matches_' + str(num_neighbors) + '.txt')
    out_file_horse = os.path.join(out_dir_meta_horse,
                                  'matches_' + str(num_neighbors) + '.txt')
    makeMatchFile(num_neighbors, matches_file, face_data_file,
                  out_dir_meta_horse_list, out_dir_meta_face_list,
                  out_file_horse, out_file_face)
    return

    # script_saveTrainTxt()
    # dir_viz='/home/SSD3/maheen-data/temp/horse_human/viz_transform_aflw';
    # visualize.writeHTMLForFolder(dir_viz,'.jpg');
    return

    out_dir_meta = '/home/SSD3/maheen-data'
    face_dir = 'aflw_data'
    horse_dir = 'horse_data'
    num_neighbors = 5
    path_replace_horse = [
        '/home/laoreja/data/horse-images/annotation',
        os.path.join(out_dir_meta, horse_dir, 'im')
    ]
    path_replace_face = ['/npy/', '/im/']
    new_match_file = os.path.join(out_dir_meta, face_dir,
                                  'match_' + str(num_neighbors) + '.txt')
    out_face_train_file = os.path.join(
        out_dir_meta, face_dir, 'match_' + str(num_neighbors) + '_train.txt')
    out_horse_train_file = os.path.join(
        out_dir_meta, horse_dir, 'match_' + str(num_neighbors) + '_train.txt')
    horse_txt_file = os.path.join(out_dir_meta, horse_dir, 'train.txt')
    face_txt_file = os.path.join(out_dir_meta, face_dir, 'train.txt')
    horse_train = util.readLinesFromFile(horse_txt_file)
    horse_train_just_beginning = [
        horse_curr.split(' ')[0] for horse_curr in horse_train
    ]
    horse_train_just_beginning = [
        horse_curr[:horse_curr.rindex('.')]
        for horse_curr in horse_train_just_beginning
    ]
    print horse_train_just_beginning[0]
    face_train = util.readLinesFromFile(face_txt_file)
    face_train_just_beginning = [
        face_curr.split(' ')[0] for face_curr in face_train
    ]
    face_train_just_beginning = [
        face_curr[:face_curr.rindex('.')]
        for face_curr in face_train_just_beginning
    ]
    print len(horse_train)
    print horse_train[0]
    print len(face_train)
    print face_train[0]
    # return
    matches = util.readLinesFromFile(new_match_file)
    print(len(matches))
    matches = [match_curr.split(' ') for match_curr in matches]
    horse_matches = []
    face_matches = []
    for match_curr in matches:
        assert len(match_curr) == num_neighbors + 1
        horse_curr = match_curr[0]
        horse_curr_path, horse_name = os.path.split(horse_curr)
        if horse_curr_path[-3:] == 'gxy':
            horse_curr_path = horse_curr_path[:-3]
        horse_curr_path = horse_curr_path.replace(path_replace_horse[0],
                                                  path_replace_horse[1])
        horse_curr = os.path.join(horse_curr_path,
                                  horse_name[:horse_name.rindex('.')])
        if horse_curr in horse_train_just_beginning:
            horse_match = horse_train[horse_train_just_beginning.index(
                horse_curr)]
        else:
            # print horse_curr
            # print match_curr[0];
            # raw_input();
            continue
        for face_curr in match_curr[1:]:
            face_curr = face_curr[:face_curr.rindex('.')]
            face_curr = face_curr.replace(path_replace_face[0],
                                          path_replace_face[1])
            face_match = face_train[face_train_just_beginning.index(face_curr)]
            horse_matches.append(horse_match)
            face_matches.append(face_match)
        # print match_curr;
        # print match_curr[0];
        # for idx,i in enumerate(match_curr[1:]):
        #     print idx,face_matches[idx],i,horse_matches[idx]
    assert len(face_matches) == len(horse_matches)
    print len(face_matches)
    util.writeFile(out_face_train_file, face_matches)
    util.writeFile(out_horse_train_file, horse_matches)
    return

    # face_dir='/home/SSD3/maheen-data/face_data';
    # train_txt=os.path.join(face_dir,'train.txt');
    # files=util.readLinesFromFile(train_txt);
    # files=[file_curr.split(' ') for file_curr in files];
    # [im_files,npy_files]=zip(*files);
    # for idx,npy_file in enumerate(npy_files):
    #     print idx,len(npy_files);
    #     assert os.path.exists(npy_file);
    #     assert np.load(npy_file).shape[1]==3;
    # print len(im_files);
    # print (im_files[0]);
    # print len(npy_files);
    # print (npy_files[0]);
    dir_viz = '/home/SSD3/maheen-data/temp/horse_human/viz_transform'
    visualize.writeHTMLForFolder(dir_viz, '.jpg')
    return

    horse_data = '/home/SSD3/maheen-data/horse_data'
    new_face_data = '/home/SSD3/maheen-data/face_data'
    old_txt = 'train.txt'
    num_to_keep = 10
    new_txt = 'train_' + str(num_to_keep) + '.txt'
    for data_type in [horse_data, new_face_data]:
        lines_new = util.readLinesFromFile(os.path.join(data_type, old_txt))
        random.shuffle(lines_new)
        lines_new = lines_new[:num_to_keep]
        file_new = os.path.join(data_type, new_txt)
        util.writeFile(file_new, lines_new)
        print len(lines_new), file_new
    return
def script_makeBboxPairFiles(params):
    # Unpack parameters.
    path_txt = params.path_txt
    path_pre = params.path_pre
    type_data = params.type_data
    out_dir_meta = params.out_dir_meta
    out_dir_im = params.out_dir_im
    out_dir_npy = params.out_dir_npy
    out_file_list_npy = params.out_file_list_npy
    out_file_list_im = params.out_file_list_im
    out_file_pairs = params.out_file_pairs
    overwrite = params.overwrite

    util.mkdir(out_dir_im)
    util.mkdir(out_dir_npy)

    # Parse the annotation file into image paths, bounding boxes, and keypoints.
    if type_data == 'face':
        path_im, bbox, anno_points = parseAnnoFile(path_txt, path_pre, face=True)
    else:
        path_im, bbox, anno_points = parseAnnoFile(path_txt, path_pre, face=False)

    args = []
    args_bbox_npy = []
    data_pairs = []
    for idx, path_im_curr, bbox_curr, key_pts in zip(
            range(len(path_im)), path_im, bbox, anno_points):
        path_curr, file_name = os.path.split(path_im_curr)
        file_name = file_name[:file_name.rindex('.')]
        path_curr = path_curr.split('/')

        # Pick the directory name that identifies the source of the image.
        if type_data == 'horse':
            if path_curr[-1] == 'gxy':
                path_pre_curr = path_curr[-2]
            else:
                path_pre_curr = path_curr[-1]
        else:
            path_pre_curr = path_curr[-1]

        # AFLW file names can repeat across folders, so make them unique.
        if type_data == 'aflw':
            file_name = file_name + '_' + str(idx)

        out_dir_curr = os.path.join(out_dir_im, path_pre_curr)
        out_dir_npy_curr = os.path.join(out_dir_npy, path_pre_curr)
        util.mkdir(out_dir_curr)
        util.mkdir(out_dir_npy_curr)

        # out_file=os.path.join(out_dir_curr,file_name);
        out_file = os.path.join(out_dir_curr, file_name + '.jpg')
        out_file_npy = os.path.join(out_dir_npy_curr, file_name + '.npy')
        data_pairs.append((out_file, out_file_npy))

        if not os.path.exists(out_file) or overwrite:
            args.append((path_im_curr, out_file, bbox_curr, idx))

        if not os.path.exists(out_file_npy) or overwrite:
            args_bbox_npy.append((bbox_curr, key_pts, out_file_npy, idx))

    # Crop and save the bounding-box images in parallel.
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    p.map(saveBBoxIm, args)
    # p.map(saveBBoxNpy,args_bbox_npy);

    data_list_npy = [arg_curr[2] for arg_curr in args_bbox_npy]
    data_list_im = [arg_curr[1] for arg_curr in args]
    util.writeFile(out_file_list_npy, data_list_npy)
    util.writeFile(out_file_list_im, data_list_im)

    data_pairs = [pair[0] + ' ' + pair[1] for pair in data_pairs]
    util.writeFile(out_file_pairs, data_pairs)
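
# Minimal usage sketch for script_makeBboxPairFiles (not from the original
# source). The field names below are exactly the attributes the function reads
# from `params`; the namedtuple container, paths, and values are assumptions
# for illustration only.
from collections import namedtuple

BboxPairParams = namedtuple('BboxPairParams', [
    'path_txt', 'path_pre', 'type_data', 'out_dir_meta', 'out_dir_im',
    'out_dir_npy', 'out_file_list_npy', 'out_file_list_im', 'out_file_pairs',
    'overwrite'
])

# params = BboxPairParams(
#     path_txt='/path/to/anno.txt', path_pre='/path/to/images',
#     type_data='horse', out_dir_meta='/path/to/out',
#     out_dir_im='/path/to/out/im', out_dir_npy='/path/to/out/npy',
#     out_file_list_npy='/path/to/out/list_npy.txt',
#     out_file_list_im='/path/to/out/list_im.txt',
#     out_file_pairs='/path/to/out/pairs.txt', overwrite=False)
# script_makeBboxPairFiles(params)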
def main():
    all_dirs_file = '/disk2/februaryExperiments/training_jacob/all_dirs.txt'
    command_file_pre = '/disk2/februaryExperiments/training_jacob/commands_training_data_'
    train_data_file = '/disk2/februaryExperiments/training_jacob/caffe_files/train.txt'
    check_file = 'done.mat'
    num_proc = 12
    # command_files = script_writeCommandsForPreprocessing(all_dirs_file,command_file_pre,num_proc,check_file);

    all_dirs = util.readLinesFromFile(all_dirs_file)
    # all_dirs=all_dirs[:10];
    random.shuffle(all_dirs)
    strings = []
    for no_dir_curr, dir_curr in enumerate(all_dirs):
        print no_dir_curr, dir_curr
        dir_curr = dir_curr[:-1]
        curr_flos = [
            os.path.join(dir_curr, curr_flo)
            for curr_flo in os.listdir(dir_curr) if curr_flo.endswith('.tif')
        ]
        for curr_flo in curr_flos:
            curr_im = curr_flo.replace('.tif', '.jpg')
            assert os.path.exists(curr_im)
            string_curr = curr_im + ' ' + curr_flo + ' '
            strings.append(string_curr)
    print len(strings)
    # print strings[:3];
    # random.shuffle(strings);
    util.writeFile(train_data_file, strings)
    # with open (train_data_file,'wb') as f:
    #     for im_curr,flo_curr in zip(ims,flos):
    #         string_curr=im_curr+' '+flo_curr+'\n';
    #         f.write(string_curr);
    return

    dirs = getRemainingDirs(util.readLinesFromFile(all_dirs_file), check_file)
    last_lines = []
    for dir_curr in dirs:
        last_lines.append(
            util.readLinesFromFile(os.path.join(dir_curr, 'log.txt'))[-2])
    print set(last_lines)
    return

    meta_dirs_image = [
        '/disk2/image_data_moved',
        '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    ]
    meta_dirs_flo = [
        '/disk2/flow_data',
        '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data'
    ]
    sub_dirs_file = 'all_sub_dirs.txt'
    out_dir = '/disk2/februaryExperiments/training_jacob'
    out_file_correspondences = os.path.join(out_dir, 'im_flo_correspondences.p')
    proto_file = 'deploy.prototxt'
    out_file = os.path.join(out_dir, 'im_flo_files.p')
    out_dir = '/disk2/februaryExperiments/training_jacob/training_data'
    mat_file = 'im_flo_files.mat'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    im_flo_dirs = pickle.load(open(out_file_correspondences, 'rb'))
    [im_dirs, flo_dirs] = zip(*im_flo_dirs)
    for im_dir, flo_dir in im_flo_dirs:
        script_saveMatFiles(flo_dir, im_dir, out_dir, mat_file, proto_file)
        raise PException('File "{}.a" not found', inf)
    shutil.copyfile(inf, problem["input"])
    shutil.copyfile(inf + ".a", problem["answer"])
    return 0


parser_input = subparsers.add_parser(
    'input', aliases='i', help='Copies test file',
    description="""
    Copies specified test and answer files from testset.
    """)
parser_input.add_argument('test', metavar='TEST', help='test to copy')
parser_input.set_defaults(func=input)

if __name__ == "__main__":
    try:
        options = parser.parse_args(sys.argv[1:])
        util.options = options
        exit(options.func(options))
    except PException as e:
        log.error(e.message)
        util.writeFile("t.out", e.message)
        exit(1)
    except KeyboardInterrupt:
        log.warning("Interrupted by ^C")
        exit(1)
    except WindowsError as e:
        log.error("SYSTEM ERROR: {}", str(e))
        exit(1)
def saveHTML(out_us, us_test, batch_size=50, num_iter=2, justHTML=False):
    dir_server = './'
    post_us = ['_gt_pts.npy', '_pred_pts.npy']
    im_paths, gt_pt_files, pred_pt_files = us_getFilePres(
        us_test, out_us, post_us, num_iter, batch_size)
    if justHTML:
        # Write the results page in file order, without computing errors.
        post_ims_us = [
            '_org_nokp.jpg', '_gt.jpg', '_warp_nokp.jpg', '_warp.jpg',
            '_org.jpg'
        ]
        captions_for_row = [
            'Input', 'Ground Truth', 'Warped Image', 'Prediction Warped',
            'Prediction'
        ]
        out_file_html = os.path.join(out_us, 'results.html')
        idx_sort = range(len(gt_pt_files))
        ims = []
        captions = []
        for idx_idx, idx_curr in enumerate(idx_sort):
            file_curr = gt_pt_files[idx_curr]
            file_curr = os.path.split(file_curr)[1]
            file_curr = file_curr[:file_curr.index('_gt')]
            files_us = [
                os.path.join(dir_server, file_curr + post_im_curr)
                for post_im_curr in post_ims_us
            ]
            captions_us = [
                str(idx_idx) + ' ' + caption_curr
                for caption_curr in captions_for_row
            ]
            ims.append(files_us)
            captions.append(captions_us)
        visualize.writeHTML(out_file_html, ims, captions)
        print out_file_html
    else:
        # Compute per-keypoint errors and sort the rows by average error.
        errors_curr = us_getErrorsAll(us_test, out_us, post_us, num_iter,
                                      batch_size)
        err = np.array(errors_curr)
        bin_keep = err >= 0
        err[err < 0] = 0
        div = np.sum(bin_keep, 1)
        sum_val = np.sum(err, 1).astype(np.float)
        avg = sum_val / div
        post_ims_us = [
            '_org_nokp.jpg', '_gt.jpg', '_warp_nokp.jpg', '_warp.jpg',
            '_org.jpg'
        ]
        captions_for_row = [
            'Input', 'Ground Truth', 'Warped Image', 'Prediction Warped',
            'Prediction'
        ]
        out_file_html = os.path.join(out_us, 'results.html')
        idx_sort = np.argsort(avg)
        ims = []
        captions = []
        for idx_idx, idx_curr in enumerate(idx_sort):
            file_curr = gt_pt_files[idx_curr]
            file_curr = os.path.split(file_curr)[1]
            file_curr = file_curr[:file_curr.index('_gt')]
            files_us = [
                os.path.join(dir_server, file_curr + post_im_curr)
                for post_im_curr in post_ims_us
            ]
            captions_us = [
                str(idx_idx) + ' ' + caption_curr
                for caption_curr in captions_for_row
            ]
            ims.append(files_us)
            captions.append(captions_us)
        visualize.writeHTML(out_file_html, ims, captions)
        print out_file_html

        # Plot the per-keypoint failure rates and save the summary stats.
        labels = ['Ours']  # ,'thems'];
        ticks = ['LE', 'RE', 'N', 'LM', 'RM', 'ALL']
        colors = ['b']  # ,'g'];
        ylim = None
        errors_all = []
        errors_curr = us_getErrorsAll(us_test, out_us, post_us, num_iter,
                                      batch_size)
        failures, failures_kp = getErrRates(errors_curr, 0.1)
        errors_all.append(errors_curr)
        # errors_all.append(errors_curr[:])
        out_file_kp_err = os.path.join(out_us, 'bar.pdf')
        err_rates_all = plotComparisonKpError(errors_all,
                                              out_file_kp_err,
                                              ticks,
                                              labels,
                                              colors=colors,
                                              ylim=ylim)
        out_file_stats = os.path.join(out_us, 'stats.txt')
        # print err_rates_all;
        string = [
            str(ticks[idx_num_curr]) + ' ' + str(num_curr)
            for idx_num_curr, num_curr in enumerate(err_rates_all[0])
        ]
        print string
        # print failures,failures_kp
        # print errors_all
        # string=' '.join(string);
        util.writeFile(out_file_stats, string)
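
# Hypothetical call sketch for saveHTML (not part of the original script): the
# paths below are placeholders, and batch_size/num_iter should match the values
# used when the test outputs were written.
# out_us = '/path/to/test_output'
# us_test = '/path/to/test_list.txt'
# saveHTML(out_us, us_test, batch_size=50, num_iter=2, justHTML=True)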
def main():
    # dir_meta='/disk2/flow_data';
    # dir_meta_old='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data';
    # deploy_file='deploy.prototxt';
    # dir_mids=[os.path.join(dir_meta,dir_mid) for dir_mid in os.listdir(dir_meta) if os.path.isdir(os.path.join(dir_meta,dir_mid))]
    # dirs_left=[os.path.join(dir_mid,dir_curr) for dir_mid in dir_mids for dir_curr in os.listdir(dir_mid) if os.path.isdir(os.path.join(dir_mid,dir_curr))]
    # dirs_left=[os.path.join(dir_mid,dir_curr) for dir_mid in dirs_left for dir_curr in os.listdir(dir_mid) if os.path.isdir(os.path.join(dir_mid,dir_curr))]
    # print len(dirs_left);
    # print dirs_left[0];
    # for dir_curr in dirs_left:
    #     deploy_curr=os.path.join(dir_curr,deploy_file);
    #     print deploy_curr
    #     data=[];
    #     with open(deploy_curr,'r') as f:
    #         data = f.read()
    #     with open(deploy_curr+'_backup','w') as f:
    #         f.write(data);
    #     data = data.replace(dir_meta_old, dir_meta)
    #     with open(deploy_curr, "w") as f:
    #         f.write(data);
    # return

    # video_list_file='/disk2/video_data/video_list.txt'
    # path_to_video_meta='/disk2/video_data';
    # path_to_flo_meta='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data';
    # path_to_im_meta='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data';
    # video_files=util.readLinesFromFile(video_list_file);
    # # image_dirs=[dir_curr.replace(path_to_video_meta,path_to_im_meta)[:-4] for dir_curr in video_files];
    # # flo_dirs=[dir_curr.replace(path_to_video_meta,path_to_flo_meta)[:-4] for dir_curr in video_files];
    # flo_dirs=pickle.load(open('/disk2/temp/dirs_done.p','rb'));
    # image_dirs=[dir_curr.replace(path_to_flo_meta,path_to_im_meta) for dir_curr in flo_dirs];
    # print len(image_dirs)
    # out_dir='/disk2/image_data_moved';
    # out_file='/disk2/image_data_moved/mv_commands_2.txt'
    # commands=[];
    # image_dirs_to_move=image_dirs[5000:7000];
    # for image_dir in image_dirs_to_move:
    #     image_dir=util.escapeString(image_dir);
    #     new_dir=image_dir.replace(path_to_im_meta,out_dir);
    #     command='mkdir -p '+new_dir+';';
    #     command=command+'mv '+image_dir+'/* '+new_dir;
    #     commands.append(command);
    # util.writeFile('/disk2/image_data_moved/dirs_moved_2.txt',image_dirs_to_move);
    # util.writeFile(out_file,commands);
    # return

    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_video_meta = '/disk2/video_data'
    # path_to_flo_meta='/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data';
    path_to_flo_meta = '/disk2/flow_data'
    path_to_im_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    video_files = util.readLinesFromFile(video_list_file)
    # image_dirs=[dir_curr.replace(path_to_video_meta,path_to_im_meta)[:-4] for dir_curr in video_files];
    # flo_dirs=[dir_curr.replace(path_to_video_meta,path_to_flo_meta)[:-4] for dir_curr in video_files];
    flo_dirs = pickle.load(open('/disk2/temp/dirs_done_disk2.p', 'rb'))
    image_dirs = [
        dir_curr.replace(path_to_flo_meta, path_to_im_meta)
        for dir_curr in flo_dirs
    ]
    print len(image_dirs)
    finished = []
    i = 0
    for image_dir, flo_dir in zip(image_dirs, flo_dirs):
        print i
        count_im_command = 'ls ' + os.path.join(util.escapeString(image_dir),
                                                '*.ppm') + '| wc -l'
        count_flo_command = 'ls ' + os.path.join(util.escapeString(flo_dir),
                                                 '*.flo') + '| wc -l'
        # im_count=int(subprocess.check_output(count_im_command,shell=True));
        # flo_count=int(subprocess.check_output(count_flo_command,shell=True));
        im_count = len([
            file_curr for file_curr in os.listdir(image_dir)
            if file_curr.endswith('.ppm')
        ])
        flo_count = len([
            file_curr for file_curr in os.listdir(flo_dir)
            if file_curr.endswith('.flo')
        ])
        print i, flo_count, im_count
        if flo_count + 1 == im_count:
            finished.append(1)
        else:
            finished.append(0)
        i += 1
    finished = np.array(finished)
    print 'done', sum(finished == 1)
    print 'not done', sum(finished == 0)
    pickle.dump([finished, image_dirs], open('/disk2/temp/to_rerun.p', 'wb'))
    return

    dir_flownet_meta = '/home/maheenrashid/Downloads/flownet/flownet-release/models/flownet'
    caffe_bin = os.path.join(dir_flownet_meta, 'bin/caffe')
    path_to_model = os.path.join(dir_flownet_meta,
                                 'model/flownet_official.caffemodel')
    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_video_meta = '/disk2/video_data'
    in_dir_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data'
    in_dir_meta = '/disk2/flow_data'
    # if not os.path.exists(new_in_dir_meta):
    #     os.mkdir(new_in_dir_meta);
    deploy_name = 'deploy.prototxt'
    gpu = 0
    dirs = [
        dir_curr.replace(path_to_video_meta, in_dir_meta)[:-4]
        for dir_curr in util.readLinesFromFile(video_list_file)
    ]
    dirs = [dir_curr for dir_curr in dirs if os.path.exists(dir_curr)]
    counts = [
        len(os.listdir(dir_curr)) for dir_curr in dirs
        if os.path.exists(dir_curr)
    ]
    dirs_left = []
    dirs_done = []
    for idx_count, count in enumerate(counts):
        if count == 4:
            dirs_left.append(dirs[idx_count])
            # dir_curr=dirs[idx_count]
            # deploy_curr=os.path.join(dir_curr,deploy_name);
            # im_file=os.path.join(dir_curr,'im_1.txt');
            # batch_size = sum(1 for line in open(im_file))
            # old_str='batch_size: '+str(int(ceil(batch_size/5)));
            # print old_str,
            # batch_size = int(ceil(batch_size/8));
            # new_str='batch_size: '+str(batch_size);
            # print new_str
            # data=[];
            # with open(deploy_curr,'r') as f:
            #     data = f.read()
            # # print data[:300];
            # assert old_str in data;
            # data = data.replace(old_str, new_str)
            # # print data[:300];
            # with open(deploy_curr, "w") as f:
            #     f.write(data);
            # out_dir_curr=dir_curr.replace(in_dir_meta,new_in_dir_meta);
            # mkdir of new location
            # mkdir_command='mkdir -p '+util.escapeString(out_dir_curr)
            # print mkdir_command
            # subprocess.call(mkdir_command, shell=True)
            # mv contents from old to new
            # mv_command='mv '+util.escapeString(dir_curr)+'/* '+util.escapeString(out_dir_curr);
            # print mv_command
            # subprocess.call(mv_command, shell=True)
            # append new to dirs_left
            # dirs_left.append(out_dir_curr);
            # raw_input();
        else:
            dirs_done.append(dirs[idx_count])
    print min(counts)
    counts = np.array(counts)
    print sum(counts == 4)
    print len(dirs_left)
    mid_point = len(dirs_left) / 2
    print mid_point, len(dirs_left) - mid_point
    out_file_commands = '/disk2/januaryExperiments/gettingFlows/flownet_commands_left_0.txt'
    gpu = 0
    # writeCommands_hacky(out_file_commands,dirs_left[:mid_point],caffe_bin,deploy_name,path_to_model,gpu)
    out_file_commands = '/disk2/januaryExperiments/gettingFlows/flownet_commands_left_1.txt'
    gpu = 1
    # writeCommands_hacky(out_file_commands,dirs_left[mid_point:],caffe_bin,deploy_name,path_to_model,gpu)
    print len(dirs_done)
    pickle.dump(dirs_done, open('/disk2/temp/dirs_done_disk2.p', 'wb'))
    return

    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_video_meta = '/disk2/video_data'
    in_dir_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    out_dir_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/flow_data'
    path_to_deploy = '/disk2/januaryExperiments/gettingFlows/deploy_template.prototxt'
    out_file_commands = '/disk2/januaryExperiments/gettingFlows/flownet_commands.txt'
    dir_flownet_meta = '/home/maheenrashid/Downloads/flownet/flownet-release/models/flownet'
    path_to_sizer = os.path.join(dir_flownet_meta, 'bin/get_image_size')
    caffe_bin = os.path.join(dir_flownet_meta, 'bin/caffe')
    path_to_model = os.path.join(dir_flownet_meta,
                                 'model/flownet_official.caffemodel')
    text_1 = 'im_1.txt'
    text_2 = 'im_2.txt'
    deploy_file = 'deploy.prototxt'
    gpu = 0
    params_dict = {}
    params_dict['video_list_file'] = video_list_file
    params_dict['path_to_video_meta'] = path_to_video_meta
    params_dict['in_dir_meta'] = in_dir_meta
    params_dict['out_dir_meta'] = out_dir_meta
    params_dict['path_to_deploy'] = path_to_deploy
    params_dict['out_file_commands'] = out_file_commands
    params_dict['dir_flownet_meta'] = dir_flownet_meta
    params_dict['path_to_sizer'] = path_to_sizer
    params_dict['caffe_bin'] = caffe_bin
    params_dict['path_to_model'] = path_to_model
    params_dict['text_1'] = text_1
    params_dict['text_2'] = text_2
    params_dict['deploy_file'] = deploy_file
    params_dict['gpu'] = gpu
    params = createParams('writeFlownetCommands')
    params = params(**params_dict)
    # script_writeFlownetCommands(params);
    commands = util.readLinesFromFile(params.out_file_commands)
    commands = [c.replace('-gpu 1', '-gpu 0') for c in commands]
    util.writeFile(params.out_file_commands, commands)
    pickle.dump(params._asdict(),
                open(params.out_file_commands + '_meta_experiment.p', 'wb'))
    return

    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_im_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    path_to_video_meta = '/disk2/video_data'
    commands_file_text = '/disk2/januaryExperiments/gettingFlows/resize_commands.txt'
    video_list = util.readLinesFromFile(video_list_file)
    print len(video_list)
    image_dirs = [
        video_curr.replace(path_to_video_meta, path_to_im_meta)[:-4]
        for video_curr in video_list
    ]
    print len(image_dirs), image_dirs[0]
    image_dirs = image_dirs[:1]
    commands = []
    command_conv = ['convert', '-resize 512x384']
    for image_dir in image_dirs:
        image_list = [
            os.path.join(image_dir, im) for im in os.listdir(image_dir)
            if im.endswith('.ppm')
        ]
        for image_curr in image_list:
            command_curr = [
                command_conv[0], image_curr, command_conv[1], image_curr
            ]
            command_curr = ' '.join(command_curr)
            commands.append(command_curr)
    print len(commands)
    print commands[0]
    util.writeFile(commands_file_text, commands)
    return

    video_list_file = '/disk2/video_data/video_list.txt'
    path_to_im_meta = '/media/maheenrashid/e5507fe3-2bff-4cbe-bc63-400de6deba92/maheen_data/image_data'
    path_to_video_meta = '/disk2/video_data'
    path_to_txt_1 = '/disk2/januaryExperiments/gettingFlows/temp_im_1.txt'
    path_to_txt_2 = '/disk2/januaryExperiments/gettingFlows/temp_im_2.txt'
    video_list = util.readLinesFromFile(video_list_file)
    print len(video_list)
    image_dirs = [
        video_curr.replace(path_to_video_meta, path_to_im_meta)[:-4]
        for video_curr in video_list
    ]
    print len(image_dirs), image_dirs[0]
    list_1 = []
    list_2 = []
    for image_dir in image_dirs[:10]:
        list_1_curr, list_2_curr = getImageListForFlow(image_dir)
        list_1.extend(list_1_curr[:3])
        list_2.extend(list_2_curr[:3])
    assert len(list_1) == len(list_2)
    util.writeFile(path_to_txt_1, list_1)
    util.writeFile(path_to_txt_2, list_2)