def question_candidates(q_id):
    """Select a useful subset of the answer candidates for question q_id.

    Currently just truncates the chunker output to its first 40 chunks;
    a smarter relevance filter is still needed.
    """
    return chunker.run(q_id)[:40]
def write(start, stop, filename):
    """Chunk every question id in [start, stop) and cache the results.

    Builds a dict mapping question id -> chunk list, then hands it and an
    open file handle to cache_chunkers.cache_chunks.

    Fixes: the file handle was opened and never closed (descriptor leak);
    mode "w" replaces "r+" so the cache file is created if missing and
    truncated instead of leaving a stale tail when the new cache is
    shorter than the old one.
    """
    chunks_by_qid = {}
    for q_id in range(start, stop):
        chunks = chunker.run(q_id)
        print(q_id)  # progress indicator for long runs
        chunks_by_qid[q_id] = chunks
    cache_file = open(filename, "w")
    try:
        cache_chunkers.cache_chunks(chunks_by_qid, cache_file)
    finally:
        cache_file.close()
def write(start, stop, filename):
    """Run the chunker on question ids [start, stop) and cache the output.

    NOTE(review): this duplicates an earlier write() definition in this
    file; the later definition wins at import time — the duplicate should
    probably be removed.

    Fixes: the cache file handle was never closed; it is now managed with
    a context manager. Mode "w" replaces "r+" so the file is created when
    absent and fully truncated before the new cache is written.
    """
    results = {}
    for q_id in range(start, stop):
        all_chunks = chunker.run(q_id)
        print(q_id)  # progress indicator
        results[q_id] = all_chunks
    with open(filename, "w") as cache_file:
        cache_chunkers.cache_chunks(results, cache_file)
def filter_chunks(q_id):
    """Return the chunker's answer candidates for q_id that pass the POS test.

    A candidate is kept when pos.pos_test scores it non-zero against the
    question text looked up for q_id.

    Improvements: the question dictionary is consulted with a direct
    .get() lookup instead of a linear scan over every key, and the
    flag-toggle pairing loop is written with an explicit "pending number"
    variable so the alternation is obvious.
    """
    answers = chunker.run(q_id)

    # read_questions_no_answers() yields flat sequences whose elements
    # alternate question-number, question-text; fold them into a dict.
    # NOTE(review): assumed from the pairing logic here — confirm against
    # read_questions.
    qDict = {}
    pending_number = None
    for q in read_questions.read_questions_no_answers():
        for item in q:
            if pending_number is None:
                pending_number = item
            else:
                qDict[pending_number] = item
                pending_number = None

    question = qDict.get(str(q_id))
    if question is None:
        return []  # unknown question id: nothing can pass

    my_answers = []
    for ansCandidate in answers:
        print(ansCandidate)  # debug trace of each candidate considered
        if pos.pos_test(question, ansCandidate) != 0:
            my_answers.append(ansCandidate)
    return my_answers
def filter_chunks(q_id):
    """Keep only the answer candidates for q_id that score non-zero on the POS test."""
    candidates = chunker.run(q_id)
    kept = []

    # Pair up the flat question stream into {question_number: question_text}.
    questions = {}
    number = None
    expecting_number = True
    for row in read_questions.read_questions_no_answers():
        for field in row:
            if expecting_number:
                number = field
                expecting_number = False
            else:
                questions[number] = field
                expecting_number = True

    wanted = str(q_id)
    for key in questions:
        if key != wanted:
            continue
        for candidate in candidates:
            print(candidate)
            if pos.pos_test(questions[key], candidate) != 0:
                kept.append(candidate)
    return kept
# Turns out I don't see any questions where a VP is likely, so I made
# VP = 0.1, 0 might even be better.
def pos_test(question, candidate):
    """Score an answer candidate by its phrase label.

    candidate is an (answer, doc_num, index, features, q_id) tuple; only
    the `features` phrase label is consulted: NP scores 1, VP scores 0.1,
    anything else scores 0 (callers drop zero-scored candidates). The
    question argument is currently unused but kept for interface
    compatibility.

    Fix: the Python-2-only tuple parameter (removed by PEP 3113) is
    replaced by explicit unpacking inside the body; callers still pass
    the candidate tuple as a single positional argument.
    """
    answer, doc_num, index, features, q_id = candidate
    if features == "NP":
        return 1
    if features == "VP":
        return 0.1
    return 0


if __name__ == "__main__":
    testQ = 213
    answers = run(testQ)
    #print answers
    #for ansCandidate in answers:
    #    print ansCandidate[0], ansCandidate[3]

    # Again I stick the questions into a dictionary
    qList = read_questions.read_questions_no_answers()
    qDict = {}
    b = 0
    for q in qList:
        for q2 in q:
            if b == 0:
                qN = q2
                b = 1
            else:
                qDict[qN] = q2
                # Fix: reset the flag after storing a pair; without it the
                # first question number swallowed every later value
                # (the sibling filter_chunks loops do reset it).
                b = 0
# PP and S get 0 so they will not be part of our answer.
# Turns out I don't see any questions where a VP is likely, so I made
# VP = 0.1, 0 might even be better.
def pos_test(question, candidate):
    """Return a phrase-type score for one answer candidate.

    candidate: (answer, doc_num, index, features, q_id); only `features`
    matters. NP -> 1, VP -> 0.1, everything else (PP, S, ...) -> 0.
    `question` is unused but retained for interface compatibility.

    NOTE(review): duplicate of an earlier pos_test in this file.

    Fix: Python-2-only tuple parameter unpacking (PEP 3113) replaced with
    in-body unpacking; the call signature is unchanged for callers.
    """
    answer, doc_num, index, features, q_id = candidate
    if features == "NP":
        return 1
    if features == "VP":
        return 0.1
    return 0


if __name__ == "__main__":
    testQ = 213
    answers = run(testQ)
    #print answers
    #for ansCandidate in answers:
    #    print ansCandidate[0], ansCandidate[3]

    # Again I stick the questions into a dictionary
    qList = read_questions.read_questions_no_answers()
    qDict = {}
    b = 0
    for q in qList:
        for q2 in q:
            if b == 0:
                qN = q2
                b = 1
            else:
                qDict[qN] = q2
                # Fix: reset the alternation flag so later pairs are
                # stored under their own question numbers.
                b = 0
def run(dataset_dir, madfile, stages):
    """Run the requested stages of the carving pipeline on one dataset.

    dataset_dir -- root directory of the dataset to process
    madfile     -- path to the .mad path file for the dataset
    stages      -- collection of stage numbers (1-9) to execute

    Returns 0 on success, or -k when stage k fails.

    Fixes: stray semicolons removed; "ocurred" typo; the nine copies of
    the blank-line/banner/blank-line boilerplate are hoisted into a
    helper.
    """

    def _banner(label):
        # Announce a stage on stdout, framed by blank lines (matches the
        # original print "" / print label / print "" pattern).
        print("")
        print(label)
        print("")

    # verify that carving directory exists, which will contain all
    # intermediary and output files for this code
    carvedir = dataset_filepaths.get_carving_dir(dataset_dir)
    if not os.path.exists(carvedir):
        os.makedirs(carvedir)

    # filter the raw range scans to include statistical information
    # about the scanner intrinsics
    if 1 in stages:
        _banner("STAGE 1 - GENERATING SCAN STATISTICS")
        if filter_urg_scans.run(dataset_dir) != 0:
            return -1  # an error occurred
        if filter_tango_scans.run(dataset_dir) != 0:
            return -1  # an error occurred

    # convert the mad file into a noisypath file
    if 2 in stages:
        _banner("STAGE 2 - GENERATING PATH STATISTICS")
        if noisypath_gen.run(dataset_dir, madfile) != 0:
            return -2  # an error occurred

    # run the wedge generation program on input scans
    if 3 in stages:
        _banner("STAGE 3 - GENERATING WEDGES")
        path_file = dataset_filepaths.get_noisypath_file(dataset_dir)
        if wedge_gen.run(dataset_dir, path_file) != 0:
            return -3  # an error occurred

    # run the chunker program on the resulting wedge file
    if 4 in stages:
        _banner("STAGE 4 - RUNNING CHUNKER")
        if chunker.run(dataset_dir) != 0:
            return -4  # an error occurred

    # run the procarve program on the output chunks
    if 5 in stages:
        _banner("STAGE 5 - CARVING")
        if procarve.run(dataset_dir) != 0:
            return -5  # an error occurred

    # generate wall samples and floorplan from carving
    if 6 in stages:
        _banner("STAGE 6 - GENERATING FLOORPLAN")
        if oct2fp.run(dataset_dir, madfile) != 0:
            return -6  # an error occurred

    # run the fp optimization program on resulting octree
    if 7 in stages:
        _banner("STAGE 7 - ALIGNING FLOORPLAN TO CARVING")
        if fp_optimizer.run(dataset_dir) != 0:
            return -7  # an error occurred

    # merge the floorplans and carving into one file
    if 8 in stages:
        _banner("STAGE 8 - MERGING CARVING AND FLOORPLAN")
        if merge_fp_oct.run(dataset_dir) != 0:
            return -8  # an error occurred

    # run the octsurf program to generate mesh
    if 9 in stages:
        _banner("STAGE 9 - GENERATING SURFACE")
        if octsurf.run(dataset_dir) != 0:
            return -9  # an error occurred

    # success
    return 0