def __init__(self, dg_model, gtruth_tf, dname=None):
    #dname should be d8, d1, d8_full, or d3 right now
    self.dname = dname
    self.dg_model = dg_model
    self.gtruth_tf = gtruth_tf
    self.region_to_topology = get_region_to_topo_hash_containment(self.gtruth_tf,
                                                                  self.dg_model)
    self.extractor = dir_util.direction_parser_sdc()
def __init__(self, m4du, rndf_file):
    self.lc = lcm.LCM()

    #load the model
    print "Loading model..."
    self.dg_model = cPickle.load(open(m4du, 'r'))
    self.rndf = ru.rndf(rndf_file, True)
    self.sdc_parser = direction_parser_sdc()

    self.trans_xyz = (0, 0, 0)
    self.trans_latlon = self.rndf.origin
    self.trans_theta = 0

    self.run_inference = True
    self.waypoints = []
    self.cmd = None
    self.curr_location = None
    self.curr_orientation = None

    # Change these two for testing; to be replaced by LCM message
    #self.curr_location = (0, 0, 0)
    #self.cmd = str("Go to the truck")

    self.lc.subscribe("DIRECTION_INPUT", self.on_comment_msg)
    self.lc.subscribe("POSE", self.on_pose_msg)
    self.lc.subscribe("GPS_TO_LOCAL", self.on_transform_msg)
def __init__(self, m4du, region_tagfile, map_filename):
    self.lc = lcm.LCM()

    #load the model
    print "Loading model..."
    self.dg_model = m4du
    self.sdc_parser = direction_parser_sdc()

    # for k1 in dg_model.tmap.keys():
    #     print "===="
    #     for k2 in dg_model.tmap[k1]:
    #         print k2
    #         print dg_model.tmap_locs[k1]
    #         print dg_model.tmap_locs[k2]

    #get the topo_to_region hash
    self.dataset_name = region_tagfile.split("/")[-1].split("_")[0]
    tf_region = tag_file(region_tagfile, map_filename)
    self.topo_to_region = get_topo_to_region_hash(tf_region, self.dg_model)

    self.waypoints = []
    self.cur_pose = None

    self.lc.subscribe("SPEECH_COMMANDS", self.on_speech_msg)
    self.lc.subscribe("POSE", self.on_pose_msg)
    self.cmd = None
def toggleSdcExtractor(self, state):
    if state == Qt.Checked:
        self.extractor = dir_util.direction_parser_wizard_of_oz(
            self.corpus_fname, "stefie10")
    elif state == Qt.Unchecked:
        self.extractor = dir_util.direction_parser_sdc()
    elif state == Qt.PartiallyChecked:
        pass
    else:
        raise ValueError("Unexpected state: " + `state`)
def run_everything(model_filename):
    global curr_goal, curr_location, dg_model, sdc_parser, is_goal

    dg_model = cPickle.load(open(model_filename, 'r'))
    sdc_parser = direction_parser_sdc()

    is_goal = Value('i', 0)
    curr_goal = Array('d', [sys.maxint, sys.maxint])
    curr_location = Array('d', [sys.maxint, sys.maxint, sys.maxint])

    p2 = Process(target=navigator_run, args=(is_goal, curr_goal, curr_location))
    print "started the gui"
    p2.start()
    #p2.join()
    create_gui()
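# Minimal usage sketch (not part of the original source): run_everything expects a
# path to a pickled direction-following model; the default filename below is a
# hypothetical placeholder.
if __name__ == "__main__":
    import sys
    model_filename = sys.argv[1] if len(sys.argv) > 1 else "models/dg_model.pck"
    run_everything(model_filename)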
def generate(out_fn, model_fn, num_to_generate=50, sent_number=0):
    # generate questions and print them and their properties

    #parsing the arguments
    if num_to_generate in [None, ""]:
        num_to_generate = 50
    else:
        num_to_generate = int(num_to_generate)

    if sent_number in [None, ""]:
        sent_number = 0
    else:
        sent_number = int(sent_number)

    #load the model and the result
    m4du = cPickle.load(open(model_fn, 'r'))
    m4du = topN.model(m4du)
    save_data = cPickle.load(open(out_fn, 'r'))
    sdc_parser = direction_parser_sdc()

    print "num_sentences =", len(save_data["sentences"])

    #use the first inference for testing purposes
    sentence = save_data["sentences"][sent_number]
    tn_paths = save_data["paths_topN"][sent_number]
    tn_probs = save_data["probability_topN"][sent_number]
    topN_paths = [(tn_paths[i], tn_probs[i]) for i in range(len(tn_paths))]

    sdcs = sdc_parser.extract_SDCs(sentence)
    sdcs = m4du.m4du.get_usable_sdc(sdcs)

    all_qns = question_sdc_landmark.question_seq_sdc_landmark(
        sdcs, m4du, topN_paths)

    for i in range(num_to_generate):
        q = all_qns.next_question()
        print q.qn_text
        print q.ans_to_actions
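# Hedged usage sketch (the file paths below are placeholders, not from the original
# code): generate() reads a pickled topN-inference result plus a pickled model and
# prints candidate clarification questions for a single sentence.
if __name__ == "__main__":
    generate(out_fn="results/topN_inference.pck",   # hypothetical path
             model_fn="models/dg_model.pck",        # hypothetical path
             num_to_generate=10, sent_number=0)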
def dialog(out_fn, model_fn, gtruth_tag_fn, output_dir, num_to_generate=50,
           num_to_ask=2, sent_number=0, objective="deltaH"):
    # from environ_vars import TKLIB_HOME
    # map_fn=TKLIB_HOME+"/data/directions/direction_floor_8_full/direction_floor_8_full_filled.cmf.gz"

    #assuming the topN inference is run beforehand

    #parsing the arguments
    if num_to_generate in [None, ""]:
        num_to_generate = 50
    else:
        num_to_generate = int(num_to_generate)

    if num_to_ask in [None, ""]:
        num_to_ask = 2
    else:
        num_to_ask = int(num_to_ask)

    if sent_number in [None, ""]:
        sent_number = 0
    else:
        sent_number = int(sent_number)

    if objective in [None, ""]:
        objective = "deltaH"

    #load the model and the result
    save_data = cPickle.load(open(out_fn, 'r'))
    N = len(save_data["paths_topN"][0])
    m4du_base = cPickle.load(open(model_fn, 'r'))
    m4du = topN.model(m4du_base, N)

    #function that gives us the orientations for a sentence
    orient = get_orientations_annotated

    mystart, myend = save_data['regions'][sent_number].split('to')
    mystart = mystart.strip()
    print "MY START ", mystart

    # tf = tag_file(gtruth_tag_fn, map_fn)
    print "starting at region:", save_data["start_regions"][sent_number]

    dataset_name = gtruth_tag_fn.split("/")[-1].split("_")[0]
    orientation = orient(deepcopy(m4du), mystart, dataset_name)[0]
    # orientation = orient(m4du, mystart, dataset_name)
    print "ORIENTATION: ", orientation

    topohash = save_data["region_to_topology"]
    # sloc = m4du.m4du.tmap_locs[topohash[mystart][0]]
    sloc = m4du.tmap_locs[topohash[mystart][0]]

    #use the first inference for testing purposes
    orig_path = save_data["orig_path"][sent_number]
    print "Original path\n", orig_path
    sentence = save_data["sentences"][sent_number]
    tn_paths = save_data["paths_topN"][sent_number]
    tn_probs = save_data["probability_topN"][sent_number]
    topN_paths = [(tn_paths[i], tn_probs[i]) for i in range(len(tn_paths))]

    #get the SDCs
    sdc_parser = direction_parser_sdc()
    sdcs = sdc_parser.extract_SDCs(sentence)
    print "SDC before", sdcs
    print type(sdcs)
    print type(sdcs[0]), sdcs[0]
    print str(type(sdcs[0]))
    # return
    sdcs = m4du.m4du.get_usable_sdc(sdcs)
    print "SDC after", sdcs
    print type(sdcs)
    print type(sdcs[0]), sdcs[0]
    print str(type(sdcs[0]))
    # return

    #previous questions
    previous_qns = []

    #choose objective
    if objective == "deltaH":
        select_questions = deltaH.question_selector
    elif objective == "random":
        select_questions = random_qn.random_qn
    elif objective == "distance_RM":
        select_questions = distance_reward_matrix.question_selector_destination_distance
    elif objective == "exact_path_RM":
        select_questions = exact_path_reward_matrix.question_selector_exact_path
    elif objective == "same_dest_RM":
        select_questions = same_destination_reward_matrix.question_selector_same_destination

    # Set up the helper functions. This will add variety in the dialog.
    qn_generator = question_sdc_landmark.question_seq_sdc_landmark
    ask_question = allways_truthful.allways_truthful

    dialog_data = dialog_data_structure()
    dialog_data["N"] = N
    dialog_data["objective"] = objective
    dialog_data["sentence_number"] = sent_number
    dialog_data["num_to_ask"] = num_to_ask
    dialog_data["num_to_gen"] = num_to_generate
    dialog_data["initial_save_data"] = save_data
    dialog_data["paths_topN"].append(topN_paths)

    dialog_dump_fn = output_dir + name_for_dialog_data(dialog_data)
    cPickle.dump(dialog_data, open(dialog_dump_fn, 'w'))
    print "Saved at", dialog_dump_fn

    for curr_question_number in range(num_to_ask):
        print "Looping the dialog"  #so that the loop is non-empty

        #run question generation and select a bunch of questions, & evaluate them
        all_qns = qn_generator(sdcs, m4du, topN_paths)
        questions = []
        for qn_i in range(num_to_generate):
            questions.append(all_qns.next_question())
        questions = filter(lambda x: x != None, questions)
        questions = filter(
            lambda x: bool(x.qn_text not in map(lambda x: x.qn_text, previous_qns)),
            questions)

        #select questions
        best_question, objective_value = select_questions(questions, m4du, sdcs,
                                                          topN_paths, previous_qns,
                                                          answers=ask_question)
        previous_qns.append(best_question)
        dialog_data["selected_questions"].append(best_question)

        #prompt for answer
        answer, ans_dist = ask_question(best_question, m4du, sdcs, topN_paths,
                                        orig_path)
        dialog_data["received_answers"].append(answer)
        dialog_data["answer_distributions"].append(ans_dist)

        #modify sdc seq
        sdcs = modify_sdc_seq.modify_sdc_seq_wrt_ans(sdcs, best_question, answer)

        #rerun inference
        vals, lprob, sdc_eval = m4du.infer_path(sdcs, sloc, orientation)
        topN_paths = [(vals[i], lprob[i]) for i in range(len(vals))]
        dialog_data["paths_topN"].append(topN_paths)

        #dump intermediate data into a pickle file
        #this is a good time to save data.
        cPickle.dump(dialog_data, open(dialog_dump_fn, 'w'))
        print "Saved at", dialog_dump_fn

    return dialog_dump_fn
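# Hedged driver sketch (argument order and output handling are assumptions, not from
# the source): dialog() consumes the same pickled topN result and model as generate(),
# plus a ground-truth tag file, and writes a dialog_data pickle into output_dir.
if __name__ == "__main__":
    import sys
    out_fn, model_fn, gtruth_tag_fn, output_dir = sys.argv[1:5]
    dump_fn = dialog(out_fn, model_fn, gtruth_tag_fn, output_dir,
                     num_to_generate=50, num_to_ask=2,
                     sent_number=0, objective="deltaH")
    print "dialog data written to", dump_fn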
def plot_performance_with_num_spatial_relations(model, results):
    threshold = 10
    correct = correct_at_threshold(results, threshold)
    sdc_parser = direction_parser_sdc()
    #sdc_parser = direction_parser_wizard_of_oz(results['corpus_fname'], 'stefie10')

    subject_to_sr = {}
    subject_to_num_correct = {}

    total_sdcs = 0.0
    total_sr_sdcs = 0.0
    total_landmark_sdcs = 0.0
    total_sentences = 0.0

    total_raw_sdcs = 0.0
    total_raw_sr_sdcs = 0.0
    total_raw_landmark_sdcs = 0.0

    for i, paths in enumerate(results['path']):
        sentence = results["sentences"][i]
        total_sentences += 1
        subject = results["subjects"][i]
        sdcs = sdc_parser.extract_SDCs(sentence)
        for sdc in sdcs:
            total_raw_sdcs += 1
            if not sdc["spatialRelation"].isNull():
                total_raw_sr_sdcs += 1
            if not sdc["landmark"].isNull():
                total_raw_landmark_sdcs += 1

        usable_sdcs = model.get_usable_sdc(sdcs)
        sr_count = 0.0
        for sdc in usable_sdcs:
            total_sdcs += 1
            print "sr", sdc["sr"]
            #if sdc["sr"] != None and sdc["sr"] in ["to", "past", "through"]:
            if sdc["sr"] != None:
                total_sr_sdcs += 1
                sr_count += 1
            if sdc["landmark"] != None:
                total_landmark_sdcs += 1

        key = str(i)
        #key = subject
        subject_to_sr.setdefault(key, [])
        subject_to_sr[key].append(sr_count / len(usable_sdcs))

        subject_to_num_correct.setdefault(key, 0)
        subject_to_num_correct[key] += correct[i]

    X = []
    Y = []
    label_map = {}
    for subject in subject_to_sr.keys():
        x = na.mean(subject_to_sr[subject])
        y = subject_to_num_correct[subject]
        X.append(x)
        Y.append(y)
        pt = (x, y)
        label = subject.replace("Subject", "")
        label_map.setdefault(pt, [])
        label_map[pt].append(label)

    for location, labels in label_map.iteritems():
        x, y = location
        y_curr = y
        for label in labels:
            mpl.text(x, y_curr, label)
            y_curr += 0.05

    mpl.scatter(X, Y)
    mpl.xlabel("Average # of spatial relations")
    mpl.ylabel("Number correct at %.0f meters" % threshold)

    print "average utilized sdcs", total_sdcs / total_sentences
    print "average utilized sr sdcs", total_sr_sdcs / total_sentences
    print "average utilized landmark sdcs", total_landmark_sdcs / total_sentences

    print "average raw sdcs", total_raw_sdcs / total_sentences
    print "average raw sr sdcs", total_raw_sr_sdcs / total_sentences
    print "average raw landmark sdcs", total_raw_landmark_sdcs / total_sentences

    mpl.show()
def __init__(self, m4du, corpus_fname=None, addFigureToMainWindow=True):
    QMainWindow.__init__(self)
    self.setupUi(self)
    self.m4du = m4du
    self.m4du.save_update_args = True
    self.corpus_fname = corpus_fname
    self.extractor = dir_util.direction_parser_sdc()

    self.landmarkSelector = landmarkSelector.MainWindow(self.m4du)
    self.spatialRelationsSelector = spatialRelationsSelector.MainWindow(
        self.m4du)

    self.connect(
        self.landmarkSelector.landmarkSelectorTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectLandmarks)

    self.connect(self.submitButton, SIGNAL("clicked()"),
                 self.followDirections)
    self.connect(self.zoomToMagicSpotButton, SIGNAL("clicked()"),
                 self.load_ranges)
    self.connect(self.actionPreferences, SIGNAL("triggered()"),
                 self.showPreferences)
    self.connect(self.actionClear, SIGNAL("triggered()"),
                 self.clearDrawings)
    self.connect(self.actionSelectSpatialRelations, SIGNAL("triggered()"),
                 self.showSpatialRelationsSelector)
    self.connect(self.actionSaveLimits, SIGNAL("triggered()"),
                 self.saveLimits)
    self.connect(self.actionRestoreLimits, SIGNAL("triggered()"),
                 self.restoreLimits)
    self.connect(self.jointVpToSlocElocButton, SIGNAL("clicked()"),
                 self.jointVpToSlocEloc)
    self.connect(self.viewSrClassifierButton, SIGNAL("clicked()"),
                 self.viewSrClassifier)

    self.figure = mpl.figure()
    self.axes = self.figure.gca()
    if addFigureToMainWindow:
        self.oldParent = self.figure.canvas.parent()
        self.figure.canvas.setParent(self)
        self.matplotlibFrame.layout().addWidget(self.figure.canvas)
    self.limits = None
    self.figure.canvas.mpl_connect('draw_event', self.updateLimits)

    print "m4du", m4du, hasattr(self.m4du, "boundingBox")
    plot_map_for_model(self.m4du)

    self.pathPlot, = self.figure.gca().plot([], [], '--', linewidth=4,
                                            color="black")
    self.startPlot, = self.figure.gca().plot([], [], 'go-')
    self.endPlot, = self.figure.gca().plot([], [], 'ro-')

    self.selectedLandmarkPlot = None
    self.selectedElocPlots = []
    self.selectedSlocPlots = []
    self.selectedStartingSlocPlots = []
    self.correctElocPlots = []
    self.landmarkPlots = []
    self.plotLandmarks()

    self.transitionPlotDrawer = TransitionDrawer(self, self.slocTransitionsBox)
    self.observationPlotDrawer = ObservationDrawer(
        self, self.slocObservationProbabilityCheckBox)
    self.verbPlotDrawer = VerbDrawer(self, self.slocVerbProbabilityCheckBox)
    self.viewpointLandmarkPlotDrawer = ViewpointLandmarkDrawer(
        self, self.landmarkProbsCheckBox)
    self.topoDrawer = TopoDrawer(self, self.showTopologyBox)

    self.connect(self.filterBySlocBox, SIGNAL("stateChanged(int)"),
                 self.updateFilters)
    self.connect(self.filterByElocBox, SIGNAL("stateChanged(int)"),
                 self.updateFilters)
    self.connect(self.useSpatialRelationsBox, SIGNAL("stateChanged(int)"),
                 self.toggleSpatialRelations)
    self.connect(self.showLandmarksCheckBox, SIGNAL("stateChanged(int)"),
                 self.toggleLandmarks)
    self.connect(self.useWizardOfOzSdcsCheckBox, SIGNAL("stateChanged(int)"),
                 self.toggleSdcExtractor)
    #self.useWizardOfOzSdcsCheckBox.setCheckState(Qt.Checked)
    self.showLandmarksCheckBox.setCheckState(Qt.Unchecked)

    self.toolbar = NavigationToolbar2QT(self.figure.canvas, self)
    self.addToolBar(self.toolbar)

    self.sdcModel = sdcTableModel.Model(self.m4du, self.sdcTable)
    self.landmarkModel = landmarkTableModel.Model(self.landmarkTable, self.m4du)

    #self.editorWindow = editorwindow.makeWindow()
    #self.editorWindow.engineMap = self.m4du.sr_class.engineMap
    #self.editorWindow.show()

    self.startingSlocModel = viewpointTableModel.Model(
        self.startingSlocTable, self.m4du, "starting sloc")
    self.jointViewpointModel = viewpointAndObservationProbabilityTableModel.Model(
        self.jointViewpointTable, self.m4du)
    self.slocModel = viewpointTableModel.Model(self.slocTable, self.m4du, "sloc")
    self.elocModel = viewpointTableModel.Model(self.elocTable, self.m4du, "eloc")

    self.connect(
        self.jointViewpointTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectJointViewpoint)
    self.connect(
        self.sdcTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectSdc)
    self.connect(
        self.landmarkTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectLandmark)
    self.connect(
        self.slocTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectSloc)
    self.connect(
        self.startingSlocTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectStartingSloc)
    self.connect(
        self.elocTable.selectionModel(),
        SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
        self.selectEloc)

    self.startingSlocTable.selectRow(133)

    if hasattr(self.m4du, "boundingBox") and self.m4du.boundingBox != None:
        X, Y = na.transpose(self.m4du.boundingBox)
        self.limits = min(X), max(X), min(Y), max(Y)

    self.mpl_draw()
def __init__(self, m4du, rndf_file, inference_type="local", num_explorations=None,
             exploration_heuristics_name=None, parameters=None):
    self.lc = lcm.LCM()

    #load the model
    print "Loading model..."
    self.dg_model = cPickle.load(open(m4du, 'r'))
    self.rndf = ru.rndf(rndf_file, True)
    self.sdc_parser = direction_parser_sdc()

    self.trans_xyz = (0, 0, 0)
    self.trans_latlon = self.rndf.origin
    self.trans_theta = 0

    self.run_inference = True
    self.waypoints = []
    self.cmd = None
    self.curr_location = None
    self.curr_orientation = None

    # Change these two for testing; to be replaced by LCM message
    #self.curr_location = (0, 0, 0)
    #self.cmd = str("Go to the truck")

    #local vs global inference
    if inference_type in [None, ""]:
        self.inference_type = "local"
    elif inference_type != "global":
        self.inference_type = "local"
    else:
        self.inference_type = "global"

    if num_explorations in [None, ""]:
        #TODO replace 2 by the branching factor or something else.
        self.num_explorations = len(self.dg_model.tmap_locs.keys()) / 2
    else:
        self.num_explorations = int(num_explorations)

    if exploration_heuristics_name in [None, ""]:
        self.exploration_heuristics_name = "lifted_stairs"
    else:
        self.exploration_heuristics_name = exploration_heuristics_name

    if self.exploration_heuristics_name == "slope_offset_delay":
        if parameters not in [None, ""]:
            params_str = parameters.split(":")
            if len(params_str) == 3:
                self.params_num = map(float, params_str)
            else:
                self.params_num = None
        else:
            self.params_num = None

    self.lc.subscribe("DIRECTION_INPUT", self.on_comment_msg)
    self.lc.subscribe("POSE", self.on_pose_msg)
    self.lc.subscribe("GPS_TO_LOCAL", self.on_transform_msg)
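# Hedged run-loop sketch: an lcm.LCM() instance only delivers queued messages when
# handle() is called, so a node built around the constructor above would typically
# spin like this. The class name DirectionFollowerNode and the file paths are
# hypothetical placeholders, not names from the original code.
if __name__ == "__main__":
    node = DirectionFollowerNode("models/dg_model.pck", "maps/route.rndf",
                                 inference_type="local")
    while True:
        node.lc.handle()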
def __init__(self, corpus_fn, model_fn, gtruth_tag_fn, map_fn, output_dir, options,
             evaluation_mode="specialized", num_to_run=None, is_sum_product=False,
             num_align=None, no_spatial_relations=False, do_exploration=False,
             quadrant_number=None, wizard_of_oz_sdcs=None, run_description=None,
             inference="global", topN_num_paths=None, num_explorations=None,
             exploration_heuristics_name=None, parameters=None):
    print "num_to_run", num_to_run
    print "options", options
    options["model_fn"] = model_fn
    options["corpus_fn"] = corpus_fn
    options["gtruth_tag_fn"] = gtruth_tag_fn

    if inference == "":
        inference = "global"
    options["inference"] = inference

    self.range_to_run = None
    if num_to_run == "":
        num_to_run = None
    elif type(num_to_run) == type("abc") and num_to_run.find(":") != -1:
        range_from = int(num_to_run.split(":")[0])
        range_to = int(num_to_run.split(":")[1])
        self.range_to_run = range(range_from, range_to)
        num_to_run = range_to
    elif num_to_run != None:
        num_to_run = int(num_to_run)

    if type(num_to_run) == type(1) and self.range_to_run == None:
        self.range_to_run = range(num_to_run)

    if self.range_to_run == None:
        #running all of them.
        if quadrant_number == None:
            self.dsession = readSession(corpus_fn, "none")
        else:
            self.dsession = readSession(corpus_fn, "none",
                                        quadrant=int(quadrant_number))
        self.range_to_run = []
        sent_num_i = 0
        for elt in self.dsession:
            for i in range(len(elt.routeInstructions)):
                self.range_to_run.append(sent_num_i)
                sent_num_i += 1

    if num_explorations in [None, ""]:
        num_explorations = 50
    else:
        num_explorations = int(num_explorations)

    self.options = options
    self.output_dir = output_dir
    self.inference = inference
    self.num_align = num_align
    self.num_to_run = num_to_run
    self.is_sum_product = is_sum_product
    self.num_align = num_align

    if run_description == None:
        run_description = model_fn
    if inference != None:
        #append the inference name (original code appended run_description to itself,
        #which looks like a typo)
        run_description += " " + inference
    if no_spatial_relations:
        run_description += " -sr"
    else:
        run_description += " +sr"
    self.run_description = run_description

    if quadrant_number == None:
        self.dsession = readSession(corpus_fn, "none")
        #res = raw_input("running all examples! Continue?")
        #if(res.lower() == 'n' or res.lower() == "no"):
        #    sys.exit(0);
    else:
        self.dsession = readSession(corpus_fn, "none",
                                    quadrant=int(quadrant_number))

    self.dg_model = cPickle.load(open(model_fn, 'r'))
    self.dg_model.use_spatial_relations = not no_spatial_relations

    if inference == "greedy":
        self.dg_model = greedy.model(self.dg_model)
    elif inference == "last_sdc":
        self.dg_model = last_sdc.model(self.dg_model)
    elif inference == "topN":
        if topN_num_paths == None or topN_num_paths == "":
            self.topN_num_paths = 10
        else:
            self.topN_num_paths = int(topN_num_paths)
        self.dg_model = topN.model(self.dg_model, self.topN_num_paths)
    elif inference == "global":
        pass
    else:
        raise ValueError("Bad inference value: " + inference)

    #self.do_exploration = eval(str(do_exploration))
    self.do_exploration = do_exploration

    if evaluation_mode == "best_path":
        self.orient = get_orientations_each
    elif evaluation_mode == "max_prob":
        self.orient = get_orientations_all
    elif evaluation_mode == "specialized":
        self.orient = get_orientations_annotated
    else:
        raise ValueError("Unexpected mode: " + `evaluation_mode`)

    #this will load the srel_mat
    #if(isinstance(self.dg_model, model4_du.model4_du)):
    print "loading srel_mat"
    self.dg_model.initialize()

    #open the ground truth file
    self.tf = tag_file(gtruth_tag_fn, map_fn)
    self.gtruth_tag_fn = gtruth_tag_fn

    #map the topological regions to the ground truth regions
    self.topohash = get_region_to_topo_hash_containment(self.tf, self.dg_model)

    print "getting topological paths"
    self.topo_graph_D = get_topological_paths_hash(self.dg_model.clusters)
    #cPickle.dump(self.topo_graph_D, open("topo_graph_D", "wb"), 2)
    #self.topo_graph_D = cPickle.load(open("topo_graph_D", "r"))

    if wizard_of_oz_sdcs != None:
        print "using wizard", wizard_of_oz_sdcs
        self.sdc_parser = direction_parser_wizard_of_oz(corpus_fn, wizard_of_oz_sdcs)
    else:
        print "using crfs"
        self.sdc_parser = direction_parser_sdc()

    if num_explorations in [None, ""]:
        #TODO replace 2 by the branching factor or something else.
        self.num_explorations = len(self.dg_model.tmap_locs.keys()) / 2
    else:
        self.num_explorations = int(num_explorations)

    if exploration_heuristics_name in [None, ""]:
        self.exploration_heuristics_name = "lifted_stairs"
    else:
        self.exploration_heuristics_name = exploration_heuristics_name

    if self.exploration_heuristics_name == "slope_offset_delay":
        if parameters not in [None, ""]:
            params_str = parameters.split(":")
            if len(params_str) == 3:
                self.params_num = map(float, params_str)
            else:
                self.params_num = None
        else:
            self.params_num = None