Example #1
def main():

    map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
    cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
    gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
    assignment_fns = [
        "%s/nlp/data/aaai_2010_smv/stefie10/assignment1.2.yaml" % TKLIB_HOME,
        "%s/nlp/data/aaai_2010_smv/stefie10/assignment_multiple_1.yaml" %
        TKLIB_HOME
    ]
    tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
    tagFile.get_map()
    tagFile.get_tag_names()

    skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)

    modelFile = "%s/nlp/data/smv.crf.model" % TKLIB_HOME

    chunker = CrfChunker(modelFile)

    trainingFile = "%s/nlp/training.txt" % TKLIB_HOME

    out = open(trainingFile, "w")
    for assignment_fn in assignment_fns:
        assignment = Assignment.load(assignment_fn, tagFile, skeleton)
        for entry in assignment.entries:
            chunker.writeTrainingForText(entry.command, entry.sdcs("stefie10"),
                                         out)

    out.close()
    chunker.runTraining(TKLIB_HOME + "/nlp/etc/crf++/test.template",
                        trainingFile, modelFile)
Example #2
def main():
    from sys import argv
    map_fname = argv[1]
    tag_fname = argv[2]
    region_tag_fname = argv[3]
    extended_annotation_fname = argv[4]
    out_fname = argv[5]

    tagFile = tag_file(tag_fname, map_fname)

    #regionTagFile = tag_file(region_tag_fname, map_fname)

    extendedAnnotations = cPickle.load(open(extended_annotation_fname))

    doc = Document()
    corpusXml = doc.createElement("corpus")

    tagFileXml = tagFile.toXml(doc)
    tagFileXml.setAttribute("name", "objects")
    corpusXml.appendChild(tagFileXml)

    #regionTagFileXml = regionTagFile.toXml(doc)
    #regionTagFileXml.setAttribute("name", "regions")
    #corpusXml.appendChild(regionTagFileXml)

    corpusXml.appendChild(toXml(extendedAnnotations, doc))
    doc.appendChild(corpusXml)

    outfile = open(out_fname, "w")
    outfile.write(doc.toprettyxml())
    outfile.close()
Example #3
def makeAssignment(dirname, tagFile, mapFile, engineMap):
    os.mkdir(dirname)
    tagFile = tag_util.tag_file(
        "../data/directions/direction_floor_3/log4_s3.tag",
        "../data/directions/direction_floor_3/log4_s3.cmf")
    tagLayer = tkloader.loadTagLayer(tagFile.as_slimd_polygons(),
                                     "%s/tags" % dirname)

    i = 0
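    # i counts (landmark, engine) pairs: the first 220 are skipped, and the
    # loop stops once i reaches 320 (checked once per landmark)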
    for landmarkId, feature in enumerate(layers.features(tagLayer)):
        for name, engine in engineMap.iteritems():
            if i >= 220:
                exfname = "%s/landmark.%d.ex.%d" % (dirname, landmarkId, i)
                pmap, fLayers = preposition.loadInstance(
                    exfname, engineMap, engine)

                f = open("%s/properties.yaml" % (exfname), "w")
                f.write(yaml.dump({"preposition": engine.name()}))
                f.close()

                groundLayer, geom = fLayers["ground"]
                groundLayer.startEditing()
                layers.addNewFeature(groundLayer,
                                     layers.getGeometry(feature.geometry()))
                groundLayer.commitChanges()

                figureLayer, geom = fLayers["figure"]
                figureLayer.startEditing()
                #layers.addNewFeature(figureLayer, [(0, 0)])
                figureLayer.commitChanges()

            i += 1
        if i >= 320:
            break
Example #4
    def testLoad(self):
        map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
        cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
        gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
        assignment_fn = "%s/nlp/data/aaai_2010_smv/stefie10/assignment1.2.yaml" % TKLIB_HOME
        tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
        tagFile.get_map()
        tagFile.get_tag_names()

        skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)
        assignment = Assignment.load(assignment_fn, tagFile, skeleton)

        firstEntry = assignment.entries[0]

        self.assertEqual(firstEntry.verb, "follow")
        self.assertEqual(firstEntry.command,
                         "Follow the person to the kitchen.\n")
        self.assertTrue(firstEntry.situation != None)

        sdcs = firstEntry.sdcs("stefie10")
        self.assertEqual(len(sdcs), 2)
        self.assertEqual(sdcs[0].verb.text, "Follow")
        self.assertEqual(sdcs[1].spatialRelation.text, "to")

        fname = "test.yaml"
        assignment.save(fname)
        os.remove(fname)
Example #5
File: lcm_qt.py Project: stefie10/slu_hri
    def __init__(self, m4du, region_tagfile, map_fname):
        QMainWindow.__init__(self)
        self.setupUi(self)

        self.m4du = m4du

        self.lcmApp = App(self.m4du, region_tagfile, map_fname)
        self.socket = QSocketNotifier(self.lcmApp.lc.fileno(),
                                      QSocketNotifier.Read)
        self.connect(self.socket, SIGNAL("activated(int)"),
                     self.socketActivated)

        self.connect(self.sendPathButton, SIGNAL("clicked()"),
                     self.followDirections)

        self.connect(self.sendAndExecutePathButton, SIGNAL("clicked()"),
                     self.sendAndExecutePath)

        self.connect(self.confirmPathButton, SIGNAL("clicked()"),
                     self.confirmPath)

        self.connect(self.clearPathButton, SIGNAL("clicked()"),
                     self.lcmApp.clear_path)

        self.modelBrowser = modelBrowser.MainWindow(self.m4du)
        self.modelBrowser.show()
        self.tf_region = tag_file(region_tagfile, map_fname)
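        # dataset name = tag filename prefix before the first "_", e.g. "df3" from "df3_small_tags.tag"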
        self.dataset_name = region_tagfile.split("/")[-1].split("_")[0]

        self.topo_to_region = get_topo_to_region_hash(self.tf_region,
                                                      self.m4du)
Example #6
    def testSpeed(self):
        map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
        cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
        gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
        assignment_fn = "%s/nlp/data/aaai_2010_smv/stefie10/assignment1.1.yaml" % TKLIB_HOME
        tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
        tagFile.get_map()
        tagFile.get_tag_names()

        skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)
        assignment = Assignment.load(assignment_fn, tagFile, skeleton)

        slowEntry = assignment.entries[30]

        model = modelForSituation(slowEntry.situation)

        def run():
            results = inference(model, approach("figure", "landmark"))
            print "results", results
            self.assertEqual(
                results,
                SpanningIntervalSet([
                    SpanningInterval(CL, Interval(CL, 0.0, 3600.0, CL),
                                     Interval(CL, 3600.0, 6300.0, CL), OP),
                    SpanningInterval(CL, Interval(CL, 6300.0, 24800.0, CL),
                                     Interval(CL, 24800.0, 31800.0, CL), OP)
                ]))

        start = datetime.now()
        cProfile.runctx("run()", globals(), locals(), "profile.out")
        end = datetime.now()

        print "took", end - start
Example #7
    def testFollow(self):
        map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
        cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
        gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
        assignment_fn = "%s/nlp/data/aaai_2010_smv/stefie10/assignment1.1.yaml" % TKLIB_HOME
        tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
        tagFile.get_map()
        tagFile.get_tag_names()

        skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)
        assignment = Assignment.load(assignment_fn, tagFile, skeleton)

        slowEntry = assignment.entries[0]

        model = modelForSituation(slowEntry.situation)
        print "model", model

        results = inference(model, following("figure", "landmark"))

        self.assertEqual(
            results,
            SpanningIntervalSet([
                SpanningInterval(CL, Interval(CL, 1100.0, 5700.0, OP),
                                 Interval(OP, 1100.0, 5700.0, CL), OP),
                SpanningInterval(CL, Interval(CL, 6200.0, 15900.0, OP),
                                 Interval(OP, 6200.0, 15900.0, CL), OP),
                SpanningInterval(CL, Interval(CL, 16000.0, 21000.0, OP),
                                 Interval(OP, 16000.0, 21000.0, CL), OP)
            ]))
Example #8
def main():
    argv = sys.argv
    start = time.time()
    laser_fname = argv[1]
    image_dir = argv[2]
    model_fname = argv[3]
    region_fname = argv[4]
    map_fname = argv[5]

    lf = logfile_du(laser_fname, image_dir)
    
    m4du = cPickle.load(open(model_fname, 'r'))
    end = time.time()

    m4du.initialize()
    print "tags"
    gtruth_tf = tag_file(region_fname, map_fname)
    me = model_evaluator(m4du, gtruth_tf, "d8")

    app = basewindow.makeApp()
    sentence = '''Go through the double doors and past the
        lobby.  Go into a lounge with some couches. Enjoy the nice
        view.  Go past the spiral staircase.  Continue towards the
        hallway with the cubby holes.  But don't go down that
        hallway. Instead take a right into the kitchen.'''

    sentence = "Go through the double doors and past the lobby."
    wnd = MainWindow(me, sentence, "R9", lf)
    wnd.setWindowTitle(model_fname)
    wnd.show()    

    retval = app.exec_()        
Example #9
    def __init__(self, m4du, region_tagfile, map_filename):
        self.lc = lcm.LCM()

        #load the model
        print "Loading model..."
        self.dg_model = m4du
        self.sdc_parser = direction_parser_sdc()

        #    for k1 in dg_model.tmap.keys():
        #        print "===="
        #        for k2 in dg_model.tmap[k1]:
        #            print k2
        #            print dg_model.tmap_locs[k1]
        #            print dg_model.tmap_locs[k2]

        #get the topo_to_region_hash
        self.dataset_name = region_tagfile.split("/")[-1].split("_")[0]
        tf_region = tag_file(region_tagfile, map_filename)
        self.topo_to_region = get_topo_to_region_hash(tf_region, self.dg_model)

        self.waypoints = []
        self.cur_pose = None

        self.lc.subscribe("SPEECH_COMMANDS", self.on_speech_msg)
        self.lc.subscribe("POSE", self.on_pose_msg)

        self.cmd = None
Example #10
def saveClassifiers():
    import psyco
    map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
    cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
    gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
    
    assignment_fns = ["%s/nlp/data/aaai_2010_smv/stefie10/assignment1.1.yaml" % TKLIB_HOME,
                      "%s/nlp/data/aaai_2010_smv/stefie10/assignment1.2.yaml" % TKLIB_HOME,
                      "%s/nlp/data/aaai_2010_smv/stefie10/assignment2.1.yaml" % TKLIB_HOME,
                      "%s/nlp/data/aaai_2010_smv/tkollar/assignment3.1.yaml" % TKLIB_HOME,
                      ]

    tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
    tagFile.get_map()
    tagFile.get_tag_names()
    
    #print cluster_fn
    #raw_input()
    skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)
    assignments = [Assignment.load(fn, tagFile, skeleton) for fn in assignment_fns]
    #classifiers = makeClassifiers(assignment)
    result = []
    def run():
        classifiers = makeClassifiers(assignments)
        result.append(classifiers)
        

    start = time.time()
    cProfile.runctx("run()", globals(), locals(), "profile.out")
    end = time.time()
    print "took", (end - start)/60., "minutes"
    classifiers = result[0]    
    fname = "%s/nlp/data/engines.verbs.floor3.stefie10.pck" % TKLIB_HOME
    cPickle.dump(classifiers, open(fname, "w"))
    print "wrote", fname
    
    #testingAssignment = Assignment.load("%s/nlp/data/aaai_2010_smv/stefie10/assignment1.1.yaml" % TKLIB_HOME, tagFile, skeleton)
    #testingAssignment = Assignment.load("%s/nlp/data/aaai_2010_smv/tkollar/assignment3.1.yaml" % TKLIB_HOME, tagFile, skeleton)
    testingAssignment = Assignment.load("%s/nlp/data/aaai_2010_smv/stefie10/assignment4.1.yaml" % TKLIB_HOME, tagFile, skeleton)    
    
    for name, c in classifiers.iteritems():
        engine = c.engine
        testing = makeTable(engine, [testingAssignment])
        results = orngTest.testOnData([c.classifier], testing)
        mpl.figure()
        line, = orangeGui.rocCurve(results, engine.name, stepSize=0.001, marker="x", plotArgs=dict(color="k"))
        
        mpl.title(engine.name.capitalize(), fontsize=30)
        mpl.xlabel("TP")
        mpl.ylabel("FP")
        mpl.xticks([0, 1], fontsize=20)
        mpl.yticks([0, 1], fontsize=20)
        line.set_label(engine.name.upper())
        mpl.savefig("roc.%s.png" % engine.name)
        orangeUtils.displayResults(results)
    #mpl.legend(loc="lower right")
    #mpl.title("Classifiers for Verbs")
    mpl.show()
Example #11
    def run_tagfile(self, prior_cache, tag_fn):
        tf = tag_file(tag_fn, map_filename=None)

        words = set([])
        mynames = tf.get_tag_names()
        for name in mynames:
            words.update(self.run_text(prior_cache, name))

        return words
Example #12
def main():
    tagFile = tag_util.tag_file(
        "../data/directions/direction_floor_3/log4_s3.tag",
        "../data/directions/direction_floor_3/log4_s3.cmf")
    
    polygons = tagFile.as_slimd_polygons()
    app = basewindow.makeApp()
    wnd = pathDescriberWindow.makeWindow(polygons, [[(43, 14), (40, 26)]])
    wnd.show()
    print len(polygons)
    retval = app.exec_()
Example #13
def main():
    mpl.figure(figsize=(7, 5))
    #ofile = cPickle.load(open("data/directions/direction_floor_1_3d/output/iros_presentation_runs/helicopter_offline.output_7.pck", 'r'))  # actual ground truth
    ofile = cPickle.load(
        open(
            "data/directions/direction_floor_1_3d/output/helicopter_offline.output_9.pck",
            'r'))  # actual ground truth
    model = cPickle.load(
        open(
            "data/directions/direction_floor_1_3d/models/helicopter_offline.pck"
        ))

    #ofile['region_to_topology'] = get_region_to_topo_hash_containment(model.tag_file, model)

    tf = tag_file(
        "data/directions/direction_floor_1_3d/tags/df1_small_tags.tag",
        "data/directions/direction_floor_1_3d/direction_floor_1_small.cmf")

    corpus = loadCorpus(ofile["corpus_fname"])
    plot_distance_curve_iros(ofile,
                             corpus,
                             tf,
                             "+",
                             "r",
                             thelabel="Overall",
                             followedState=None,
                             linestyle="--")

    plots = plot_distance_curve_subject_iros(ofile,
                                             tf,
                                             create_figure=False,
                                             mystyle="k")

    for plot in plots:
        plot.set_label("_nolegend_")
    plots[0].set_label("Subjects")

    plot_distance_curve_random_iros(
        model,
        ofile["corpus_fname"],
        tf.tag_filename,
        tf.map_filename,
        color="b",
        marker="p",
        label="Random",
        region_to_topology=ofile["region_to_topology"])

    mpl.axis((0, 35, 0, 1))
    mpl.legend(loc="upper left")
    mpl.title("Performance on a Corpus of Instructions for a MAV")
    mpl.ylabel("Proportion\nwithin $x$ m\nof the true\ndestination",
               rotation="horizontal")
    mpl.subplots_adjust(left=0.24)

    mpl.show()
Example #14
    def testFollowStationary(self):
        from spatialRelationClassifier import SpatialRelationClassifier

        sr_class = SpatialRelationClassifier()
        
        result = sr_class.classify("to", [(0, 0), (10, 10)], [(9, 9), (9, 10), (10, 10), (10, 9)])
        
        map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
        cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
        gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
        tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
        tagFile.get_map()
        tagFile.get_tag_names()
        
        skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)

        situation = Situation([Agent("figure",
                                     [(0, (25.400000378489494, 9.0000001341104507, 0.0))]),
                               Agent("landmark",
                                     [(0, (25.200000375509262, 11.800000175833702, 0.0)), 
                                      (1, (25.400000378489494, 12.000000178813934, -2.3561944901923448)), 
                                      (2, (25.400000378489494, 12.200000181794167, -1.5707963267948966)), 
                                      (3, (25.400000378489494, 12.400000184774399, -1.5707963267948966)), 
                                      (4, (25.200000375509262, 12.600000187754631, -0.78539816339744828)), 
                                      (5, (25.200000375509262, 12.800000190734863, -1.5707963267948966)), 
                                      (6, (25.200000375509262, 13.000000193715096, -1.5707963267948966)), 
                                      (7, (25.200000375509262, 13.200000196695328, -1.5707963267948966)), 
                                      (8, (25.00000037252903, 13.40000019967556, -0.78539816339744828)), 
                                      (9, (24.800000369548798, 13.600000202655792, -0.78539816339744828)), 
                                      (10,(24.600000366568565, 13.800000205636024, -0.78539816339744828)), 
                                      (11,(24.600000366568565, 13.800000205636024, 0.0))])],
                                tagFile, 
                                skeleton)
                                
        classifiers = trainer_pacman.versionOne()
        
        for name, c in classifiers.iteritems():
            print "domain", name, c.classifier.domain.classVar.values
            self.assertEqual(len(c.classifier.domain.classVar.values), 2)
        
        sr_class = SpatialRelationClassifier()
        result = sr_class.classify("to", [(0, 0), (10, 10)], [(9, 9), (9, 10), (10, 10), (10, 9)])


        followClassifier = classifiers["follow"]
        
        followClassifier.classify(situation)
        
        sr_class = SpatialRelationClassifier()
        result = sr_class.classify("to", [(0, 0), (10, 10)], [(9, 9), (9, 10), (10, 10), (10, 9)])

        followClassifier.classify(situation)
        self.assertTrue(followClassifier.pTrue > 0.5)
Example #15
    def __init__(self, tag_fname, map_fname):
        QMainWindow.__init__(self)
        self.setupUi(self)
        self.tag_fname = tag_fname
        self.tag_file = tag_file(self.tag_fname, map_fname)

        self.initializeMatplotlib()

        plot_tklib_log_gridmap(self.tag_file.get_map(),
                               cmap="carmen_cmap_white",
                               set_rc=True)

        self.plots = []
        self.figure.canvas.mpl_connect('draw_event', self.updateLimits)
        self.tagModel = tagModel.Model(self.tagTable)
        self.tagModel.load(self.tag_file)

        self.connect(
            self.tagTable.selectionModel(),
            SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
            self.selectTag)

        self.connect(
            self.capturerList.selectionModel(),
            SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
            self.changeCapturer)

        self.capturePolygon = capturers.PolygonCapturer()
        self.capturePoint = capturers.PointCapturer()

        self.connect(self.capturePolygon, SIGNAL("completedPolygon"),
                     self.completedPolygon)

        self.connect(self.capturePoint, SIGNAL("completedPoint"),
                     self.completedPoint)

        self.connect(self.actionSave, SIGNAL("triggered()"), self.save)

        self.connect(self.actionDeleteTags, SIGNAL("triggered()"),
                     self.deleteTags)

        self.capturers = [
            self.capturePoint,
            self.capturePolygon,
        ]

        self.timer = QTimer()
        self.timer.setSingleShot(True)

        def init():
            self.capturerList.setCurrentRow(0)

        self.connect(self.timer, SIGNAL("timeout()"), init)
        self.timer.start(10)
Example #16
    def testVerbClassifierTrue(self):
        map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
        cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
        gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
        tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
        tagFile.get_map()
        tagFile.get_tag_names()
        
        skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)

        situation = Situation([Agent("figure",
                                     # a list of tuples
                                     # [(time1, (x1, y1, theta1)),
                                     #  (time2, (x2, y2, theta2))]
                                     [(0, (0, 0, 0)), 
                                      (1, (0, 1, 1)),
                                      (2, (0, 2, 2))]),
                               Agent("landmark",
                                     [(0, (0, -1, -1)), 
                                      (1, (0, 0, 0)),
                                      (2, (0, 1, 1))])],
                                tagFile,
                                skeleton)

        classifiers = trainer_pacman.versionOne()

        followClassifier = classifiers["follow"]

        self.assertEqual(followClassifier.classify(situation), False)
        print "true", followClassifier.pTrue
        print "false", followClassifier.pFalse
        self.assertTrue(0 <= followClassifier.pTrue <= 1)
        self.assertTrue(0 <= followClassifier.pFalse <= 1)
        

        situation = Situation([Agent("figure", 
                                     [(0, (0, 0, 0)), 
                                      (1, (0, 1, 1)),
                                      (2, (0, 2, 2))]),
                               Agent("landmark",
                                     [(0, (0, 100, 100)), 
                                      (1, (0, 101, 101)),
                                      (2, (0, 102, 102))])],
                                tagFile,
                                skeleton)
        self.assertEqual(followClassifier.classify(situation), False)
Example #17
def main():
    from sys import argv
    import cPickle
    
    map_fn = argv[1]
    gtruth_tag_fn = argv[2]
    skeleton_fn = argv[3]
    tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
    tagFile.get_map()
    tagFile.get_tag_names()

    skeleton = cPickle.load(open(skeleton_fn, 'r'))

    verbAssignment("stefie10", "1.yaml", tagFile, skeleton)
Example #18
    def testFollow(self):
        map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
        cluster_fn = "%s/data/directions/direction_floor_3/skels/direction_floor_3_skel.pck" % TKLIB_HOME
        gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
        tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
        tagFile.get_map()
        tagFile.get_tag_names()
        
        skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)

        situation = Situation([Agent("figure",
                                     [(0, (25.400000378489494, 9.0000001341104507, 0.0)), 
                                      (1, (25.400000378489494, 9.200000137090683, -1.5707963267948966)), 
                                      (2, (25.400000378489494, 9.4000001400709152, -1.5707963267948966)), 
                                      (3, (25.400000378489494, 9.6000001430511475, -1.5707963267948966)), 
                                      (4, (25.400000378489494, 9.8000001460313797, -1.5707963267948966)), 
                                      (5, (25.200000375509262, 10.000000149011612, -0.78539816339744828)), 
                                      (6, (25.200000375509262, 10.200000151991844, -1.5707963267948966)), 
                                      (7, (25.400000378489494, 10.400000154972076, -2.3561944901923448)), 
                                      (8, (25.400000378489494, 10.600000157952309, -1.5707963267948966)), 
                                      (9, (25.400000378489494, 10.800000160932541, -1.5707963267948966)), 
                                      (10,(25.600000381469727, 11.000000163912773, -2.3561944901923448)), 
                                      (11,(25.600000381469727, 11.000000163912773, 0.0))]),
                               Agent("landmark",
                                     [(0, (25.200000375509262, 11.800000175833702, 0.0)), 
                                      (1, (25.400000378489494, 12.000000178813934, -2.3561944901923448)), 
                                      (2, (25.400000378489494, 12.200000181794167, -1.5707963267948966)), 
                                      (3, (25.400000378489494, 12.400000184774399, -1.5707963267948966)), 
                                      (4, (25.200000375509262, 12.600000187754631, -0.78539816339744828)), 
                                      (5, (25.200000375509262, 12.800000190734863, -1.5707963267948966)), 
                                      (6, (25.200000375509262, 13.000000193715096, -1.5707963267948966)), 
                                      (7, (25.200000375509262, 13.200000196695328, -1.5707963267948966)), 
                                      (8, (25.00000037252903, 13.40000019967556, -0.78539816339744828)), 
                                      (9, (24.800000369548798, 13.600000202655792, -0.78539816339744828)), 
                                      (10,(24.600000366568565, 13.800000205636024, -0.78539816339744828)), 
                                      (11,(24.600000366568565, 13.800000205636024, 0.0))])],
                                tagFile,
                                skeleton)

        classifiers = trainer_pacman.versionOne()

        followClassifier = classifiers["follow"]
        
        followClassifier.classify(situation)
        
        self.assertTrue(followClassifier.pTrue > 0.5)
Example #19
def main():

    from sys import argv
    import cPickle
    import basewindow
    app = basewindow.makeApp()

    map_fn = argv[1]
    gtruth_tag_fn = argv[2]
    cluster_fn = argv[3]
    assignment_fn = argv[4]
    tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
    tagFile.get_map()
    tagFile.get_tag_names()

    skeleton = cPickle.load(open(cluster_fn, 'r'))

    wnd = MainWindow(tagFile, skeleton, isEditable=True)
    wnd.show()
    humanAssignment = Assignment.load(
        "%s/nlp/data/aaai_2010_smv/stefie10/assignment1.1.yaml" % TKLIB_HOME,
        tagFile, skeleton)

    engine = follow.Engine()

    table = trainer_pacman.makeTable(engine, [humanAssignment])

    subsetTable = trainer_pacman.makeSubsetExamples(engine, table)

    entries = []
    for i, ex in enumerate(subsetTable):
        print "making entry", i
        entry = VerbAssignmentEntry(ex["entry"].value.verb,
                                    ex["entry"].value.command,
                                    tagFile,
                                    skeleton,
                                    situation=ex["situation"].value)
        entries.append(entry)
        if i > 10:
            break

    wnd.load(Assignment(entries, tagFile, skeleton))

    retval = app.exec_()
Example #20
def compute_most_likely_locations(map_filename, tag_filename, prior,
                                  skeleton_filename):

    myspline = cPickle.load(open(skeleton_filename, 'r'))

    print "loading polygons"
    #load the tag file
    tf = tag_file(tag_filename, map_filename)

    print "getting likelihood map"
    l_map = map_likelihood_simple(tf, prior, myspline)

    print "map filename:", l_map.skeleton.map_filename
    #################
    #debug information
    #ion()
    #figure()
    #tkmap = l_map.skeleton.get_map()
    #plot_map(tkmap.to_probability_map_carmen(),
    #         tkmap.x_size, tkmap.y_size)
    #x, y = [36.4, 31.5]
    #vpts = get_visible_points([x,y], l_map.pts, l_map.skeleton.get_map())
    #vpolys = get_visible_polygons([x,y], l_map.poly, l_map.skeleton.get_map())
    #for elt in vpts:
    #    print elt.tag
    #for elt in vpolys:
    #    print elt.tag
    #plot([x], [y], 'ro')
    #draw()
    #raw_input("waiting")
    #################

    #for elt in loc_info:
    #    print "adding context", elt
    l_map.add_context("zebra")
    #print l_map.MAP()
    print "getting nearest neighbors"
    ret_lmap = l_map.get_lmap_nn("zebra")

    l_map.skeleton.gridmap = None
    l_map.tf = None
    print "dumping"
    cPickle.dump(ret_lmap, open("test_out.pck", 'w'))
Example #21
    def __init__(
            self,
            logfilename,
            mapfilename,
            tag_filename,
            image_dir,  #pts_tag, polys_tag, 
            tp=1.0,
            tn=1.0,
            seed=1):

        self.flaser, self.rlaser, self.tpos = load_carmen_logfile(logfilename)
        self.map_filename = mapfilename
        self.gridmap = None

        self.tfile = tag_file(tag_filename, mapfilename)
        #self.tags_pts = pts_tag
        #self.tags_polys = polys_tag

        self.tp = tp
        self.tn = tn
        tklib_init_rng(seed)

        self.path_pts = self.get_path_pts()

        #get the path points as well as their indices
        ppts = self.get_unique_path_pts()
        self.path_pts_unique, self.path_pts_to_unique_index, self.unique_to_path_pts = ppts

        #get the visible objects for each direction
        myvisible = self.get_visible(self.path_pts_unique)
        self.visible_objects, self.vobjs0, self.vobjs90, self.vobjs180, self.vobjs270 = myvisible

        #get the path pts
        self.path_pts_unique_nn = self.get_neighbors()
        self.image_to_laser_mapping = self.associate_images(image_dir)
        '''print "image to laser mapping"
Example #22
    def __init__(self, tag_fname, map_fname):
        QMainWindow.__init__(self)
        self.setupUi(self)

        self.tag_file = tag_file(tag_fname, map_fname)

        self.initializeMatplotlib()

        plot_tklib_log_gridmap(self.tag_file.get_map())

        self.playerWindow = squintPlayer.player.Window()
        self.playerWindow.show()

        self.pointModel = pointModel.Model(self.pointTable)

        self.trackModel = trackModel.Model(self.trackTable)

        self.connect(
            self.trackTable.selectionModel(),
            SIGNAL("selectionChanged ( QItemSelection, QItemSelection )"),
            self.selectTrack)
        self.limits = [0, 0, 100, 100]
        self.plots = []
        self.figure.canvas.mpl_connect('draw_event', self.updateLimits)
Example #23
        p2.set_data(X_topo, Y_topo)

        draw()
        #raw_input()

    show()


if __name__ == "__main__":
    if (len(argv) == 6):
        print "loading logfile"
        lf = logfile_du(argv[1], argv[2])

        print "loading model evaluator"
        #try out standard inference
        gtruth_tf = tag_file(argv[4], argv[5])
        me = model_evaluator(cPickle.load(open(argv[3], 'r')), gtruth_tf, "d8")

        #mysentence = "Go through the doors and past the elevators to the fountains"

        #mysentence = ''' Go through the door near the
        #elevators. Continue straight past the whiteboard.  Turn left
        #at the end of the hall.  Then turn right and go straight to
        #the end of the hall towards the kitchen area.  Take a left and
        #go through the doors, you should see a nice view.'''

        #mysentence = '''Exit through the door at the left.  Go through
        #the hallway following it around as it turns right.  Take a
        #right and then a quick left.  Go through the double doors,
        #another set of double doors and enter the room with couches
        #and a nice view.'''
Example #24
import cPickle
from sys import argv
from tag_util import tag_file
from du.eval_util import model_evaluator
from du.explore import explore

if __name__ == "__main__":
    if (len(argv) == 4):
        #try out standard inference
        gtruth_tf = tag_file(argv[2], argv[3])
        me = model_evaluator(cPickle.load(open(argv[1], 'r')), gtruth_tf, "d8")
        print me.evaluate_sentence(
            "Go through the doors and past the elevators to the fountains",
            start_region="R17")

        #try out exploration
        myexp = explore(cPickle.load(open(argv[1], 'r')))
        me = model_evaluator(myexp, gtruth_tf, "d8")
        print me.evaluate_sentence(
            "Go through the doors and past the elevators to the fountains",
            start_region="R17")

    else:
        print "usage:\n\tpython evaluate_model.py dg_model.pck gtruth.tag map_filename"
Example #25
    def __init__(self,
                 myclusterfile,
                 mycachelmap,
                 num_viewpoints=4,
                 map_filename=None,
                 tag_filename=None,
                 boundingBox=None):
        tklib_init_rng(1)

        #load the clusters
        print "loading clusters", myclusterfile
        self.clusters = cPickle.load(open(myclusterfile, 'r'))
        print "cluster class", self.clusters.__class__
        self.clusters.skel.G = None
        print "skel", self.clusters.skel
        print "skel", self.clusters.skel.__class__
        self.load_lmap(mycachelmap)
        self.allow_backtracking = False

        #model.clusters.tf.compute_path()

        if tag_filename != None:
            print "overwriting tag file", tag_filename
            self.clusters.tf = tag_file(tag_filename, map_filename)

            #if(boundingBox != None):
            self.clusters.tf.filter(boundingBox)
        self.tag_file = self.clusters.tf

        if map_filename != None:
            print "mapFile", map_filename
            self.clusters.skel.map_filename = map_filename
            self.clusters.tf.map_filename = map_filename

        #load the topological map
        print "getting topological map"
        self.tmap, tmap_cnt, self.tmap_locs_3d = self.clusters.get_topological_map()
        self.tmap_locs = dict([(key, loc[0:2])
                               for key, loc in self.tmap_locs_3d.iteritems()])
        self.tmap_keys = sorted(self.tmap.keys())

        self.num_regions = len(self.tmap.keys())

        #get the viewpoints... assumes that self.tmap is loaded
        self.num_viewpoints = num_viewpoints
        self.viewpoints, self.vpt_to_num = self.get_viewpoints(
            self.num_viewpoints)

        print "computing shortest paths"
        #self.shortest_paths, self.path_lengths = self.compute_shortest_paths()
        self.shortest_paths = None
        self.path_lengths = None

        self.vpt_to_tmap_index = {}
        self.vp_i_to_pose = {}
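        # viewpoint ids have the form "<topo key>_<orientation in degrees>"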
        for vp_i, vp in enumerate(self.viewpoints):
            topo, orient = vp.split("_")
            self.vpt_to_tmap_index[vp] = self.tmap_keys.index(float(topo))
            self.vp_i_to_pose[vp_i] = radians(float(orient))

        self.vp_i_to_topo_i = zeros(len(self.viewpoints), dtype=int32)
        for elt in self.viewpoints:
            self.vp_i_to_topo_i[
                self.vpt_to_num[elt]] = self.vpt_to_tmap_index[elt]

        #get the visible tags for each region
        self.topo_key_to_vtags, self.topo_key_to_itags = self.get_region_to_object_visibility_hash(
            self.tmap_locs)
        self.vp_i_to_vtags = self.get_vp_i_to_vtags()

        self.myparser = direction_parser()
        self.sr_class = SpatialRelationClassifier()
        self.spatial_relations = self.sr_class.engineNames
Example #26
def plot_distance_curve_random(model,
                               corpus_fn,
                               gtruth_tag_fn,
                               map_fn,
                               color,
                               marker,
                               label='',
                               linestyle="-",
                               region_to_topology=None):
    """
    Needs the viewpoints and tmap_locs from the model.
    """
    print "starting random"
    dsession = readSession(corpus_fn, "none")
    if gtruth_tag_fn != None:
        tf = tag_file(gtruth_tag_fn, map_fn)
        topohash = get_region_to_topo_hash_containment(tf, model)
    else:
        topohash = region_to_topology
    Dists = []
    for elt in dsession:
        for i in range(len(elt.routeInstructions)):

            if (elt.columnLabels[i] is None):
                print "sentence", i, "was", elt.columnLabels[i]
                continue

            start_true, end_true = elt.columnLabels[i].split("to")
            start_true = str(start_true.strip())
            end_true = str(end_true.strip())
            iSlocTopo = topohash[start_true][0]
            iElocTopo = topohash[end_true][0]
            eloc = model.tmap_locs[iElocTopo]

            total_dist = 0.0
            for vp in model.viewpoints:
                topo, orient = vp.split("_")
                vp_loc = model.tmap_locs[float(topo)]
                total_dist += math2d_dist(vp_loc, eloc)

            expected_dist = total_dist / len(model.viewpoints)
            Dists.append(expected_dist)
    Y = []
    X = []
    for threshold in Dists:

        #get the ones above the threshold
        #print nonzero(array(Dists) > threshold)
        #print array(Dists) > threshold
        Itrue, = nonzero(array(Dists) <= threshold)

        Y.append(len(Itrue) / (1.0 * len(Dists)))
        X.append(threshold)

    num_correct_at_threshold = len(nonzero(array(Dists) <= 10)[0])
    print "random less than 10 meters", num_correct_at_threshold,
    print "%.3f%%" % (num_correct_at_threshold / (1.0 * len(Dists)))
    print "sorting"
    X, I = quicksort(X)
    print "taking"
    Y = array(Y).take(I)
    print "plotting"

    if (X[0] > 0.0):
        Xf = [X[0]]
        Xf.extend(X)
        Yf = [0]
        Yf.extend(Y)
        X = Xf
        Y = Yf

    p = plot_markers_evenly(X,
                            Y,
                            label,
                            marker,
                            color,
                            linewidth=2.5,
                            linestyle=linestyle)
    xlabel('distance from destination (m)')
    ylabel('proportion correct')
    return p
Example #27
def main():
    from sys import argv

    map_fn = argv[1]
    gtruth_tag_fn = argv[2]
    cluster_fn = argv[3]
    assignment_fns = argv[4:]
    tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
    tagFile.get_map()
    tagFile.get_tag_names()

    skeleton = carmen_map_skeletonizer.load(cluster_fn, map_fn)

    assignments = [Assignment.load(assignment_fn, tagFile, skeleton) for assignment_fn 
                   in assignment_fns]
    
    engineMap = dict((x.name, x) for x in
                     [bring.Engine(), 
                      follow.Engine(), 
                      meet.Engine(), 
                      avoid.Engine(), 
                      #wander.Engine(), 
                      #go.Engine(),
                      ])

    for engine in engineMap.values():
        verb = engine.name
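        # note: the "and False" below disables this filter, so every verb is processed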
        if verb != "follow" and False:
            continue
        

        def run():
            return makeTable(engine, assignments)
        #cProfile.runctx("run()", globals(), locals(), "profile.out")
        #return
        table = run()
        print "verb", verb, len(table)  
    
        cv_indices = orange.MakeRandomIndicesCV(table, 2)
        humanLabeledTraining = table.select(cv_indices, 0)
        training = orange.ExampleTable(humanLabeledTraining.domain)
        training.extend(humanLabeledTraining)
        generatedTraining = makeSubsetExamples(engine, humanLabeledTraining)
        training.extend(generatedTraining)
        
        print "Using", len(generatedTraining), "subset examples"
        
        testing = table.select(cv_indices, 1)
        
        #testFeatureSubsets(engine, training, testing)
        
        #classifier  = orngBayes.BayesLearner(training)
        classifier  = RandomForestLearner(training)
        results = orngTest.testOnData([classifier], testing)
        print "results", results
        tuples = list(zip(testing, results.results))
        tuples.sort(key=lambda x: x[0]["description"])
        for e, r in tuples:
#            print e["description"], e["hasApproach"], e["hasFollow"],
            if r.actualClass == r.classes[0]:
                print "correct", e["description"], e["entry"].value.id 
            else:
                print "incorrect", e["description"], e["entry"].value.id 

        mpl.figure(figsize=(6,6))
        mpl.subplots_adjust(bottom=0.13)
        line, = orangeGui.rocCurve(results, engine.name, stepSize=0.001,
                                   plotArgs={"color":"black"})

        orangeUtils.displayResults(results)
        mpl.xlabel("FP", fontsize=32)
        mpl.ylabel("TP", fontsize=32)
        mpl.xticks((0, 1), fontsize=20)
        mpl.yticks((0, 1), fontsize=20)
        line.set_label(engine.name)
        mpl.title(engine.name.capitalize(), fontsize=32)
        mpl.savefig("roc_%s.png" % engine.name)
        mpl.savefig("roc_%s.ps" % engine.name)
    mpl.show()
Example #28
def loadTagFile(gtruth_tag_fn, map_fn):
    tagFile = tag_util.tag_file(gtruth_tag_fn, map_fn)
    tagFile.get_map()
    tagFile.get_tag_names()
    return tagFile
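
A minimal usage sketch for the helper above, assuming TKLIB_HOME is defined as in Example #1 (illustrative only):

map_fn = "%s/data/directions/direction_floor_3/direction_floor_3_small_filled.cmf" % TKLIB_HOME
gtruth_tag_fn = "%s/data/directions/direction_floor_3/tags/df3_small_tags.tag" % TKLIB_HOME
tagFile = loadTagFile(gtruth_tag_fn, map_fn)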
Example #29
    def __init__(self, corpus_fn, model_fn, gtruth_tag_fn, map_fn, output_dir, options,
                 evaluation_mode="specialized",
                 num_to_run=None, is_sum_product=False, num_align=None,
                 no_spatial_relations=False, do_exploration=False, quadrant_number=None,
                 wizard_of_oz_sdcs=None, run_description=None, inference="global",
                 topN_num_paths=None, num_explorations=None,
                 exploration_heuristics_name=None, parameters=None):
        print "num_to_run", num_to_run
        print "options", options
        options["model_fn"] = model_fn
        options["corpus_fn"] = corpus_fn
        options["gtruth_tag_fn"] = gtruth_tag_fn
        if inference == "":
            inference = "global"
            options["inference"]=inference
        self.range_to_run = None
        if num_to_run == "":
            num_to_run = None
        elif type(num_to_run) == type("abc") and num_to_run.find(":") != -1:
            range_from = int(num_to_run.split(":")[0])
            range_to = int(num_to_run.split(":")[1])
            self.range_to_run = range(range_from, range_to)
            num_to_run = range_to
        elif num_to_run != None:
            num_to_run = int(num_to_run)

        if type(num_to_run) == type(1) and self.range_to_run == None:
            self.range_to_run = range(num_to_run)
        if self.range_to_run == None:
            #running all of them.
            if (quadrant_number == None):
                self.dsession = readSession(corpus_fn, "none")
            else:
                self.dsession = readSession(corpus_fn, "none", quadrant=int(quadrant_number))
            self.range_to_run = []
            sent_num_i = 0
            for elt in self.dsession:
                for i in range(len(elt.routeInstructions)):
                    self.range_to_run.append(sent_num_i)
                    sent_num_i += 1

        if num_explorations in [None, ""]:
            num_explorations = 50
        else:
            num_explorations = int(num_explorations)
                
        self.options = options
        self.output_dir = output_dir
        self.inference = inference

        self.num_align = num_align
        self.num_to_run = num_to_run
        self.is_sum_product = is_sum_product

        if run_description == None:
            run_description = model_fn
            if inference != None:
                run_description += " " + inference
            if no_spatial_relations:
                run_description += " -sr"
            else:
                run_description += " +sr"
        self.run_description = run_description
        
            

        if(quadrant_number==None):
            self.dsession = readSession(corpus_fn, "none")
            #res = raw_input("running all examples!  Continue?")
            #if(res.lower() == 'n' or res.lower() == "no"):
            #    sys.exit(0);
        else:
            self.dsession = readSession(corpus_fn, "none", quadrant=int(quadrant_number))
        
        self.dg_model = cPickle.load(open(model_fn, 'r'))
        self.dg_model.use_spatial_relations = not no_spatial_relations
        
        if inference == "greedy":
            self.dg_model = greedy.model(self.dg_model)
        elif inference == "last_sdc":
            self.dg_model = last_sdc.model(self.dg_model)
        elif inference == "topN":
            if topN_num_paths == None or topN_num_paths == "":
                self.topN_num_paths = 10
            else:
                self.topN_num_paths = int(topN_num_paths)
            self.dg_model = topN.model(self.dg_model, self.topN_num_paths)
        elif inference == "global":
            pass
        else:
            raise ValueError("Bad inference value: " + inference)
        
        #self.do_exploration = eval(str(do_exploration))
        self.do_exploration = do_exploration
        
        if evaluation_mode == "best_path":
            self.orient = get_orientations_each
        elif evaluation_mode == "max_prob":
            self.orient = get_orientations_all
        elif evaluation_mode == "specialized":
            self.orient = get_orientations_annotated
        else:
            raise ValueError("Unexpected mode: " + `evaluation_mode`)
        #this will load the srel_mat
        #if(isinstance(self.dg_model, model4_du.model4_du)):
        print "loading srel_mat"
        self.dg_model.initialize()
    
        #open the ground truth file
        self.tf = tag_file(gtruth_tag_fn, map_fn)
        self.gtruth_tag_fn = gtruth_tag_fn
        
        #map the topological regions to the ground truth regions
        self.topohash = get_region_to_topo_hash_containment(self.tf, self.dg_model)

        print "getting topological paths"
        self.topo_graph_D = get_topological_paths_hash(self.dg_model.clusters)
        #cPickle.dump(self.topo_graph_D, open("topo_graph_D", "wb"), 2)
        #self.topo_graph_D = cPickle.load(open("topo_graph_D", "r"))
        
        if wizard_of_oz_sdcs != None:
            print "using wizard", wizard_of_oz_sdcs
            self.sdc_parser = direction_parser_wizard_of_oz(corpus_fn, wizard_of_oz_sdcs)
        else:
            print "using crfs"
            self.sdc_parser = direction_parser_sdc()
        
        if num_explorations in [None, ""]:
            #TODO replace 2 by the branching factor or something else.
            self.num_explorations = len(self.dg_model.tmap_locs.keys()) / 2
        else:
            self.num_explorations = int(num_explorations)
            
        if exploration_heuristics_name in [None, ""]:
            self.exploration_heuristics_name = "lifted_stairs"
        else:
            self.exploration_heuristics_name = exploration_heuristics_name
            
        if self.exploration_heuristics_name == "slope_offset_delay":
            if parameters not in [None, ""]:
                params_str = parameters.split(":")
                if len(params_str) == 3:
                    self.params_num = map(float, params_str)
                else:
                    self.params_num = None
        else:
            self.params_num = None
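
For reference, a sketch of the num_to_run spellings the constructor above accepts (values illustrative):

# num_to_run=""        -> run every sentence in the corpus
# num_to_run="25"      -> run sentences 0..24 (range(25))
# num_to_run="10:20"   -> run sentences 10..19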