def predictionAndUpdateOneParticle_firsttime(self, X, dt, dt2, keypoints, step, P):
		
		"""
		FastSLAM1.0
		"""
		
		weight = 0.0 # weight (return value)
		weights = []
		
		# 姿勢予測 prediction of position
		X_ = Particle()
		X_.landmarks = X.landmarks
		X_.x = X.x + dt*X.v + dt2*X.a
		X_.v = X.v + dt*X.a
		X_.a = X.a
		X_.o = X.o
		
		for keypoint in keypoints:
			# previous landmark id
			prevLandmarkId = (step-1)*10000 + keypoint.prevIndex
			# new landmark id
			landmarkId = step*10000 + keypoint.index
			# The landmark is already observed or not?
			if prevLandmarkId not in X_.landmarks:
				# First observation
				# Initialize landmark and append to particle
				landmark = Landmark()
				landmark.init(X_, keypoint, P, self.focus)
				X_.landmarks[landmarkId] = landmark
			else:
				print("Error on pf_step_camera_firsttime")
				
		self.count+=1
		
		return X_, weight
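The prediction step above advances each particle with a constant-acceleration motion model (x' = x + v*dt + a*dt^2/2, v' = v + a*dt). A minimal standalone sketch of that update with NumPy, using made-up state values rather than the repository's Particle class:

import numpy as np

# Hypothetical particle state: position, velocity, acceleration (3-vectors)
x = np.array([0.0, 0.0, 0.0])
v = np.array([0.1, 0.0, 0.0])
a = np.array([0.0, 0.2, 0.0])

dt = 0.1
dt2 = 0.5 * dt * dt   # plays the role of the dt2 argument above (assumed to be dt^2/2)

x_pred = x + dt * v + dt2 * a   # predicted position
v_pred = v + dt * a             # predicted velocity
print(x_pred, v_pred)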
Example no. 3
    def set_orientations(self, landmark, perspective):
        options = set()
        if landmark.parent and landmark.parent.parent_landmark:
            middle_lmk = Landmark('',
                                  PointRepresentation(landmark.parent.middle),
                                  landmark.parent, None)
            options = OrientationRelationSet.get_applicable_relations(
                perspective,
                middle_lmk,
                Landmark(None,
                         PointRepresentation(landmark.representation.middle),
                         None, None),
                use_distance=False)

            par_lmk = landmark.parent.parent_landmark
            if par_lmk.parent and par_lmk.parent.parent_landmark:
                par_middle_lmk = Landmark(
                    '', PointRepresentation(par_lmk.parent.middle),
                    par_lmk.parent, None)
                trajector = Landmark(
                    '', PointRepresentation(par_lmk.representation.middle),
                    None, None)
                par_options = OrientationRelationSet.get_applicable_relations(
                    perspective, par_middle_lmk, trajector, use_distance=False)
            else:
                par_options = []

            options = sorted(set(options).difference(set(par_options)))
            self.set_orientations(par_lmk, perspective)

        landmark.ori_relations = map(type, options)
 def guess_landmark(self, obs, dyn):
     """Based on the particle position and observation, guess the location of the landmark. Origin at top left"""
     distance, direction = obs
     lm_x = self.pos_x + distance * math.cos(direction)
     lm_y = self.pos_y + distance * math.sin(direction)
     if dyn == 1:
         return Landmark(lm_x, lm_y, True)
     else:
         return Landmark(lm_x, lm_y, False)
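guess_landmark above projects a range/bearing observation from the particle's pose into world coordinates. A small standalone check of that geometry, with hypothetical numbers and independent of the Particle/Landmark classes:

import math

pos_x, pos_y = 100.0, 100.0                # hypothetical particle position
distance, direction = 50.0, math.pi / 4.0  # observation: range and bearing

lm_x = pos_x + distance * math.cos(direction)
lm_y = pos_y + distance * math.sin(direction)
print(lm_x, lm_y)  # landmark guess near (135.36, 135.36)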
Example no. 5
    def test_particle2_data_association(self):
        robot = Particle2(100, 100, 0, is_robot=True)

        landmark1 = Landmark(20, 20)
        landmark2 = Landmark(40, 60)
        landmark_fake2 = Landmark(45, 60)
        landmark3 = Landmark(20, 110)
        robot.landmarks = [landmark1, landmark_fake2, landmark3]
        obs = robot.sense([landmark2], 1)
        o = obs[0]
        prob, idx = robot.pre_compute_data_association(o)
        self.assertEqual(idx, 1)
Example no. 6
    def test_data_association(self):
        robot = Particle(100, 100, 0, is_robot=True)

        landmark1 = Landmark(20, 20)
        landmark2 = Landmark(40, 60)
        landmark_fake2 = Landmark(45, 60)
        landmark3 = Landmark(20, 110)
        robot.landmarks = [landmark1, landmark_fake2, landmark3]
        obs = robot.sense([landmark2], 1)
        o = obs[0]
        prob, idx, ass_obs, ass_jacobian, ass_adjcov = robot.find_data_association(
            o)
        self.assertEqual(idx, 1)
Example no. 7
    def __init__(self):
        self.bridge_object = CvBridge()
        self.curve = False
        self.landmark = Landmark()  # Landmark class object
        self.north = 0
        self.qr_code = "11"
        self.qr_distance = 200
        self.lowlimit = 0
        self.upperlimit = 359

        self.pub = rospy.Publisher("planning_topic", IntString, queue_size=10)
        rospy.Subscriber("qr_topic", Image, self.planner_callback, 0)
        rospy.Subscriber("magnetometer_topic", Float32, self.planner_callback,
                         1)
Example no. 8
    def correct(self, sflist):
        '''
        Correct the weight for each particle using RToF signatures
        '''
        # TODO: See if this function can be optimized
        for sf in sflist:
            sig = sf.locSignature
            plist = self.particles[sf.name][0]
            pwts = self.particles[sf.name][1]
            
            isRevisit = False
            if sf.name in self.landmarks:
                landmarks = self.landmarks[sf.name]
            else:
                self.landmarks[sf.name] = []
                landmarks = self.landmarks[sf.name]
                
            # Check if it is a revisited landmark and update particle weights
            if landmarks:
                for lm in landmarks:
                    if (lm.isRevisit(sig)):
                        isRevisit = True
                        self.numRevisit += 1
                        self.updateParticleWtOnRevisit(plist, pwts, lm.hash)

            
            # if it is not a revisit add new landmark
            if not isRevisit:
                lm = Landmark(sig)
                self.numlandmarks += 1
                landmarks.append(lm)
                # For each particle for a SensorFly store the estimated locations
                for p in plist:
                    p.landmarkDict[lm.hash] = [p.x, p.y]
Example no. 9
def test_sense():
    landmarks = [Landmark(3, 3, None)]
    robot = RobotBin(0, 0, 'car.png', None, landmarks, [], None)
    hfilter = HistogramFilter(1, 10, 10, landmarks, robot)
    hfilter.belief = np.zeros((6, 6)) + (float(1) / 36)
    hfilter.z_noise = 0.5
    hfilter.sense_update([(0, 1)], (0, 0))

    correct = np.array([[
        1.07307687e-06, 1.07126166e-04, 2.14493697e-03, 5.82993552e-03,
        2.14493697e-03, 1.07126166e-04
    ],
                        [
                            1.48037381e-05, 2.14493697e-03, 4.30754611e-02,
                            1.17090636e-01, 4.30754611e-02, 2.14493697e-03
                        ],
                        [
                            3.96330630e-05, 5.82993552e-03, 1.17090636e-01,
                            3.18284739e-01, 1.17090636e-01, 5.82993552e-03
                        ],
                        [
                            1.48037381e-05, 2.14493697e-03, 4.30754611e-02,
                            1.17090636e-01, 4.30754611e-02, 2.14493697e-03
                        ],
                        [
                            1.07307687e-06, 1.07126166e-04, 2.14493697e-03,
                            5.82993552e-03, 2.14493697e-03, 1.07126166e-04
                        ],
                        [
                            3.58496782e-07, 1.07307687e-06, 1.48037381e-05,
                            3.96330630e-05, 1.48037381e-05, 1.07307687e-06
                        ]])
    np.testing.assert_allclose(hfilter.belief, correct, rtol=1e-5)
Example no. 10
 def setup_world(self):
     """Set up landmarks"""
     self.landmarks = [
         Landmark(np.random.randint(0, WINDOWHEIGHT - 20),
                  np.random.randint(0, WINDOWWIDTH - 20))
         for _ in range(LANDMARKNUMBER)
     ]
Example no. 11
        def instantiate_relations(landmark):

            bullshit_trajector = Landmark(None, PointRepresentation(Vec2(0,
                                                                         0)),
                                          None)
            relations = []
            if not isinstance(landmark.representation, SurfaceRepresentation):
                for rel in DistanceRelationSet.relations:
                    for dist_class, deg_class in list(
                            product([
                                Measurement.NEAR
                                if rel == ToRelation else Measurement.FAR
                            ], Degree.all)):
                        relation = rel(perspective, landmark,
                                       bullshit_trajector)
                        relation.measurement.best_distance_class = dist_class
                        relation.measurement.best_degree_class = deg_class
                        relations.append(relation)

            for rel in ContainmentRelationSet.relations:
                relation = rel(perspective, landmark, bullshit_trajector)
                relations.append(relation)

            for rel in OrientationRelationSet.relations:
                for dist_class, deg_class in list(
                        product([Measurement.FAR], Degree.all)) + [
                            (Measurement.NONE, Degree.NONE)
                        ]:
                    # for dist_class, deg_class in [(Measurement.NONE,Degree.NONE)]:
                    relation = rel(perspective, landmark, bullshit_trajector)
                    relation.measurement.best_distance_class = dist_class
                    relation.measurement.best_degree_class = deg_class
                    relations.append(relation)
            return relations
Example no. 12
 def __init__(self, poly, alt_of=None):
     super(PolygonRepresentation, self).__init__(alt_of)
     self.poly = poly
     self.num_dim = 2
     self.middle = poly.centroid
     self.landmarks = {
         'middle': Landmark('middle', PointRepresentation(self.middle), self, Landmark.MIDDLE)
     }
Example no. 13
 def sample_point_trajector(self, bounding_box, relation, perspective, landmark, step=0.02):
     """
     Sample a point of interest given a relation and landmark.
     """
     probs, points = self.get_probabilities_box(bounding_box, relation, perspective, landmark)
     probs /= probs.sum()
     index = probs.cumsum().searchsorted( random.sample(1) )[0]
     return Landmark( 'point', Vec2( *points[index] ), None, Landmark.POINT )
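The sampling above draws a point with probability proportional to the spatial distribution by combining cumsum with searchsorted. A standalone NumPy sketch of that technique (assuming `random` in the original refers to numpy.random, which the snippet does not show):

import numpy as np

probs = np.array([0.1, 0.5, 0.2, 0.2])        # hypothetical unnormalized weights
probs = probs / probs.sum()

u = np.random.sample(1)                       # one uniform draw in [0, 1)
index = probs.cumsum().searchsorted(u)[0]     # index drawn proportionally to probs
print(index)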
Example no. 14
    def __init__(self, circ, alt_of=None):
        self.circ = circ
        self.num_dim = 2
        self.middle = circ.center
        self.alt_representations = [PointRepresentation(self.middle, self)]

        self.landmarks = {
            'middle': Landmark('middle', PointRepresentation(self.middle), self, Landmark.MIDDLE)
        }
def main():
    landmark1 = Landmark(2, -2)
    landmark2 = Landmark(-1, -3)
    landmark3 = Landmark(3, 3)
    world_map = Map()
    world_map.append_landmark(landmark1)
    world_map.append_landmark(landmark2)
    world_map.append_landmark(landmark3)

    world = World(time_span=30, time_interval=0.1)
    for _ in range(10):
        robot = RealRobot(np.array([-2, -1, math.pi * 5 / 6]).T,
                          camera=IdealCamera(world_map))
        agent = FixedInputAgent(robot, vel=0.2, omega=10.0 / 180 * math.pi)
        world.append_agent(agent)

    world.append_object(world_map)
    world.draw()
Example no. 16
def landmark_from_dict(dikt):
    name = dikt['name']
    repres = representation_from_dict(
        dikt['representation']) if dikt['representation'] else None
    parent = representation_from_dict(
        dikt['parent']) if dikt['parent'] else None
    object_class = dikt['object_class']
    color = dikt['color']

    return Landmark(name, repres, parent, object_class, color)
Example no. 17
    def test_update_EKF(self):
        robot = Particle(100, 100, 0, is_robot=True)

        landmark1 = Landmark(20, 20)
        landmark2 = Landmark(40, 60)
        landmark_fake2 = Landmark(45, 60)
        landmark3 = Landmark(20, 110)
        robot.landmarks = [landmark1, landmark_fake2, landmark3]

        obs = robot.sense([landmark2], 1)
        for o in obs:
            prob, idx, ass_obs, ass_jacobian, ass_adjcov = robot.find_data_association(
                o)
            self.assertEqual(robot.landmarks[idx].pos(), (45, 60))
            robot.update_landmark(np.transpose(np.array([o])), idx, ass_obs,
                                  ass_jacobian, ass_adjcov)
            self.assertLess(abs(robot.landmarks[idx].pos_x - 39.89615124708),
                            1)
            self.assertLess(abs(robot.landmarks[idx].pos_y - 60.04079368286),
                            1)
Example no. 18
    def test_guess_landmark(self):
        robot = Particle(100, 100, 0, is_robot=True)

        landmark = Landmark(20, 20)
        landmarks = [landmark]
        obs = robot.sense(landmarks, 1)
        self.assertLess(abs(obs[0][0] - math.hypot(100 - 20, 100 - 20)), 1)

        landmark = robot.guess_landmark(obs[0])
        self.assertLess(abs(landmark.pos_x - 20), 5)
        self.assertLess(abs(landmark.pos_y - 20), 5)
Example no. 19
 def load_landmarks(self, number_landmarks):
     """Set pixmap items in landmarks position and set"""
     for n in number_landmarks:
         Landmark(n,
                  STATIC_LANDMARKS[n][0],
                  STATIC_LANDMARKS[n][1],
                  30,
                  30,
                  scene=self.ui.scene,
                  flag=QtGui.QGraphicsItem.ItemIsSelectable,
                  text=True)
Example no. 20
 def create_landmarks_random(self):
     for i in range(self.number_landmarks):
         landmark = Landmark(POLE_LANDMARK_MASK)
         offset = self.wall_thickness + landmark.radius
         placed = False
         while not placed:
             landmark.body.position = self.get_random_pos_within_border(
                 offset)
             if self.env.shape_query(landmark.shape) == []:
                 placed = True
         self.env.add(landmark.body, landmark.shape)
         self.landmarks.append(landmark)
Example no. 21
    def setUp(self):
        self.sample_response = {
            'routes': [{
                'legs': [{
                    'steps': [{
                        'html_instructions': ("Завийте <b>надясно</b> "
                                              "на <b>бул. „Витоша“</b>")
                    }, {
                        'html_instructions': ("Завийте <b>наляво</b>"
                                              " на <b>ул. „Верила“</b>")
                    }, {
                        'html_instructions': ("Завийте <b>надясно</b> на <b>"
                                              "ул. „Цар Асен I-ви“</b>")
                    }]
                }]
            }]
        }

        self.expected = '''Завийте надясно на бул. „Витоша“
Завийте наляво на ул. „Верила“
Завийте надясно на ул. „Цар Асен I-ви“'''

        self.landmarks = []
        self.landmarks.append(Landmark("Едно", 123, 456))
        self.landmarks.append(Landmark("Две", 123, 456))
        self.landmarks.append(Landmark("Три", 123, 456))

        self.landmarks[0].set_forecast_data(60, 10)
        self.landmarks[0].set_travel_duration(15000)

        self.landmarks[1].set_forecast_data(0, 25)
        self.landmarks[1].set_travel_duration(4000)

        self.landmarks[2].set_forecast_data(30, 20)
        self.landmarks[2].set_travel_duration(8000)

        self.expected_sorted = [
            self.landmarks[1], self.landmarks[2], self.landmarks[0]
        ]
Example no. 22
 def create_landmarks_random(self):
     for i in range(self.number_landmarks):
         landmark = Landmark(ARC_LANDMARK_MASK, 10)
         offset = self.wall_thickness + landmark.radius
         placed = False
         while not placed:
             x = randint(offset, self.width - offset)
             y = randint(offset, self.height - offset)
             landmark.body.position = x, y
             if self.env.shape_query(landmark.shape) == []:
                 placed = True
         self.env.add(landmark.body, landmark.shape)
         self.landmarks.append(landmark)
Example no. 23
    def __init__(self, ratio=None, line=LineSegment.from_points([Vec2(0, 0), Vec2(1, 0)]), alt_of=None):
        super(LineRepresentation, self).__init__(alt_of)
        self.ratio = ratio
        self.line = line
        # extend the LineSegment to include a bounding_box field, planar doesn't have that originally
        self.line.bounding_box = BoundingBox.from_points(self.line.points)
        self.num_dim = 1
        self.middle = line.mid
        self.alt_representations = [PointRepresentation(self.line.mid, self)]
        self.ratio_limit = 2

        if ratio is None or ratio >= self.ratio_limit:
            self.landmarks = {
                'start':  Landmark('start',  PointRepresentation(self.line.start), self, Landmark.END),
                'middle': Landmark('middle', PointRepresentation(self.line.mid),   self, Landmark.MIDDLE),
                'end':    Landmark('end',    PointRepresentation(self.line.end),   self, Landmark.END),
            }
        else:
            self.landmarks = {
                'start':  Landmark('start',  PointRepresentation(self.line.start), self, Landmark.SIDE),
                'end':    Landmark('end',    PointRepresentation(self.line.end),   self, Landmark.SIDE)
            }
Example no. 24
 def setup_world(self, isdyn):
     """Set up landmarks, origin is the left-bottom point"""
     l1 = Landmark(10, 10, isdyn[0])
     l2 = Landmark(25, 60, isdyn[1])
     l3 = Landmark(100, 50, isdyn[2])
     l4 = Landmark(110, 100, isdyn[3])
     l5 = Landmark(160, 88, isdyn[4])
     l6 = Landmark(170, 180, isdyn[5])
     self.landmarks = [l1, l2, l3, l4, l5, l6]
Example no. 25
def construct_training_scene():
    speaker = Speaker(Vec2(0, 0))
    scene = Scene(3)

    table = Landmark(
        'table',
        RectangleRepresentation(
            rect=BoundingBox([Vec2(-0.4, 0.4), Vec2(0.4, 1.0)])), None,
        ObjectClass.TABLE)

    obj1 = Landmark(
        'green_cup',
        RectangleRepresentation(rect=BoundingBox(
            [Vec2(0.05 - 0.035, 0.9 - 0.035),
             Vec2(0.05 + 0.035, 0.9 + 0.035)]),
                                landmarks_to_get=[]), None, ObjectClass.CUP,
        Color.GREEN)

    obj2 = Landmark(
        'blue_cup',
        RectangleRepresentation(rect=BoundingBox(
            [Vec2(0.05 - 0.035, 0.7 - 0.035),
             Vec2(0.05 + 0.035, 0.7 + 0.035)]),
                                landmarks_to_get=[]), None, ObjectClass.CUP,
        Color.BLUE)

    obj3 = Landmark(
        'pink_cup',
        RectangleRepresentation(rect=BoundingBox(
            [Vec2(0 - 0.035, 0.55 - 0.035),
             Vec2(0 + 0.035, 0.55 + 0.035)]),
                                landmarks_to_get=[]), None, ObjectClass.CUP,
        Color.PINK)

    obj4 = Landmark(
        'purple_prism',
        RectangleRepresentation(rect=BoundingBox(
            [Vec2(-0.3 - 0.03, 0.7 - 0.05),
             Vec2(-0.3 + 0.03, 0.7 + 0.05)]),
                                landmarks_to_get=[]), None, ObjectClass.PRISM,
        Color.PURPLE)

    obj5 = Landmark(
        'orange_prism',
        RectangleRepresentation(rect=BoundingBox(
            [Vec2(0.3 - 0.03, 0.7 - 0.05),
             Vec2(0.3 + 0.03, 0.7 + 0.05)]),
                                landmarks_to_get=[]), None, ObjectClass.PRISM,
        Color.ORANGE)

    scene.add_landmark(table)

    for obj in (obj1, obj2, obj3, obj4, obj5):
        obj.representation.alt_representations = []
        scene.add_landmark(obj)

    return scene, speaker
Example no. 26
def main():
    landmark1 = Landmark(2, -2)
    landmark2 = Landmark(-1, -3)
    landmark3 = Landmark(3, 3)
    world_map = Map()
    world_map.append_landmark(landmark1)
    world_map.append_landmark(landmark2)
    world_map.append_landmark(landmark3)

    robot1 = IdealRobot(np.array([2, 3, math.pi/6]).T,
                        camera=IdealCamera(world_map))
    agent1 = FixedInputAgent(robot1, vel=0.2, omega=0.0)

    robot2 = IdealRobot(np.array([-2, -1, math.pi*5/6]).T,
                        camera=IdealCamera(world_map),
                        color="red")
    agent2 = FixedInputAgent(robot2, vel=0.2, omega=10.0/180*math.pi)

    world = World(time_span=1000, time_interval=0.1)
    world.append_agent(agent1)
    world.append_agent(agent2)
    world.append_object(world_map)

    world.draw()
def init_landmarks():
    # Replace with the absolute path to your landmarks file before launch
    # Path on the robot laptop
    # '/data/private/robot/robotics/src/project/2d_nav/data/map_landmarks.csv'
    my_path = path.abspath(path.dirname(__file__))
    my_path = path.join(my_path, 'data/map_landmarks.csv')
    print(my_path)
    f = open(my_path, 'r')

    line = f.readline()
    landmarks = []
    while line:
        split = line.split(',')
        landmarks.append(Landmark(split))
        line = f.readline()
    return landmarks
Example no. 28
def reconstruct(K, frame1, frame2, matches, inliers, landmark_map):
    '''
        Given two frames and matches, reconstruct 3D landmarks in the world frame.
        landmark_map, which maps index -> Landmark, will be updated.
        Newly reconstructed landmarks in the world frame will be returned.
    '''
    new_matched_pts1 = []
    new_matched_kps_idx1 = []
    new_matched_pts2 = []
    new_matched_kps_idx2 = []
    for i, match in enumerate(matches):
        if inliers[i]:
            if frame1.landmark_idx[
                    match.queryIdx] < 0:  # newly reconstructed landmark
                new_matched_pts1.append(frame1.kps[match.queryIdx].pt)
                new_matched_kps_idx1.append(match.queryIdx)
                new_matched_pts2.append(frame2.kps[match.trainIdx].pt)
                new_matched_kps_idx2.append(match.trainIdx)
            else:  # previously constructed landmark, already in landmark_map
                landmark_idx = frame1.landmark_idx[match.queryIdx]
                assert (landmark_idx in landmark_map)
                landmark_map[landmark_idx].add_observation(
                    frame2.pose, frame2.kps[match.trainIdx].pt)
                frame2.landmark_idx[match.trainIdx] = landmark_idx
    matched_pts1 = np.array(new_matched_pts1)
    matched_pts2 = np.array(new_matched_pts2)
    P1_w2l = K.dot(pose.get_3x4_pose_mat(pose.get_inverse_pose(frame1.pose)))
    P2_w2l = K.dot(pose.get_3x4_pose_mat(pose.get_inverse_pose(frame2.pose)))
    point3d_homo = cv.triangulatePoints(P1_w2l, P2_w2l, matched_pts1.T,
                                        matched_pts2.T)
    point3d_homo /= point3d_homo[3:, :]
    pts_3d = point3d_homo[:3, :].T.tolist()

    new_landmark_idx = 0
    if len(landmark_map):
        new_landmark_idx = max(landmark_map.keys()) + 1
    for i, pt_3d in enumerate(pts_3d):
        assert (new_landmark_idx not in landmark_map)
        landmark_map[new_landmark_idx] = Landmark(pt_3d)
        landmark_map[new_landmark_idx].add_observation(
            frame1.pose, frame1.kps[new_matched_kps_idx1[i]].pt)
        frame1.landmark_idx[new_matched_kps_idx1[i]] = new_landmark_idx
        landmark_map[new_landmark_idx].add_observation(
            frame2.pose, frame2.kps[new_matched_kps_idx2[i]].pt)
        frame2.landmark_idx[new_matched_kps_idx2[i]] = new_landmark_idx
        new_landmark_idx += 1
    return pts_3d
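The core of reconstruct is two-view triangulation followed by dehomogenization. A minimal sketch with OpenCV and NumPy, using two hypothetical projection matrices and a single matched point (not the repository's frames or poses):

import numpy as np
import cv2 as cv

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])

# Hypothetical world-to-camera projections; the second camera is shifted along x
P1 = K.dot(np.hstack([np.eye(3), np.zeros((3, 1))]))
P2 = K.dot(np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])]))

# Project a known 3D point into both views to fake a matched keypoint pair
X = np.array([0.5, 0.0, 5.0, 1.0])
x1 = P1.dot(X)
x2 = P2.dot(X)
pt1 = (x1[:2] / x1[2]).reshape(2, 1)
pt2 = (x2[:2] / x2[2]).reshape(2, 1)

point4d = cv.triangulatePoints(P1, P2, pt1, pt2)
point4d /= point4d[3:, :]        # dehomogenize, as done in reconstruct()
print(point4d[:3, :].T)          # recovers approximately [0.5, 0.0, 5.0]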
Example no. 29
File: main.py Project: achic19/lead
    def __init__(self, neighborhood, folder, what_to_run, landmark_function_to_run, name):
        """
        :param neighborhood: which neighborhood to examine for accessibility
        :param folder: folder in which to save the results
        :param what_to_run: during development not all the functions should run
        :param landmark_function_to_run: this class has many steps, so sometimes only part of them are run
        """
        # build the network based on the neighborhood
        # save the absolute path to pedestrian_flow_model before changing the workspace folder
        pedestrian_flow_folder = os.path.dirname(__file__) + '/pedestrian_flow'
        os.chdir(folder)
        if what_to_run['Network']:
            print('run Network')
            Network(neighborhood, name + '_ntwrk.shp')

        if what_to_run['get_features']:
            print(' get_features')
            gdb = gpd.read_file(name + '_ntwrk.shp')
            GetFeatures(gdb, neighborhood, name + '_features.shp')

        if what_to_run['pedestrian flow']:
            print(' pedestrian flow')
            GetFeatures.calculate_padestrain_flow(pedestrian_flow_folder + '/finalized_model.sav',
                                                  name + '_features.shp',
                                                  pedestrian_flow_folder, name + '_ped_flow.shp')

        # Calculate landmark criterion
        if what_to_run['landmark']:
            print('run landmark')
            Landmark(landmark_function_to_run, neighborhood, name)

        # Calculate waytype criterion
        if what_to_run['WayType']:
            print('run WayType')
            WayType(name)

        # Calculate complexity criterion
        if what_to_run['Complexity']:
            print('run Complexity')
            Complexity(name)

        # Calculate final cost
        if what_to_run['Final']:
            print('run Final')
            Final(name)
Example no. 30
def test_move():
    robot = RobotBin(20, 175, 'car.png', None, [], [], None)
    hfilter = HistogramFilter(1, 6, 6, [Landmark(0, 0, None)], robot)
    hfilter.belief = np.array([[1., 1., 1., 1., 1., 1.],
                               [1., 1., 1., 1., 2., 4.],
                               [1., 6., 7., 4., 1., 1.],
                               [1., 6.5, 10., 1., 1., 1.],
                               [1., 8., 3., 3., 1., 1.],
                               [1., 1., 1., 1., 1., 1.]])

    hfilter.motion_update((0, 0))
    hfilter.motion_update((3, 2))

    correct = np.array([[
        0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
        0.00000000e+00, 0.00000000e+00
    ],
                        [
                            0.00000000e+00, 0.00000000e+00, 5.59269337e-13,
                            3.29315482e-09, 3.29371409e-09, 3.29315482e-09
                        ],
                        [
                            0.00000000e+00, 0.00000000e+00, 1.69798993e-04,
                            9.99830198e-01, 9.99999997e-01, 9.99830198e-01
                        ],
                        [
                            0.00000000e+00, 0.00000000e+00, 1.69798994e-04,
                            9.99830201e-01, 1.00000002e+00, 9.99830221e-01
                        ],
                        [
                            0.00000000e+00, 0.00000000e+00, 1.69798994e-04,
                            1.00067920e+00, 5.99932079e+00, 6.99864160e+00
                        ],
                        [
                            0.00000000e+00, 0.00000000e+00, 1.69798993e-04,
                            1.00076409e+00, 6.49966038e+00, 9.99770767e+00
                        ]])
    np.testing.assert_allclose(hfilter.belief, correct, rtol=1e-5)
Example no. 31
def read_and_fill(category, address, days):
    """
    Reads the information about the landmarks from the chosen category.
    Returns a list of landmarks.
    """
    landmarks = []

    db_con = sqlite3.connect('landmarks.db')
    db = db_con.cursor()

    for row in db.execute('SELECT * FROM {}'.format(category)):
        # row[1] and row[2] - coordinates of the landmark
        forecast_data = get_forecast_info(row[1], row[2], days)

        if forecast_data is None:
            return []

        if forecast_data['list'][days - 1]['weather'][0]['main'] == 'Clear' or\
                forecast_data['list'][days - 1]['weather'][0]['main'] == \
                'Clouds':
            landmarks.append(Landmark(row[0], row[1], row[2]))

            # was forecast_data['list'][1]['clouds']['all'] before
            # forecast_data['list'][1]['temp']['day']['main']
            landmarks[-1].set_forecast_data(
                forecast_data['list'][days - 1]['clouds'],
                forecast_data['list'][days - 1]['temp']['day'])

            if address is None:
                duration = get_duration(row[1], row[2])
                landmarks[-1].set_travel_duration(duration)
            else:
                duration = get_duration_from_address(address, row[1], row[2])
                landmarks[-1].set_travel_duration(duration)

    return landmarks
	def predictionAndUpdateOneParticle(self, X, dt, dt2, noise, keypoints, step, P, X1, P1, dt_camera, noise_o):
		
		"""
		FastSLAM1.0
		"""
		
		weight = 0.0 # weight (return value)
		weights = []
		count_of_first_observation = 0
		
		# 姿勢予測 prediction of position
		X_ = Particle()
		X_.landmarks = X.landmarks
		X_.x = X.x + dt*X.v + dt2*X.a + noise
		#X_.v = X.v + dt*X.a
		#X_.v = X.v + dt*X.a + noise*25
		#X_.v = X.v + dt*X.a + noise*50
		X_.a = X.a
		X_.o = X.o
		#X_.o = X.o + noise_o
		
		# 速度調整 velocity adjustment
		#X_.v = ((X_.x - X1.x)/dt_camera)
		X_.v = ((X_.x - X1.x)/dt_camera)*0.25
		#X_.v = ((X_.x - X1.x)/dt_camera)*0.5
		
		# 共面条件モデルのための計算 Calc for Coplanarity
		xyz = np.array([X_.x[0] - X1.x[0], X_.x[1] - X1.x[1], X_.x[2] - X1.x[2]])
		
		# Collect all keys of the previous landmarks; they are deleted later
		prevLandmarkKeys = []
		for key in X_.landmarks:
			prevLandmarkKeys.append(key)
		
		for keypoint in keypoints:
			
			# ---------------------------- #
			# Calc weight of Inverse Depth #
			# ---------------------------- #
			# previous landmark id
			prevLandmarkId = (step-1)*10000 + keypoint.prevIndex
			# new landmark id
			landmarkId = step*10000 + keypoint.index
			# The landmark is already observed or not?
			if prevLandmarkId not in X_.landmarks:
				# First observation
				# Initialize landmark and append to particle
				landmark = Landmark()
				landmark.initPrev(X1, keypoint, P1, self.focus)
				X_.landmarks[landmarkId] = landmark
				if(self.count == 0):
					count_of_first_observation += 1
			else:
				# Already observed
				X_.landmarks[landmarkId] = X_.landmarks[prevLandmarkId]
				#del X_.landmarks[prevLandmarkId]
			# Update landmark and calc weight
			# Observation z				
			z = np.array([keypoint.x, keypoint.y])
			# Calc h and H (Jacobian matrix of h)
			h, Hx, H = X_.landmarks[landmarkId].calcObservation(X_, self.focus)
			# Kalman filter (Landmark update)
			S = H.dot(X_.landmarks[landmarkId].sigma.dot(H.T)) + self.R
			Sinv = np.linalg.inv(S)
			K = X_.landmarks[landmarkId].sigma.dot(H.T.dot(Sinv))
			X_.landmarks[landmarkId].mu += K.dot(z - h)
			X_.landmarks[landmarkId].sigma = X_.landmarks[landmarkId].sigma - K.dot(H.dot(X_.landmarks[landmarkId].sigma))
			# Calc weight
			w = 0.0
			try:
				#w = (1.0 / (math.sqrt( np.linalg.det(2.0 * math.pi * S) ))) * np.exp( -0.5 * ( (z-h).T.dot(Sinv.dot(z-h)) ) )
				#w = (1.0 / (math.sqrt( np.linalg.det(2.0 * math.pi * self.R) ))) * np.exp( -0.5 * ( (z-h).T.dot(self.Rinv.dot(z-h)) ) )
				# log likelihood
				#w = np.log(1.0 / (math.sqrt( np.linalg.det(2.0 * math.pi * self.R) ))) + ( -0.5 * ( (z-h).T.dot(self.Rinv.dot(z-h)) ) )
				w = ( -0.5 * ( (z-h).T.dot(self.Rinv.dot(z-h)) ) )
			except:
				print("Error on calc inverse weight ******")
				w = 0.0
			
			# -------------------------- #
			# Calc weight of Coplanarity #
			# -------------------------- #
			# Generate uvw1 (time:t-1)
			uvf1 = np.array([keypoint.x1, -keypoint.y1, -self.focus]) # Camera coordinates -> Device coordinates
			rotX = Util.rotationMatrixX(X1.o[0])
			rotY = Util.rotationMatrixY(X1.o[1])
			rotZ = Util.rotationMatrixZ(X1.o[2])
			# uvw1 = R(z)R(y)R(x)uvf1
			uvw1 = np.dot(rotZ,np.dot(rotY,np.dot(rotX,uvf1)))
			uvw1 /= 100.0 # Adjust scale to decrease calculation error. This does not affect the estimation.
			# Generate uvw2 (time:t)
			uvf2 = np.array([keypoint.x, -keypoint.y, -self.focus]) # Camera coordinates -> Device coordinates
			rotX = Util.rotationMatrixX(X_.o[0])
			rotY = Util.rotationMatrixY(X_.o[1])
			rotZ = Util.rotationMatrixZ(X_.o[2])
			# uvw2 = R(z)R(y)R(x)uvf2
			uvw2 = np.dot(rotZ,np.dot(rotY,np.dot(rotX,uvf2)))
			uvw2 /= 100.0 # Adjust scale to decrease calculation error. This does not affect the estimation.
			# Generate coplanarity matrix
			coplanarity_matrix = np.array([xyz,uvw1,uvw2])
			# Calc determinant
			determinant = np.linalg.det(coplanarity_matrix)
			# Weight
			w_coplanarity = 0.0
			try:
				#w_coplanarity = (1.0 / (math.sqrt( 2.0 * math.pi * self.noise_coplanarity**2 ))) * np.exp((determinant**2) / (-2.0 * (self.noise_coplanarity**2)) )
				# log likelihood				
				#w_coplanarity = np.log(1.0 / (math.sqrt( 2.0 * math.pi * self.noise_coplanarity**2 ))) + ((determinant**2) / (-2.0 * (self.noise_coplanarity**2)) )				
				w_coplanarity = ((determinant**2) / (-2.0 * (self.noise_coplanarity**2)) )
			except:
				print("Error on calc coplanarity weight ******")
				w_coplanarity = 0.0
			
			# --------------------------- #
			# inverse depth * coplanarity #
			# --------------------------- #
			if(self.count == 0):
				#print(w),
				#print(w_coplanarity)
				pass
			
			w = w + w_coplanarity  # use inverse depth * coplanarity log likelihood
			#w = w_coplanarity      # use only coplanarity log likelihood
			#w = w                  # use only inverse depth log likelihood
			
			weights.append(w)
			
		# ----------------------------------- #
		# sum log likelihood of all keypoints #
		# ----------------------------------- #
		for i,w in enumerate(weights):
			if(i==0):
				weight = w
			else:
				weight += w
				
		# ----------------------------- #
		# Average of log likelihood sum #
		# ----------------------------- #
		weight /= float(len(weights))
		
		###############################
		#print("weight "+str(weight))
		if(self.count == 0):
			#print("weight "+str(weight))
			#print("first_ratio "+str(float(count_of_first_observation)/float(len(keypoints)))),
			#print("("+str(count_of_first_observation)+"/"+str(len(keypoints))+")")
			pass
		###########################
			
		# Delete all previous landmarks
		for key in prevLandmarkKeys:
			del X_.landmarks[key]
			
		self.count+=1
		
		return X_, weight
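The coplanarity weight in the function above stacks the inter-frame displacement and the two rotated viewing rays into a 3x3 matrix and scores its determinant with a Gaussian log-likelihood. A standalone NumPy sketch of that computation, with hypothetical vectors and noise level:

import numpy as np

noise_coplanarity = 0.01                # hypothetical value, not the repository's setting

xyz = np.array([0.10, 0.00, 0.02])      # camera displacement between frames (x_t - x_{t-1})
uvw1 = np.array([0.01, -0.02, -1.0])    # rotated viewing ray of the keypoint at time t-1
uvw2 = np.array([0.03, -0.02, -1.0])    # rotated viewing ray of the same keypoint at time t

determinant = np.linalg.det(np.array([xyz, uvw1, uvw2]))

# log of an (unnormalized) Gaussian likelihood, matching the w_coplanarity term above
w_coplanarity = (determinant ** 2) / (-2.0 * noise_coplanarity ** 2)
print(determinant, w_coplanarity)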