Example #1
    def test_tform_w2e_numpy_point(self):
        # In left-handed z-up coordinate system
        location = carla.Location(10, 0, 0)
        # In right-handed z-down coordinate system
        rotation = carla.Rotation(yaw=90)
        carla_transform = carla.Transform(location, rotation)

        tform = Transform(carla_transform)

        # In right-handed z-up coordinate system
        np_pts_in_world = np.array([0, -10, 0])
        # In right-handed z-up coordinate system
        np_pts_in_ego = tform.tform_w2e_numpy_array(np_pts_in_world)

        np.testing.assert_array_almost_equal(
            np_pts_in_ego,
            np.array([10, -10, 0]).reshape((3, -1)))
Example #2
    def test_rot_e2w_numpy_points(self):
        # In left-handed z-up coordinate system
        location = carla.Location(10, 0, 0)
        # In right-handed z-down coordinate system
        rotation = carla.Rotation(yaw=90)
        carla_transform = carla.Transform(location, rotation)

        tform = Transform(carla_transform)

        # In right-handed z-up coordinate system
        np_pts_in_ego = np.array([[0, 10, 0], [10, -10, 0]]).T
        # In right-handed z-up coordinate system
        np_pts_in_world = tform.rot_e2w_numpy_array(np_pts_in_ego)

        np.testing.assert_array_almost_equal(
            np_pts_in_world,
            np.array([[10, 0, 0], [-10, -10, 0]]).T.reshape((3, -1)))
Example #3
    def test_tform_e2w_carla_vector3D(self):
        # In left-handed z-up coordinate system
        location = carla.Location(10, 0, 0)
        # In right-handed z-down coordinate system
        rotation = carla.Rotation(yaw=90)
        carla_transform = carla.Transform(location, rotation)

        tform = Transform(carla_transform)

        # In left-handed z-up coordinate system
        location_in_ego = carla.Location(10, 10, 0)

        # In right-handed z-up coordinate system
        np_pt_in_world = tform.tform_e2w_carla_vector3D(location_in_ego)

        np.testing.assert_array_almost_equal(np_pt_in_world,
                                             np.array([0, -10, 0]))
Example #4
    def test_tform_w2e_carla_vector3D(self):
        # In left-handed z-up coordinate system
        location = carla.Location(10, 0, 0)
        # In right-handed z-down coordinate system
        rotation = carla.Rotation(yaw=90)
        carla_transform = carla.Transform(location, rotation)

        tform = Transform(carla_transform)

        # In left-handed z-up coordinate system
        location_in_world = carla.Location(0, 10, 0)

        # In right-handed z-up coordinate system
        np_pt_in_ego = tform.tform_w2e_carla_vector3D(location_in_world)

        self.assertAlmostEqual(np_pt_in_ego[0], 10.0)
        self.assertAlmostEqual(np_pt_in_ego[1], -10.0)
        self.assertAlmostEqual(np_pt_in_ego[2], 0.0)
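The tests in Examples #1–#4 all appear to rely on the same convention: going from CARLA's left-handed z-up frame to the right-handed z-up frame flips the y coordinate and the yaw sign. The following is a minimal, hypothetical sketch of that conversion and of the world-to-ego transform (not the repo's actual Transform class); with the pose from Example #1 it reproduces the asserted result.

import numpy as np

def carla_pose_to_right_handed(x, y, z, yaw_deg):
    """CARLA left-handed z-up pose -> (t, R) in the right-handed z-up frame (illustrative only)."""
    t = np.array([x, -y, z])                  # flip y when leaving the left-handed frame
    yaw = -np.deg2rad(yaw_deg)                # flip yaw sign and convert deg -> rad
    c, s = np.cos(yaw), np.sin(yaw)
    R = np.array([[c, -s, 0.],
                  [s,  c, 0.],
                  [0., 0., 1.]])              # roll/pitch ignored for brevity
    return t, R

def tform_w2e(t, R, pt_world):
    """World -> ego: p_ego = R.T @ (p_world - t)."""
    return R.T @ (np.asarray(pt_world, dtype=float) - t)

t, R = carla_pose_to_right_handed(10, 0, 0, yaw_deg=90)   # the pose used in Example #1
print(tform_w2e(t, R, [0, -10, 0]))                        # approx. [10, -10, 0]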
Example #5
    def update(self):
        """ Wait for IMU measurement and update data. """
        # get() blocks the script so synchronization is guaranteed
        event = self._queue.get()

        # Convert to right-handed z-up frame
        self.data['timestamp'] = event.timestamp
        self.data['accel_x'] = event.accelerometer.x
        self.data['accel_y'] = -event.accelerometer.y
        self.data['accel_z'] = event.accelerometer.z
        self.data['gyro_x'] = event.gyroscope.x
        self.data['gyro_y'] = -event.gyroscope.y
        self.data['gyro_z'] = -event.gyroscope.z

        # Velocities
        vel = self._parent.get_velocity()
        tform = Transform(self._parent.get_transform())
        # Transform velocities from Carla world frame (left-handed) to ego frame (right-handed)
        ego_vel = tform.rot_w2e_carla_vector3D(vel)  # an np 3D vector
        self.data['vx'] = ego_vel[0] + \
            self._delta_seconds * self.data['accel_x']
        self.data['vy'] = ego_vel[1] + \
            self._delta_seconds * self.data['accel_y']
        self._add_velocity_noise()
Example #6
def get_fbumper_location(raxle_location, raxle_orientation,
                         dist_raxle_to_fbumper):
    """
    Helper function to get the front bumper's location in the right-handed z-up frame given the pose of the rear axle.

    Input:
        raxle_location: Array-like (x, y, z) coordinate of the rear axle.
        raxle_orientation: Array-like (roll, pitch, yaw) of the rear axle in rad.
        dist_raxle_to_fbumper: Distance from the rear axle to the front bumper.
    Output:
        1D numpy.array representing a 3D point in the right-handed z-up coordinate system.
    """
    tform = Transform.from_conventional(raxle_location, raxle_orientation)
    fbumper_pt_in_ego = np.array([dist_raxle_to_fbumper, 0, 0])
    # make it 1D
    return tform.tform_e2w_numpy_array(fbumper_pt_in_ego).squeeze()
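A quick usage sketch of the helper above with made-up numbers (assuming Transform.from_conventional behaves as in the tests of Examples #1–#7): a rear axle at (10, 0, 0) heading +90° (pi/2 rad) with a 3.5 m axle-to-bumper distance puts the front bumper roughly at (10, 3.5, 0).

import numpy as np

raxle_location = np.array([10., 0., 0.])            # right-handed z-up
raxle_orientation = np.array([0., 0., np.pi / 2])   # roll, pitch, yaw in rad
fbumper = get_fbumper_location(raxle_location, raxle_orientation,
                               dist_raxle_to_fbumper=3.5)
# fbumper is approximately [10., 3.5, 0.]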
Example #7
    def test_from_conventional_with_column_vector(self):
        # In right-handed z-up coordinate system
        conventional_location = np.array([0, 10, 10]).reshape((3, 1))
        conventional_orientation = np.array([0, 0, np.pi / 2]).reshape((3, 1))
        tform = Transform.from_conventional(conventional_location,
                                            conventional_orientation)

        # In right-handed z-up coordinate system
        np_pts_in_world = np.array([0, -10, 0])
        # In right-handed z-up coordinate system
        np_pts_in_ego = tform.tform_w2e_numpy_array(np_pts_in_world)

        np.testing.assert_array_almost_equal(
            np_pts_in_ego,
            np.array([-20, 0, -10]).reshape((3, -1)))
Example #8
    def update(self, location, rotation):
        """
        Update the road surface stop sign ground truth at the current tick.

        The ground truth result is wrt the given pose.
        e.g. If the front bumper's pose is given, the resulting stop sign ground truth is wrt the front bumper.

        Input:
            location: Array-like 3D query point in world (right-handed z-up).
            rotation: Array-like (roll, pitch, yaw) rotation in rad (right-handed z-up).
        Output:
            Dictionary container for the ground truth.
        """
        # Find stop signs on road in the neighborhood
        nearest_idc = self._kd_coords.query_ball_point(location[0:3],
                                                       r=self._radius)

        if not nearest_idc:
            self.gt['visible_rs_stop'] = None
            return self.gt

        # Pick out coordinates of interest
        candidate_coords = []
        for idx in nearest_idc:
            # Compare yaw angles. Only pick those within the yaw difference threshold.
            curr_sign = self.rs_stops[idx]
            yaw_diff = abs(curr_sign.yaw - rotation[2])

            if ((yaw_diff < self._max_yaw_diff)
                    or (np.pi - self._max_yaw_diff < yaw_diff <
                        np.pi + self._max_yaw_diff)
                    or (yaw_diff > 2 * np.pi - self._max_yaw_diff)):
                candidate_coords.append(self.rs_stop_coords[idx])

        # Make it a numpy array where each column is a 3D point
        candidate_coords = np.asarray(candidate_coords).T

        # Transform the coordinates into ego frame
        w2e_tform = Transform.from_conventional(location, rotation)
        # Each column is a 3D point in ego frame
        candidate_coords_in_ego = w2e_tform.tform_w2e_numpy_array(
            candidate_coords)

        # Filter out signs behind
        candidate_coords_in_ego = candidate_coords_in_ego[
            :, candidate_coords_in_ego[0, :] > 0]

        # Filter out signs with large lateral offsets
        candidate_coords_in_ego = candidate_coords_in_ego[
            :, np.abs(candidate_coords_in_ego[1, :]) < self._max_lateral_offset]

        if candidate_coords_in_ego.size == 0:
            self.gt['visible_rs_stop'] = None
        else:
            # Each column is a 3D point in ego frame
            self.gt['visible_rs_stop'] = candidate_coords_in_ego

        return self.gt
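The yaw gate inside the loop above accepts signs whose yaw differs from the query yaw by roughly 0 or pi (i.e. facing either direction), with the wrap-around near 2*pi handled explicitly. A standalone restatement of that check (hypothetical helper, same logic as the condition above):

import numpy as np

def passes_yaw_gate(sign_yaw, query_yaw, max_yaw_diff):
    """True if the absolute yaw difference is close to 0 or pi (angles in rad)."""
    yaw_diff = abs(sign_yaw - query_yaw)
    return (yaw_diff < max_yaw_diff
            or np.pi - max_yaw_diff < yaw_diff < np.pi + max_yaw_diff
            or yaw_diff > 2 * np.pi - max_yaw_diff)

print(passes_yaw_gate(0.1, 0.0, max_yaw_diff=0.2))          # True  (nearly aligned)
print(passes_yaw_gate(np.pi + 0.1, 0.0, max_yaw_diff=0.2))  # True  (opposite facing)
print(passes_yaw_gate(np.pi / 2, 0.0, max_yaw_diff=0.2))    # False (perpendicular)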
Example #9
    def _find_candidate_markings(self):
        """ 
        Find candidate lane markings within a radius at the current time step.

        Output:
            candidate_markings_in_ego:
                List of candidate lane markings. Each lane marking consists of a list of 
                3D points in the ego frame (right-handed z-up).
            candidate_markings:
                List containing carla.LaneMarking objects corresponding to the points
                in candidate_markings_in_ego.
        """
        # Object for transforming a carla.Location in carla's world frame (left-handed z-up)
        # into our front bumper's frame (right-handed z-up)
        tform = Transform(self._carla_tform)
        waypt_ego_frame = tform.tform_w2e_carla_vector3D(
            self.waypoint.transform.location)

        # Container lists
        # A list of points of each candidate marking
        # Each candidate marking has a list of 3D points in front bumper's frame (z-up)
        candidate_markings_in_ego = []
        # A 2D list of carla.LaneMarking objects for each candidate marking point
        candidate_markings = []

        # Left
        # Initialization with the current waypoint
        left_marking_waypt = self.waypoint
        cum_dist = waypt_ego_frame[1] + \
            0.5 * left_marking_waypt.lane_width
        # Search left till cumulative distance exceeds radius or a None waypoint is reached
        while cum_dist < self._radius:
            # Get left marking of current waypoint
            left_marking = self._get_lane_marking(left_marking_waypt,
                                                  Direction.Left)
            if left_marking.type != carla.LaneMarkingType.NONE:
                # A candidate found
                marking_pts, marking_objs = self._get_marking_pts(
                    left_marking_waypt, tform, Direction.Left)

                candidate_markings_in_ego.append(marking_pts)
                candidate_markings.append(marking_objs)

            # Stop when reaching a curb
            if left_marking.type == carla.LaneMarkingType.Curb:
                break
            # Go to the next left lane
            left_marking_waypt = self._get_next_lane(left_marking_waypt,
                                                     Direction.Left)
            if left_marking_waypt is not None:
                cum_dist += left_marking_waypt.lane_width
            else:
                # Stop when next lane waypoint is None
                break

        # Right
        # Initialization with the current waypoint
        right_marking_waypt = self.waypoint
        cum_dist = waypt_ego_frame[1] + \
            0.5 * right_marking_waypt.lane_width
        # Search right till cumulative distance exceeds radius or a None waypoint is reached
        while cum_dist < self._radius:
            # Get right marking of current waypoint
            right_marking = self._get_lane_marking(right_marking_waypt,
                                                   Direction.Right)
            if right_marking.type != carla.LaneMarkingType.NONE:
                # A candidate found
                marking_pts, marking_objs = self._get_marking_pts(
                    right_marking_waypt, tform, Direction.Right)

                candidate_markings_in_ego.append(marking_pts)
                candidate_markings.append(marking_objs)

            # Stop when reaching a curb
            if right_marking.type == carla.LaneMarkingType.Curb:
                break
            # Go to the next right lane
            right_marking_waypt = self._get_next_lane(right_marking_waypt,
                                                      Direction.Right)
            if right_marking_waypt is not None:
                cum_dist += right_marking_waypt.lane_width
            else:
                # Stop when next lane waypoint is None
                break

        return candidate_markings_in_ego, candidate_markings
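A toy illustration of the termination condition used in both loops above (made-up numbers): the cumulative lateral distance starts at the offset to the current lane's marking and grows by one lane width per step, and the search stops once it exceeds the radius.

radius = 10.0
cum_dist = 1.75                 # e.g. waypt_ego_frame[1] + 0.5 * lane_width
lane_widths = [3.5, 3.5, 3.5]   # widths of the successive neighbor lanes

searched = 0
for width in lane_widths:
    if cum_dist >= radius:
        break
    searched += 1               # this lane's marking would be inspected
    cum_dist += width
print(searched, cum_dist)       # 3 12.25 -> a fourth lane would not be searched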
Example #10
    def error(self, variables):
        ########## Expectation ##########
        pose = variables.at(self.keys()[0])
        location = np.append(pose.translation(), self.z)  # append z
        orientation = np.array([0, 0, pose.so2().theta()])

        if self._first_time:
            # Store the initially guessed pose when computing error the first time
            self._init_tform = Transform.from_conventional(
                location, orientation)
            self._init_orientation = orientation

        if self.static:
            # Static mode
            if self._first_time:
                # First time extracting expected lane boundaries
                fbumper_location = get_fbumper_location(
                    location, orientation, self.px)
                self.in_junction, self.into_junction, self.me_format_expected_markings = self.expected_lane_extractor.extract(
                    fbumper_location, orientation)

                expected_coeffs_list = [expected.get_c0c1_list()
                                        for expected in self.me_format_expected_markings]
                expected_type_list = [expected.type
                                      for expected in self.me_format_expected_markings]

                # The snapshot stores the lane boundaries in their normal forms, i.e. a, b, c, and alpha describing the lines
                self._init_normal_forms = [compute_normal_form_line_coeffs(self.px, c[0], c[1])
                                           for c in expected_coeffs_list]
                # Snapshot of lane boundary types
                self._init_types = expected_type_list

                self._first_time = False
            else:
                # Not first time, use snapshot of lane boundaries extracted the first time to compute error
                # Pose difference is wrt local frame
                pose_diff = self._get_pose_diff(location, orientation)

                # Compute expected lane boundary coefficients using the snapshot
                expected_coeffs_list = []
                for normal_form in self._init_normal_forms:
                    c0c1 = self._compute_expected_c0c1(normal_form, pose_diff)
                    expected_coeffs_list.append(c0c1)
                # Retrieve lane boundary types from snapshot
                expected_type_list = self._init_types
        else:
            # Not static mode
            # Extract ground truth from the Carla server
            fbumper_location = get_fbumper_location(
                location, orientation, self.px)
            self.in_junction, self.into_junction, self.me_format_expected_markings = self.expected_lane_extractor.extract(
                fbumper_location, orientation)

            # List of expected markings' coefficients
            expected_coeffs_list = [expected.get_c0c1_list()
                                    for expected in self.me_format_expected_markings]

            # List of expected markings' type
            expected_type_list = [expected.type
                                  for expected in self.me_format_expected_markings]

        ########## Measurement ##########
        if self.left_marking:
            measured_coeffs_left = np.asarray(
                self.left_marking.get_c0c1_list()).reshape(2, -1)
            measured_type_left = self.left_marking.type

        if self.right_marking:
            measured_coeffs_right = np.asarray(
                self.right_marking.get_c0c1_list()).reshape(2, -1)
            measured_type_right = self.right_marking.type

        # Null hypothesis
        # Use the measurement itself at every optimization iteration as the null hypothesis.
        # This is, of course, just a trick.
        # This means the error for null hypothesis is always zeros.
        null_expected_c0c1_left = self.left_marking.get_c0c1_list()
        null_expected_c0c1_right = self.right_marking.get_c0c1_list()
        null_error = np.zeros((2, 1))   # same for both left and right

        # Compute innovation matrix for the null hypo
        null_noise_cov = self.noise_cov * self.null_std_scale**2

        # Compute measurement likelihood weighted by null probability
        null_weighted_meas_likelihood = self.prob_null * \
            multivariate_normal.pdf(null_error.squeeze(), cov=null_noise_cov)

        # In this implementation, scaling down the error and jacobian is done to achieve
        # the same effect of tuning the information matrix online.
        # Here, however, scaling down the error for the null hypo, i.e.
        # null_error /= self.null_std_scale
        # is not necessary, since it's always zero.
        # Zero error and jacobian effectively result in a zero information matrix as well.

        if self.ignore_junction and (self.in_junction or self.into_junction):
            self._null_hypo_left = True
            self._null_hypo_right = True
        elif not expected_coeffs_list:
            self._null_hypo_left = True
            self._null_hypo_right = True
        else:
            # Data association
            num_expected_markings = len(self.me_format_expected_markings)
            asso_table = np.zeros((2, num_expected_markings+2))

            # Left lane marking
            errors_left = []
            asso_probs = []
            meas_likelihoods = []
            for exp_coeffs, exp_type in zip(expected_coeffs_list, expected_type_list):
                # Compute innovation matrix
                expected_c0, expected_c1 = exp_coeffs
                H = compute_H(self.px, expected_c0, expected_c1)
                innov = H @ self.pose_uncert @ H.T + self.noise_cov

                # Compute squared mahalanobis distance
                error = np.asarray(exp_coeffs).reshape(
                    2, -1) - measured_coeffs_left
                squared_mahala_dist = error.T @ np.linalg.inv(
                    innov) @ error

                # Semantic likelihood
                if self.semantic:
                    # Conditional probability on type
                    sem_likelihood = self._conditional_prob_type(
                        exp_type, measured_type_left)
                else:
                    # Turning off semantic association is equivalent to always
                    # setting the semantic likelihood to 1.0
                    sem_likelihood = 1.0

                # Gating (geometric and semantic)
                # Reject both geometrically and semantically unlikely associations
                # Note:
                # Since the location distribution across lanes is essentially multimodal,
                # the geometric gating commonly used under a unimodal assumption is not
                # very reasonable here and will inevitably reject possible associations.
                # The geometric gate is therefore set very large so we can preserve associations
                # that are a bit far from the current mode (the only mode present in the
                # optimization) but still plausible once the multimodal nature is considered.
                # Alternatively, we can simply give up geometric gating and use semantic gating only.
                # The large geometric gate is an inelegant remedy after all.
                # if squared_mahala_dist <= self.geo_gate and sem_likelihood > self.sem_gate:
                if sem_likelihood > self.sem_gate:
                    errors_left.append(error)

                    # Measurement likelihood (based on noise cov)
                    meas_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=self.noise_cov)

                    # Geometric likelihood (based on innov)
                    geo_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=innov)

                    meas_likelihoods.append(meas_likelihood)
                    asso_prob = geo_likelihood * sem_likelihood
                    asso_probs.append(asso_prob)
                else:
                    errors_left.append(None)
                    meas_likelihoods.append(0)
                    asso_probs.append(0)

            asso_probs = np.asarray(asso_probs)
            meas_likelihoods = np.asarray(meas_likelihoods)

            # Compute weights based on total probability theorem
            if asso_probs.sum():
                weights_left = (1-self.prob_null) * \
                    (asso_probs/np.sum(asso_probs))
            else:
                weights_left = np.zeros(asso_probs.shape)

            # Weight measurement likelihoods
            weighted_meas_likelihood = weights_left*meas_likelihoods

            # Add weight and weighted likelihood of null hypothesis
            weights_left = np.insert(weights_left, 0, [self.prob_null, 0])
            weighted_meas_likelihood = np.insert(
                weighted_meas_likelihood, 0, [null_weighted_meas_likelihood, 0])

            asso_table[0, :] = weighted_meas_likelihood

            # Right marking
            errors_right = []
            asso_probs = []
            meas_likelihoods = []
            for exp_coeffs, exp_type in zip(expected_coeffs_list, expected_type_list):
                # Compute innovation matrix
                expected_c0, expected_c1 = exp_coeffs
                H = compute_H(self.px, expected_c0, expected_c1)
                innov = H @ self.pose_uncert @ H.T + self.noise_cov

                # Compute squared mahalanobis distance
                error = np.asarray(exp_coeffs).reshape(
                    2, -1) - measured_coeffs_right
                squared_mahala_dist = error.T @ np.linalg.inv(
                    innov) @ error

                # Semantic likelihood
                if self.semantic:
                    # Conditional probability on type
                    sem_likelihood = self._conditional_prob_type(
                        exp_type, measured_type_right)
                else:
                    # Turning off semantic association is equivalent to always
                    # setting the semantic likelihood to 1.0
                    sem_likelihood = 1.0

                # Gating (geometric and semantic)
                # Reject both geometrically and semantically unlikely associations
                # Note:
                # Since the location distribution across lanes is essentially multimodal,
                # the geometric gating commonly used under a unimodal assumption is not
                # very reasonable here and will inevitably reject possible associations.
                # The geometric gate is therefore set very large so we can preserve associations
                # that are a bit far from the current mode (the only mode present in the
                # optimization) but still plausible once the multimodal nature is considered.
                # Alternatively, we can simply give up geometric gating and use semantic gating only.
                # The large geometric gate is an inelegant remedy after all.
                # if squared_mahala_dist <= self.geo_gate and sem_likelihood > self.sem_gate:
                if sem_likelihood > self.sem_gate:
                    errors_right.append(error)

                    # Measurement likelihood (based on noise cov)
                    meas_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=self.noise_cov)

                    # Geometric likelihood (based on innov)
                    geo_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=innov)

                    meas_likelihoods.append(meas_likelihood)
                    asso_prob = geo_likelihood * sem_likelihood
                    asso_probs.append(asso_prob)
                else:
                    errors_right.append(None)
                    meas_likelihoods.append(0)
                    asso_probs.append(0)

            asso_probs = np.asarray(asso_probs)
            meas_likelihoods = np.asarray(meas_likelihoods)

            # Compute weights based on total probability theorem
            if asso_probs.sum():
                weights_right = (1-self.prob_null) * \
                    (asso_probs/np.sum(asso_probs))
            else:
                weights_right = np.zeros(asso_probs.shape)

            # Weight measurement likelihoods
            weighted_meas_likelihood = weights_right*meas_likelihoods

            # Add weight and weighted likelihood of null hypothesis
            weights_right = np.insert(weights_right, 0, [0., self.prob_null])
            weighted_meas_likelihood = np.insert(
                weighted_meas_likelihood, 0, [0., null_weighted_meas_likelihood, ])

            asso_table[1, :] = weighted_meas_likelihood

            # GNN association
            # This is performed at every optimization step, so this factor is essentially
            # a max-mixture factor, except that the max operation is replaced by the
            # linear sum assignment operation.

            # Take the log so the association result maximizes the product of the likelihoods
            # of the associations of both sides
            log_asso_table = np.log(asso_table)
            _, col_idc = lsa(log_asso_table, maximize=True)

            asso_idx_left = col_idc[0]
            asso_idx_right = col_idc[1]

            # Left association
            if asso_idx_left == 0:
                # Null hypothesis
                self._null_hypo_left = True
            else:
                self._null_hypo_left = False
                chosen_error_left = errors_left[asso_idx_left-2]
                self.chosen_expected_coeffs_left = expected_coeffs_list[asso_idx_left-2]
                self._scale_left = weights_left[asso_idx_left]

            # Right association
            if asso_idx_right == 1:
                # Null hypothesis
                self._null_hypo_right = True
            else:
                self._null_hypo_right = False
                chosen_error_right = errors_right[asso_idx_right-2]
                self.chosen_expected_coeffs_right = expected_coeffs_list[asso_idx_right-2]
                self._scale_right = weights_right[asso_idx_right]

        if self._null_hypo_left:
            chosen_error_left = null_error
            self.chosen_expected_coeffs_left = null_expected_c0c1_left
        if self._null_hypo_right:
            chosen_error_right = null_error
            self.chosen_expected_coeffs_right = null_expected_c0c1_right

        chosen_error = np.concatenate((chosen_error_left, chosen_error_right))

        return chosen_error
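In the association table above, columns 0 and 1 are reserved for the left and right null hypotheses, and real candidates start at column 2, which is why the chosen column indices are offset by 2 when indexing errors_left/errors_right. Below is a toy numeric sketch of the GNN step with hypothetical likelihood values (tiny placeholders stand in for the structural zeros so the log stays finite).

import numpy as np
from scipy.optimize import linear_sum_assignment as lsa

prob_null = 0.2
null_likelihood = 0.3                      # weighted null measurement likelihood
left_likelihoods = np.array([0.5, 0.1])    # two candidate markings, left side
right_likelihoods = np.array([0.05, 0.6])  # same candidates, right side
eps = 1e-12                                # placeholder for the structural zeros

asso_table = np.array([
    [prob_null * null_likelihood, eps,
     *((1 - prob_null) * left_likelihoods)],
    [eps, prob_null * null_likelihood,
     *((1 - prob_null) * right_likelihoods)],
])

_, col_idc = lsa(np.log(asso_table), maximize=True)
print(col_idc)   # [2 3]: left matches candidate 0, right matches candidate 1;
                 # a result of 0 or 1 would have meant the null hypothesis.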
Example #11
    def error(self, variables):
        ########## Expectation ##########
        pose = variables.at(self.keys()[0])
        location = np.append(pose.translation(), self.z)  # append z
        orientation = np.array([0, 0, pose.so2().theta()])

        if self._first_time:
            # Store the initially guessed pose when computing error the first time
            self._init_tform = Transform.from_conventional(
                location, orientation)
            self._init_orientation = orientation

        if self.static:
            # Static mode
            if self._first_time:
                # First time extracting expected lane boundaries
                fbumper_location = get_fbumper_location(
                    location, orientation, self.px)
                self.in_junction, self.into_junction, self.me_format_expected_markings = self.expected_lane_extractor.extract(
                    fbumper_location, orientation)

                expected_coeffs_list = [expected.get_c0c1_list()
                                        for expected in self.me_format_expected_markings]
                expected_type_list = [expected.type
                                      for expected in self.me_format_expected_markings]

                # The snapshot stores the lane boundaries in their normal forms, i.e. a, b, c, and alpha describing the lines
                self._init_normal_forms = [compute_normal_form_line_coeffs(self.px, c[0], c[1])
                                           for c in expected_coeffs_list]
                # Snapshot of lane boundary types
                self._init_types = expected_type_list

                self._first_time = False
            else:
                # Not first time, use snapshot of lane boundaries extracted the first time to compute error
                # Pose difference is wrt local frame
                pose_diff = self._get_pose_diff(location, orientation)

                # Compute expected lane boundary coefficients using the snapshot
                expected_coeffs_list = []
                for normal_form in self._init_normal_forms:
                    c0c1 = self._compute_expected_c0c1(normal_form, pose_diff)
                    expected_coeffs_list.append(c0c1)
                # Retrieve lane boundary types from snapshot
                expected_type_list = self._init_types
        else:
            # Not static mode
            # Extract ground truth from the Carla server
            fbumper_location = get_fbumper_location(
                location, orientation, self.px)
            self.in_junction, self.into_junction, self.me_format_expected_markings = self.expected_lane_extractor.extract(
                fbumper_location, orientation)

            # List of expected markings' coefficients
            expected_coeffs_list = [expected.get_c0c1_list()
                                    for expected in self.me_format_expected_markings]

            # List of expected markings' type
            expected_type_list = [expected.type
                                  for expected in self.me_format_expected_markings]

        ########## Measurement ##########
        measured_coeffs = np.asarray(
            self.detected_marking.get_c0c1_list()).reshape(2, -1)
        measured_type = self.detected_marking.type

        # Null hypothesis
        # Use the measurement itself at every optimization iteration as the null hypothesis.
        # This is, of course, just a trick.
        # This means the error for null hypothesis is always zeros.
        null_expected_c0c1 = measured_coeffs.squeeze().tolist()
        null_error = np.zeros((2, 1))

        # Compute innovation matrix for the null hypo
        null_noise_cov = self.noise_cov * self.null_std_scale**2

        # Compute measurement likelihood weighted by null probability
        null_weighted_meas_likelihood = self.prob_null * \
            multivariate_normal.pdf(null_error.squeeze(), cov=null_noise_cov)

        # In this implementation, scaling down the error and jacobian is done to achieve
        # the same effect of tuning the information matrix online.
        # Here, however, scaling down the error for the null hypo, i.e.
        # null_error /= self.null_std_scale
        # is not necessary, since it's always zero.
        # Zero error and jacobian effectively result in a zero information matrix as well.

        if self.ignore_junction and (self.in_junction or self.into_junction):
            self._null_hypo = True
        elif not expected_coeffs_list:
            self._null_hypo = True
        else:
            # Data association
            errors = [null_error]
            gated_coeffs_list = [null_expected_c0c1]
            asso_probs = []
            meas_likelihoods = []
            sem_likelihoods = []
            for exp_coeffs, exp_type in zip(expected_coeffs_list, expected_type_list):
                # Compute innovation matrix
                expected_c0, expected_c1 = exp_coeffs
                H = compute_H(self.px, expected_c0, expected_c1)
                innov = H @ self.pose_uncert @ H.T + self.noise_cov

                # Compute squared mahalanobis distance
                error = np.asarray(exp_coeffs).reshape(
                    2, -1) - measured_coeffs
                # Mahalanobis distance is only needed for geometric gating
                # squared_mahala_dist = error.T @ np.linalg.inv(innov) @ error

                # Semantic likelihood
                if self.semantic:
                    # Conditional probability on type
                    sem_likelihood = self._conditional_prob_type(
                        exp_type, measured_type)
                else:
                    # Turning off semantic association is equivalent to always
                    # setting the semantic likelihood to 1.0
                    sem_likelihood = 1.0

                # Gating (geometric and semantic)
                # Reject both geometrically and semantically unlikely associations
                # Note:
                # Since the location distribution across lanes is essentially multimodal,
                # the geometric gating commonly used under a unimodal assumption is not
                # very reasonable here and will inevitably reject possible associations.
                # The geometric gate is therefore set very large so we can preserve associations
                # that are a bit far from the current mode (the only mode present in the
                # optimization) but still plausible once the multimodal nature is considered.
                # Alternatively, we can simply give up geometric gating and use semantic gating only.
                # The large geometric gate is an inelegant remedy after all.
                # if squared_mahala_dist <= self.geo_gate and sem_likelihood > self.sem_gate:
                if sem_likelihood > self.sem_gate:
                    gated_coeffs_list.append(exp_coeffs)
                    errors.append(error)

                    # Measurement likelihood (based on noise cov)
                    meas_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=self.noise_cov)

                    # Geometric likelihood (based on innov)
                    geo_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=innov)

                    # Due to numerical errors, likelihood can become exactly 0.0
                    # in some very rare cases.
                    # When it happens, simply ignore it.
                    if meas_likelihood > 0.0 and geo_likelihood > 0.0:
                        meas_likelihoods.append(sem_likelihood*meas_likelihood)
                        sem_likelihoods.append(sem_likelihood)
                        asso_prob = geo_likelihood * sem_likelihood
                        asso_probs.append(asso_prob)

            # Check if any possible association exists after gating
            if asso_probs:
                asso_probs = np.asarray(asso_probs)
                meas_likelihoods = np.asarray(meas_likelihoods)

                # Compute weights based on total probability theorem
                weights = (1-self.prob_null) * \
                    (asso_probs/np.sum(asso_probs))
                # Weight measurement likelihoods
                weighted_meas_likelihood = weights*meas_likelihoods

                # Add weight and weighted likelihood of null hypothesis
                weights = np.insert(weights, 0, self.prob_null)
                weighted_meas_likelihood = np.insert(
                    weighted_meas_likelihood, 0, null_weighted_meas_likelihood)

                asso_idx = np.argmax(weighted_meas_likelihood)

                if asso_idx == 0:
                    self._null_hypo = True
                else:
                    self._null_hypo = False
                    self.chosen_expected_coeffs = gated_coeffs_list[asso_idx]
                    # Scale down the hypothesis to account for target uncertainty
                    # This form is empirically chosen
                    self._scale = weights[asso_idx] * sem_likelihoods[asso_idx-1]
                    # Scale down the error based on weight
                    # This is to achieve the same effect of scaling the information matrix during optimization
                    chosen_error = errors[asso_idx] * self._scale
            else:
                self._null_hypo = True

        if self._null_hypo:
            # Null hypothesis
            self.chosen_expected_coeffs = null_expected_c0c1
            chosen_error = null_error

        return chosen_error
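A quick numerical check of the scaling remark above (illustrative values only): scaling the residual by a factor s is equivalent to scaling the information matrix by s**2, since the factor cost is quadratic in the residual.

import numpy as np

e = np.array([[0.3], [0.1]])                   # a 2x1 residual, made-up values
info = np.linalg.inv(np.diag([0.04, 0.01]))    # information = inverse noise covariance
s = 0.5                                        # the scale applied to the error

cost_scaled_error = ((s * e).T @ info @ (s * e)).item()
cost_scaled_info = (e.T @ (s**2 * info) @ e).item()
assert np.isclose(cost_scaled_error, cost_scaled_info)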