コード例 #1
0
  def handle_cam_odom(self, trans: List[float], rot: List[float], trans_std: List[float]) -> Optional[np.ndarray]:
    """Fold one camera-odometry sample into the rolling calibration estimate.

    Returns the sanity-clipped new rpy estimate, or None when the sample is
    rejected as unusable for calibration.
    """
    # Decay the influence of the previously stored calibration over SMOOTH_CYCLES.
    self.old_rpy_weight = min(0.0, self.old_rpy_weight - 1/SMOOTH_CYCLES)

    # Only accept samples taken while driving straight and fast enough.
    going_straight = abs(rot[2]) < MAX_YAW_RATE_FILTER
    fast_enough = (self.v_ego > MIN_SPEED_FILTER) and (trans[0] > MIN_SPEED_FILTER)
    # The wide camera tolerates a larger velocity-angle standard deviation.
    angle_std_threshold = (4 * MAX_VEL_ANGLE_STD) if self.wide_camera else MAX_VEL_ANGLE_STD
    certain_if_calib = (np.arctan2(trans_std[1], trans[0]) < angle_std_threshold
                        or self.valid_blocks < INPUTS_NEEDED)
    if not (going_straight and fast_enough and certain_if_calib):
      return None

    observed_rpy = np.array([0,
                             -np.arctan2(trans[2], trans[0]),
                             np.arctan2(trans[1], trans[0])])
    new_rpy = sanity_clip(euler_from_rot(rot_from_euler(self.get_smooth_rpy()).dot(rot_from_euler(observed_rpy))))

    # Blend the sample into the current block's weighted running average.
    blk = self.block_idx
    self.rpys[blk] = (self.idx * self.rpys[blk] + (BLOCK_SIZE - self.idx) * new_rpy) / float(BLOCK_SIZE)
    self.idx = (self.idx + 1) % BLOCK_SIZE
    if self.idx == 0:
      # Block complete: advance (wrapping) and track how many blocks are valid.
      self.block_idx += 1
      self.valid_blocks = max(self.block_idx, self.valid_blocks)
      self.block_idx %= INPUTS_WANTED

    self.update_status()

    return new_rpy
コード例 #2
0
  def handle_cam_odom(self, trans, rot, trans_std, rot_std):
    """Update the rpy calibration from one camera-odometry sample.

    Returns the clipped new rpy when the sample is accepted, else None.
    NOTE(review): rot_std is accepted but unused here — presumably kept for
    caller interface compatibility; confirm against the caller.
    """
    moving_straight = (self.v_ego > MIN_SPEED_FILTER
                       and trans[0] > MIN_SPEED_FILTER
                       and abs(rot[2]) < MAX_YAW_RATE_FILTER)
    angle_certain = np.arctan2(trans_std[1], trans[0]) < MAX_VEL_ANGLE_STD
    # Until enough blocks are collected, accept even uncertain samples.
    certain_if_calib = angle_certain or (self.valid_blocks < INPUTS_NEEDED)
    if not (moving_straight and certain_if_calib):
      return None

    observed_rpy = np.array([0,
                             -np.arctan2(trans[2], trans[0]),
                             np.arctan2(trans[1], trans[0])])
    new_rpy = sanity_clip(euler_from_rot(rot_from_euler(self.rpy).dot(rot_from_euler(observed_rpy))))

    # Weighted running average within the current block.
    blk = self.block_idx
    self.rpys[blk] = (self.idx * self.rpys[blk] + (BLOCK_SIZE - self.idx) * new_rpy) / float(BLOCK_SIZE)
    self.idx = (self.idx + 1) % BLOCK_SIZE
    if self.idx == 0:
      self.block_idx += 1
      self.valid_blocks = max(self.block_idx, self.valid_blocks)
      self.block_idx %= INPUTS_WANTED
    if self.valid_blocks > 0:
      self.rpy = np.mean(self.rpys[:self.valid_blocks], axis=0)
    self.update_status()

    # TODO: this should use the liveCalibration struct from cereal
    if self.param_put and ((self.idx == 0 and self.block_idx == 0) or self.just_calibrated):
      cal_params = {"calib_radians": list(self.rpy),
                    "valid_blocks": self.valid_blocks}
      put_nonblocking("CalibrationParams", json.dumps(cal_params).encode('utf8'))
    return new_rpy
コード例 #3
0
  def handle_cam_odom(self, trans, rot, trans_std, rot_std):
    """Process one camera-odometry sample; return the new rpy or None if rejected.

    NOTE(review): rot_std is accepted but unused in this body.
    """
    # Fade out the weight given to the previously stored calibration.
    self.old_rpy_weight = min(0.0, self.old_rpy_weight - 1/SMOOTH_CYCLES)

    is_straight = abs(rot[2]) < MAX_YAW_RATE_FILTER
    is_fast = (self.v_ego > MIN_SPEED_FILTER) and (trans[0] > MIN_SPEED_FILTER)
    angle_certain = np.arctan2(trans_std[1], trans[0]) < MAX_VEL_ANGLE_STD
    # Uncertain samples are still accepted while calibration is incomplete.
    usable = is_straight and is_fast and (angle_certain or self.valid_blocks < INPUTS_NEEDED)
    if not usable:
      return None

    observed_rpy = np.array([0,
                             -np.arctan2(trans[2], trans[0]),
                             np.arctan2(trans[1], trans[0])])
    new_rpy = sanity_clip(euler_from_rot(rot_from_euler(self.get_smooth_rpy()).dot(rot_from_euler(observed_rpy))))

    # Weighted running average within the current block.
    blk = self.block_idx
    self.rpys[blk] = (self.idx * self.rpys[blk] + (BLOCK_SIZE - self.idx) * new_rpy) / float(BLOCK_SIZE)
    self.idx = (self.idx + 1) % BLOCK_SIZE
    if self.idx == 0:
      self.block_idx += 1
      self.valid_blocks = max(self.block_idx, self.valid_blocks)
      self.block_idx %= INPUTS_WANTED
    if self.valid_blocks > 0:
      self.rpy = np.mean(self.rpys[:self.valid_blocks], axis=0)

    self.update_status()

    return new_rpy
コード例 #4
0
ファイル: camera.py プロジェクト: muggerhopper/openpilot
def rotate_img(img, eulers, crop=None, intrinsics=eon_intrinsics):
    """Apply a pure camera rotation (euler angles) to an image via homography.

    Args:
      img: input image array (H, W[, C]).
      eulers: rotation as [roll, pitch, yaw] euler angles.
      crop: optional (width, height) of a centered crop applied to the output;
        raises ValueError if the rotated image corners would fall inside it.
      intrinsics: 3x3 camera intrinsic matrix.

    Returns:
      The rotated (and optionally center-cropped) image.
    """
    size = img.shape[:2]
    rot = orient.rot_from_euler(eulers)
    # Image corners: top-left, top-right, bottom-left, bottom-right (x, y).
    quadrangle = np.array([[0, 0], [size[1] - 1, 0], [0, size[0] - 1],
                           [size[1] - 1, size[0] - 1]],
                          dtype=np.float32)
    quadrangle_norm = np.hstack(
        (normalize(quadrangle, intrinsics=intrinsics), np.ones((4, 1))))
    # Rotate the normalized corner rays and re-project through the intrinsics.
    warped_quadrangle_full = np.einsum('ij, kj->ki', intrinsics.dot(rot),
                                       quadrangle_norm)
    warped_quadrangle = np.column_stack(
        (warped_quadrangle_full[:, 0] / warped_quadrangle_full[:, 2],
         warped_quadrangle_full[:, 1] / warped_quadrangle_full[:, 2])).astype(
             np.float32)
    if crop:
        # BUGFIX: use integer (floor) division — true division produces float
        # borders, and float slice indices raise TypeError in Python 3 below.
        W_border = (size[1] - crop[0]) // 2
        H_border = (size[0] - crop[1]) // 2
        outside_crop = (((warped_quadrangle[:, 0] < W_border) |
                         (warped_quadrangle[:, 0] >= size[1] - W_border)) &
                        ((warped_quadrangle[:, 1] < H_border) |
                         (warped_quadrangle[:, 1] >= size[0] - H_border)))
        if not outside_crop.all():
            raise ValueError("warped image not contained inside crop")
    else:
        H_border, W_border = 0, 0
    M = cv2.getPerspectiveTransform(quadrangle, warped_quadrangle)
    img_warped = cv2.warpPerspective(img, M, size[::-1])
    # Centered crop (no-op when crop is None: borders are zero).
    return img_warped[H_border:size[0] - H_border, W_border:size[1] - W_border]
コード例 #5
0
ファイル: paramsd.py プロジェクト: JasonJShuler/openpilot
  def handle_log(self, t, which, msg):
    """Feed a 'liveLocation' or 'carState' log message into the parameter filter."""
    if which == 'liveLocation':
      rpy = [math.radians(msg.roll), math.radians(msg.pitch), math.radians(msg.heading)]
      # NED velocity rotated into the device frame; component 0 is forward speed.
      v_device = orient.rot_from_euler(rpy).T.dot(msg.vNED)
      self.speed = v_device[0]

      self.update_active()
      if self.active:
        self.kf.predict_and_observe(t, ObservationKind.CAL_DEVICE_FRAME_YAW_RATE, [-msg.gyro[2]])
        self.kf.predict_and_observe(t, ObservationKind.CAL_DEVICE_FRAME_XY_SPEED, [[v_device[0], -v_device[1]]])

        # Re-anchor states that drifted outside sane physical ranges.
        state = self.kf.x
        if not (10 < state[States.STEER_RATIO] < 25):
          self.kf.predict_and_observe(t, ObservationKind.STEER_RATIO, [15.0])
        if not (0.5 < state[States.STIFFNESS] < 3.0):
          self.kf.predict_and_observe(t, ObservationKind.STIFFNESS, [1.0])
      else:
        # Keep the filter clock just behind now while inactive.
        self.kf.filter.filter_time = t - 0.1

    elif which == 'carState':
      self.carstate_counter += 1
      # Decimate carState messages to reduce filter update rate.
      if self.carstate_counter % CARSTATE_DECIMATION != 0:
        return
      self.steering_angle = msg.steeringAngle
      self.steering_pressed = msg.steeringPressed

      self.update_active()
      if self.active:
        self.kf.predict_and_observe(t, ObservationKind.STEER_ANGLE, [math.radians(msg.steeringAngle)])
        self.kf.predict_and_observe(t, ObservationKind.ANGLE_OFFSET_FAST, [0])
      else:
        self.kf.filter.filter_time = t - 0.1
コード例 #6
0
def get_view_frame_from_road_frame(roll, pitch, yaw, height):
    """Build the 3x4 extrinsic matrix mapping road frame to view frame."""
    # TODO
    # calibration pitch is currently defined
    # opposite to pitch in device frame
    axis_flip = np.diag([1, -1, -1])
    device_from_road = orient.rot_from_euler([roll, -pitch, yaw]).dot(axis_flip)
    view_from_road = view_frame_from_device_frame.dot(device_from_road)
    translation = [[0], [height], [0]]
    return np.hstack((view_from_road, translation))
コード例 #7
0
 def get_M(h=1.22):
   """Perspective transform mapping the from_intr image onto the to_intr
   camera, assuming a flat ground plane at height h (meters)."""
   # Four anchor points: two on the row just below the principal point and
   # two at the bottom image corners.
   quadrangle = np.array([[0, cy + 20],
                          [size[1] - 1, cy + 20],
                          [0, size[0] - 1],
                          [size[1] - 1, size[0] - 1]], dtype=np.float32)
   normed = normalize(quadrangle, intrinsics=from_intr)
   quadrangle_norm = np.hstack((normed, np.ones((4, 1))))
   # Back-project the points onto the ground plane (y = h) in world coords.
   quadrangle_world = np.column_stack((h * quadrangle_norm[:, 0] / quadrangle_norm[:, 1],
                                       h * np.ones(4),
                                       h / quadrangle_norm[:, 1]))
   rot = orient.rot_from_euler(augment_eulers)
   to_extrinsics = np.hstack((rot.T, -augment_trans[:, None]))
   to_KE = to_intr.dot(to_extrinsics)
   world_homog = np.hstack((quadrangle_world, np.ones((4, 1))))
   warped_full = np.einsum('jk,ik->ij', to_KE, world_homog)
   warped_quadrangle = np.column_stack((warped_full[:, 0] / warped_full[:, 2],
                                        warped_full[:, 1] / warped_full[:, 2])).astype(np.float32)
   return cv2.getPerspectiveTransform(quadrangle, warped_quadrangle.astype(np.float32))
コード例 #8
0
def get_warp_matrix(rpy_calib, wide_cam=False, big_model=False, tici=True):
    """Warp matrix from the model frame to the camera frame for a calibration rpy."""
    from common.transformations.orientation import rot_from_euler
    from common.transformations.camera import view_frame_from_device_frame, eon_fcam_intrinsics, tici_ecam_intrinsics, tici_fcam_intrinsics

    # Pick the intrinsics of the camera actually in use.
    if not tici:
        intrinsics = eon_fcam_intrinsics
    elif wide_cam:
        intrinsics = tici_ecam_intrinsics
    else:
        intrinsics = tici_fcam_intrinsics

    # Model frame -> calib frame: drop the homogeneous column, then invert.
    frame_from_calib = sbigmodel_frame_from_calib_frame if big_model else medmodel_frame_from_calib_frame
    calib_from_model = np.linalg.inv(frame_from_calib[:, (0, 1, 2)])

    device_from_calib = rot_from_euler(rpy_calib)
    camera_from_calib = intrinsics.dot(view_frame_from_device_frame.dot(device_from_calib))
    return camera_from_calib.dot(calib_from_model)
コード例 #9
0
ファイル: camera.py プロジェクト: muggerhopper/openpilot
def transform_img(base_img,
                  augment_trans=None,
                  augment_eulers=None,
                  from_intr=eon_intrinsics,
                  to_intr=eon_intrinsics,
                  calib_rot_view=None,
                  output_size=None):
    """Re-project base_img into a virtual camera with different pose/intrinsics.

    Args:
      base_img: input image (H, W[, C]).
      augment_trans: camera translation [x, y, z]; defaults to zero.
      augment_eulers: camera rotation euler angles; defaults to zero.
      from_intr / to_intr: source and target 3x3 intrinsic matrices.
      calib_rot_view: optional extra rotation composed in front of the augment
        rotation.
      output_size: (width, height) of the output; defaults to the input size.

    Returns:
      The warped image, with border pixels replicated.
    """
    # BUGFIX: avoid mutable (ndarray) default arguments; build fresh per call.
    if augment_trans is None:
        augment_trans = np.array([0, 0, 0])
    if augment_eulers is None:
        augment_eulers = np.array([0, 0, 0])

    cy = from_intr[1, 2]
    size = base_img.shape[:2]
    if not output_size:
        output_size = size[::-1]
    # Assumed camera height above the ground plane (meters).
    h = 1.22
    quadrangle = np.array([[0, cy + 20], [size[1] - 1, cy + 20],
                           [0, size[0] - 1], [size[1] - 1, size[0] - 1]],
                          dtype=np.float32)
    quadrangle_norm = np.hstack(
        (normalize(quadrangle, intrinsics=from_intr), np.ones((4, 1))))
    # Back-project the anchor points onto the ground plane at height h.
    quadrangle_world = np.column_stack(
        (h * quadrangle_norm[:, 0] / quadrangle_norm[:, 1], h * np.ones(4),
         h / quadrangle_norm[:, 1]))
    rot = orient.rot_from_euler(augment_eulers)
    if calib_rot_view is not None:
        rot = calib_rot_view.dot(rot)
    to_extrinsics = np.hstack((rot.T, -augment_trans[:, None]))
    to_KE = to_intr.dot(to_extrinsics)
    warped_quadrangle_full = np.einsum(
        'jk,ik->ij', to_KE, np.hstack((quadrangle_world, np.ones((4, 1)))))
    warped_quadrangle = np.column_stack(
        (warped_quadrangle_full[:, 0] / warped_quadrangle_full[:, 2],
         warped_quadrangle_full[:, 1] / warped_quadrangle_full[:, 2])).astype(
             np.float32)
    M = cv2.getPerspectiveTransform(quadrangle,
                                    warped_quadrangle.astype(np.float32))
    augmented_rgb = cv2.warpPerspective(base_img,
                                        M,
                                        output_size,
                                        borderMode=cv2.BORDER_REPLICATE)
    return augmented_rgb
コード例 #10
0
def get_view_frame_from_calib_frame(roll, pitch, yaw, height):
    """3x4 extrinsic matrix: calib frame -> view frame, with height translation."""
    rotation = view_frame_from_device_frame.dot(orient.rot_from_euler([roll, pitch, yaw]))
    translation = [[0], [height], [0]]
    return np.hstack((rotation, translation))
コード例 #11
0
def get_view_frame_from_road_frame(roll, pitch, yaw, height):
    """3x4 extrinsic matrix: road frame -> view frame (road y/z axes flipped)."""
    axis_flip = np.diag([1, -1, -1])
    device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(axis_flip)
    view_from_road = view_frame_from_device_frame.dot(device_from_road)
    translation = [[0], [height], [0]]
    return np.hstack((view_from_road, translation))
コード例 #12
0
 def handle_live_calib(self, current_time, log):
     """Cache the latest live calibration rotation and its status."""
     if not len(log.rpyCalib):
         return
     self.calib = log.rpyCalib
     # Precompute both directions of the calib <-> device rotation.
     self.device_from_calib = rot_from_euler(self.calib)
     self.calib_from_device = self.device_from_calib.T
     self.calibrated = log.calStatus == 1
コード例 #13
0
FULL_FRAME_SIZE = (1164, 874)
W, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1]
eon_focal_length = FOCAL = 910.0

# Pinhole intrinsics with the principal point at the image center.
intrinsic_matrix = np.array([[FOCAL, 0., W / 2.], [0., FOCAL, H / 2.],
                             [0., 0., 1.]])

MODEL_PATH_MAX_VERTICES_CNT = 50

view_frame_from_device_frame = device_frame_from_view_frame.T

# Default (uncalibrated) extrinsics: level device 1.0 m above the road.
roll = 0
pitch = 0
yaw = 0
height = 1.0
device_from_road = orient.rot_from_euler([roll, pitch,
                                          yaw]).dot(np.diag([1, -1, -1]))
view_from_road = view_frame_from_device_frame.dot(device_from_road)
extrinsic_matrix = np.hstack((view_from_road, [[0], [height], [0]])).flatten()

# Row-major 3x4 view of the flattened extrinsics. Replaces the old
# element-by-element while loop; .copy() keeps it an independent array, as
# the original np.zeros-based construction was.
extrinsic_matrix_eigen = extrinsic_matrix.reshape(3, 4).copy()

camera_frame_from_road_frame = np.dot(eon_intrinsics, extrinsic_matrix_eigen)