Example #1
 def __rotate(self, axis, deg, local):
     if local:
         # Map a world basis axis such as [0, 1, 0] to the matching row of
         # the orientation matrix, i.e. that axis in the object's local frame.
         row = axis.tolist().index(1)
         localBasis = self.orientation.matrix33[row, :]
         rot = Quaternion.from_axis_rotation(localBasis,
                                             np.deg2rad(deg),
                                             dtype=np.float32)
     else:
         rot = Quaternion.from_axis_rotation(axis,
                                             np.deg2rad(deg),
                                             dtype=np.float32)
     self.orientation *= rot
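Note on Example #1: from_axis_rotation does not normalize its axis, and the index(1) trick assumes axis is a unit basis vector such as [0, 1, 0]. A minimal standalone sketch of the same local-axis rotation, assuming pyrr's row-vector convention so that the rows of matrix33 are the object's local axes:

import numpy as np
from pyrr import Quaternion

# Start from a 90-degree rotation about world Z.
orientation = Quaternion.from_axis_rotation(np.array([0.0, 0.0, 1.0]), np.pi / 2)

# A row of the rotation matrix is that basis axis in the object's local frame.
local_x = orientation.matrix33[0, :]

# Rotate 10 degrees about the local X axis and accumulate, as __rotate does.
rot = Quaternion.from_axis_rotation(local_x, np.deg2rad(10.0))
orientation = orientation * rot
print(orientation)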
Example #2
 def roll(self, value):
     """Rotate using the forward direction
     """
     rotation = Quaternion.from_axis_rotation(self._forward,
                                              value * self._speed)
     self._up = normalize(quaternion.apply_to_vector(rotation, self._up))
     return self
Example #3
 def yaw(self, value):
     """Rotate using up direction
     """
     rotation = Quaternion.from_axis_rotation(self._up, value * self._speed)
     self._forward = normalize(
         quaternion.apply_to_vector(rotation, self._forward))
     return self
Example #4
 def mouseMoveEvent(self, event):
     pos = event.pos()
     # compute point on sphere under pointer
     (w, h) = self.viewport
     t = (2*self.old_pos.x() - w) / float(w)
     u = -(2*self.old_pos.y() - h) / float(h)
     # compute inverse of view transform ignoring rotation
     m = Matrix44.from_translation(Vector3([0, 0, -self.zoom])) * self.projTransform
     m = matrix44.inverse(m)
     rayOri = m * Vector3([t, u, -1])
     rayEnd = m * Vector3([t, u, 1])
     rayDir = rayEnd - rayOri
     self.picked = intersectRayUnitSphere(rayOri, rayDir)
     # rotate on left-drag
     if event.buttons() & QtCore.Qt.LeftButton > 0:
         # the rotation vector is the displacement vector rotated by 90 degrees
         dx = pos.x() - self.old_pos.x()
         dy = pos.y() - self.old_pos.y()
         if dx == 0 and dy == 0:
             return
         v = Vector3([dy, dx, 0])
         # update the current orientation
         self.layers.multiplyOrientation(Quaternion.from_axis_rotation(
             -v.normalised,
             -v.length * 0.002,
         ))
     elif event.buttons() & QtCore.Qt.RightButton > 0:
         dz = pos.y() - self.old_pos.y()
         self.zoom = max(0, self.zoom + dz / 100.0)
     self.old_pos = pos
     self.update()
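intersectRayUnitSphere in Example #4 comes from the surrounding project and is not shown. A minimal sketch of what such a helper might look like, assuming the ray is given in the sphere's local frame and the nearest forward hit (or None) is wanted:

import numpy as np

def intersectRayUnitSphere(origin, direction):
    """Nearest forward intersection of a ray with the unit sphere, or None."""
    o = np.asarray(origin, dtype=float)
    d = np.asarray(direction, dtype=float)
    # Solve |o + t*d|^2 = 1, a quadratic in t.
    a = d @ d
    b = 2.0 * (o @ d)
    c = o @ o - 1.0
    disc = b * b - 4.0 * a * c
    if disc < 0:
        return None  # ray misses the sphere
    t = (-b - np.sqrt(disc)) / (2.0 * a)
    if t < 0:
        t = (-b + np.sqrt(disc)) / (2.0 * a)  # origin is inside the sphere
    return o + t * d if t >= 0 else None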
Example #5
    def convert_rvec_to_quaternion(self, rvec):
        '''Convert rvec (a Rodrigues axis-angle rotation vector) to quaternion'''
        theta = np.sqrt(rvec[0] * rvec[0] + rvec[1] * rvec[1] + rvec[2] * rvec[2])  # in radians
        if theta < 1e-12:
            # Zero rotation: avoid dividing by zero when normalizing the axis.
            return Quaternion.from_axis_rotation([1.0, 0.0, 0.0], 0.0)
        raxis = [rvec[0] / theta, rvec[1] / theta, rvec[2] / theta]

        # pyrr's Quaternion (order is XYZW), https://pyrr.readthedocs.io/en/latest/oo_api_quaternion.html
        return Quaternion.from_axis_rotation(raxis, theta)
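A quick sanity check for Example #5's conversion, assuming pyrr's XYZW component order and the usual right-handed rotation convention:

import numpy as np
from pyrr import Quaternion, quaternion

rvec = np.array([0.0, 0.0, np.pi / 2])  # 90 degrees about +Z
theta = np.linalg.norm(rvec)
quat = Quaternion.from_axis_rotation(rvec / theta, theta)
print(quat)  # expect roughly [0, 0, 0.7071, 0.7071] (XYZW)
print(quaternion.apply_to_vector(quat, np.array([1.0, 0.0, 0.0])))  # expect ~[0, 1, 0]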
Example #6
 def orient(self, x, y):
     """ Orient the current camera with the current x, y 
         The function will use two quaternions for computing the final rotation. By using 
         the current up and side vectors of the camera. The angle used for the rotations 
         are x and y passed by parameters. The function will normalize the new axis vectors
         for the camera by the current rotation. 
         Remark: there is no translation like in Orbit functionality.
     """
     side = normalize(np.cross(self._up, self._forward))
     rotationYaw = Quaternion.from_axis_rotation(
         self._up, -x * self._speed * self._sensitivity)
     rotationPitch = Quaternion.from_axis_rotation(
         side, y * self._speed * self._sensitivity)
     rotation = rotationYaw * rotationPitch
     self._forward = normalize(
         quaternion.apply_to_vector(rotation, self._forward))
     return self
Example #7
 def pitch(self, value):
     """ Rotate using cross product between up and forward vectors (side).
     """
     side = normalize(np.cross(self._up, self._forward))
     rotation = Quaternion.from_axis_rotation(side, value * self._speed)
     self._forward = normalize(
         quaternion.apply_to_vector(rotation, self._forward))
     self._up = normalize(np.cross(self._forward, side))
     return self
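Examples #2, #3 and #7 share one pattern: build a quaternion about a single camera axis, rotate the stored direction vectors with it, then renormalize. A self-contained sketch of one yaw step; the starting vectors and the plain-numpy normalize below are stand-ins for state the camera class is assumed to hold:

import numpy as np
from pyrr import Quaternion, quaternion

def normalize(v):
    return v / np.linalg.norm(v)

forward = np.array([0.0, 0.0, -1.0])
up = np.array([0.0, 1.0, 0.0])

# Yaw 90 degrees about the up axis; under the usual right-handed
# convention forward swings from -Z to -X.
rot = Quaternion.from_axis_rotation(up, np.pi / 2)
forward = normalize(quaternion.apply_to_vector(rot, forward))
print(forward)  # expect ~[-1, 0, 0]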
Example #8
    def computeForwardKinematics(self):
        s_pos, s_rot, joint_axis = self.prev.computeForwardKinematics()
        p_pos, p_rot = s_pos[-1], s_rot[-1]

        c_rot = p_rot * Quaternion.from_axis_rotation(
            self.rot_ax, self.start_angle + self.angle)
        c_pos = p_pos + c_rot * self.disp
        ja = p_rot * Vector3(self.rot_ax)

        return (s_pos + [c_pos]), (s_rot + [c_rot]), (joint_axis + [ja])
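The forward-kinematics recurrence above composes the parent rotation with the joint's own axis rotation, then offsets the position by the rotated displacement. A minimal two-link sketch with hypothetical axes and displacements (pyrr's Quaternion * Vector3 applies the rotation):

import numpy as np
from pyrr import Quaternion, Vector3

z_axis = np.array([0.0, 0.0, 1.0])

# Parent link: 90 degrees about Z, positioned one unit along X.
p_rot = Quaternion.from_axis_rotation(z_axis, np.pi / 2)
p_pos = Vector3([1.0, 0.0, 0.0])

# Child joint: a further 45 degrees about Z, displaced one unit along its local X.
c_rot = p_rot * Quaternion.from_axis_rotation(z_axis, np.pi / 4)
c_pos = p_pos + c_rot * Vector3([1.0, 0.0, 0.0])
print(c_pos)  # expect ~[0.293, 0.707, 0], i.e. [1 + cos 135deg, sin 135deg, 0]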
Example #9
    def mochae(self, timedelta: float) -> None:
        """Move, while turning gradually toward the movement direction."""
        comp = self.dir ^ self.movdir   # Rotation axis (cross product), also used for sanity checking.
        rent = arctan2(self.ure | comp, self.movdir | self.dir)  # Signed angle between dir and movdir.
        if abs(rent) <= abs(self.rotspe * timedelta):
            self.dir = self.movdir.copy()     # Snap to alignment, don't risk rounding errors.
        else:
            # from_axis_rotation expects a unit axis; fall back to up when the vectors are parallel.
            r = Quaternion.from_axis_rotation(comp.normalised if comp.length else self.ure, self.rotspe * timedelta)
            self.dir = r * self.dir   # rotate a step toward movdir.

        if self.stat == self.states.forw:
            tent, rest = self.movdir * self.latspe * timedelta, self.orgp + self.movdir*2 - self.pos
            if tent.length > rest.length:   # Snap to grid.
                self.pos = ((self.pos + rest) * 2).round() / 2
                self.stat = self.states.stop
            else:
                self.pos += tent
Example #10
    def solve_pnp(self, cuboid2d_points, pnp_algorithm = None):
        """
        Detects the rotation and translation
        of a cuboid object from the 2D locations
        of its vertices in the image
        """

        # Fallback to default PnP algorithm based on OpenCV version
        if pnp_algorithm is None:
            if CuboidPNPSolver.cv2majorversion == 2:
                pnp_algorithm = cv2.CV_ITERATIVE
            elif CuboidPNPSolver.cv2majorversion == 3:
                pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
            elif CuboidPNPSolver.cv2majorversion == 4:
                pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
            else:
                raise Exception("Invalid OpenCV version")
            # Alternative algorithms:
            # pnp_algorithm = cv2.SOLVEPNP_P3P
            # pnp_algorithm = cv2.SOLVEPNP_EPNP
        
        location = None
        quaternion = None
        projected_points = cuboid2d_points

        cuboid3d_points = np.array(self._cuboid3d.get_vertices())
        obj_2d_points = []
        obj_3d_points = []

        for i in range(CuboidVertexType.TotalVertexCount):
            check_point_2d = cuboid2d_points[i]
            # Ignore invalid points
            if check_point_2d is None:
                continue
            obj_2d_points.append(check_point_2d)
            obj_3d_points.append(cuboid3d_points[i])

        obj_2d_points = np.array(obj_2d_points, dtype=float)
        obj_3d_points = np.array(obj_3d_points, dtype=float)

        valid_point_count = len(obj_2d_points)

        # Can only do PnP if we have at least 4 valid points
        is_points_valid = valid_point_count >= 4

        if is_points_valid:
            
            ret, rvec, tvec = cv2.solvePnP(
                obj_3d_points,
                obj_2d_points,
                self._camera_intrinsic_matrix,
                self._dist_coeffs,
                flags=pnp_algorithm
            )

            if ret:
                location = list(x[0] for x in tvec)
                quaternion = self.convert_rvec_to_quaternion(rvec)
                
                projected_points, _ = cv2.projectPoints(cuboid3d_points, rvec, tvec, self._camera_intrinsic_matrix, self._dist_coeffs)
                projected_points = np.squeeze(projected_points)
                
                # If location Z is negative, the object is behind the camera, so flip both location and rotation
                x, y, z = location
                if z < 0:
                    # Get the opposite location
                    location = [-x, -y, -z]

                    # Change the rotation by 180 degree
                    rotate_angle = np.pi
                    # from_axis_rotation expects a unit axis, so normalize the location first.
                    rotate_quaternion = Quaternion.from_axis_rotation(
                        np.array(location) / np.linalg.norm(location), rotate_angle)
                    quaternion = rotate_quaternion.cross(quaternion)

        return location, quaternion, projected_points
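The z < 0 branch above mirrors the solution through the camera origin and composes a 180-degree turn about the new location direction into the pose. A minimal sketch of just that flip with a hypothetical pose quaternion; the axis is normalized here because from_axis_rotation expects a unit axis:

import numpy as np
from pyrr import Quaternion

pose = Quaternion.from_axis_rotation(np.array([0.0, 1.0, 0.0]), 0.3)  # hypothetical pose
location = [1.0, -2.0, -5.0]  # z < 0: the solution landed behind the camera

x, y, z = location
location = [-x, -y, -z]  # mirror through the origin

axis = np.array(location) / np.linalg.norm(location)
flip = Quaternion.from_axis_rotation(axis, np.pi)
pose = flip.cross(pose)  # compose the 180-degree turn into the pose
print(location, pose)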
Example #11
    def solve_pnp(self, cuboid2d_points, pnp_algorithm=None):
        """
        Detects the rotation and translation
        of a cuboid object from the 2D locations
        of its vertices in the image
        """

        location = None
        quaternion = None
        projected_points = cuboid2d_points

        cuboid3d_points = np.array(self._cuboid3d.get_vertices())
        obj_2d_points = []
        obj_3d_points = []

        for i in range(CuboidVertexType.TotalVertexCount):
            check_point_2d = cuboid2d_points[i]
            # Ignore invalid points
            if check_point_2d is None:
                continue
            obj_2d_points.append(check_point_2d)
            obj_3d_points.append(cuboid3d_points[i])

        obj_2d_points = np.array(obj_2d_points, dtype=float)
        obj_3d_points = np.array(obj_3d_points, dtype=float)

        valid_point_count = len(obj_2d_points)
        # print(valid_point_count, "valid points found" )

        # Set PNP algorithm based on OpenCV version and number of valid points
        is_points_valid = False

        if pnp_algorithm is None:
            if CuboidPNPSolver.cv2majorversion == 2:
                is_points_valid = True
                pnp_algorithm = cv2.CV_ITERATIVE
            elif CuboidPNPSolver.cv2majorversion > 2:
                if valid_point_count >= 6:
                    is_points_valid = True
                    pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
                elif valid_point_count >= 4:
                    is_points_valid = True
                    pnp_algorithm = cv2.SOLVEPNP_P3P
                    # This algorithm requires EXACTLY four points, so we truncate our
                    # data
                    obj_3d_points = obj_3d_points[:4]
                    obj_2d_points = obj_2d_points[:4]
                    # Alternative algorithms:
                    # pnp_algorithm = SOLVE_PNP_EPNP
            else:
                assert False, "DOPE will not work with versions of OpenCV earlier than 2.0"

        if is_points_valid:
            try:
                ret, rvec, tvec = cv2.solvePnP(obj_3d_points,
                                               obj_2d_points,
                                               self._camera_intrinsic_matrix,
                                               self._dist_coeffs,
                                               flags=pnp_algorithm)
            except cv2.error:
                # solvePnP will assert if there are insufficient points for the
                # algorithm
                print("cv2.solvePnP failed with an error")
                ret = False

            if ret:
                location = list(x[0] for x in tvec)
                quaternion = self.convert_rvec_to_quaternion(rvec)

                projected_points, _ = cv2.projectPoints(
                    cuboid3d_points, rvec, tvec, self._camera_intrinsic_matrix,
                    self._dist_coeffs)
                projected_points = np.squeeze(projected_points)

                # If location Z is negative, the object is behind the camera, so flip both location and rotation
                x, y, z = location
                if z < 0:
                    # Get the opposite location
                    location = [-x, -y, -z]

                    # Change the rotation by 180 degree
                    rotate_angle = np.pi
                    # from_axis_rotation expects a unit axis, so normalize the location first.
                    rotate_quaternion = Quaternion.from_axis_rotation(
                        np.array(location) / np.linalg.norm(location), rotate_angle)
                    quaternion = rotate_quaternion.cross(quaternion)

        return location, quaternion, projected_points
Example #12
def _eval(class_name, path_to_data_dir, path_to_checkpoint, img_prefix):

    # load pre-trained model
    model = get_model(trunk='vgg19')
    model = model.cuda()
    use_vgg(model, './model', 'vgg19')
    print("=> Load pre-trained model from {}".format(path_to_checkpoint))
    model.load_state_dict(torch.load(path_to_checkpoint))
    model.eval()

    # parameter of object size for pnp solver
    print("=> Load {} object size".format(class_name))
    path_to_object_seetings = os.path.join(path_to_data_dir,
                                           '_object_settings.json')
    if not os.path.exists(path_to_object_seetings):
        raise FileNotFoundError(path_to_object_seetings)
    object_list = json.load(open(path_to_object_seetings))['exported_objects']
    object_size = None
    for obj in object_list:
        if obj['class'].find(class_name) != -1:
            object_size = obj['cuboid_dimensions']
    if not object_size:
        raise ValueError("Object size is none")
    _cuboid3d = Cuboid3d(object_size)
    cuboid3d_points = np.array(_cuboid3d.get_vertices())

    # parameter of camera for pnp solver
    path_to_camera_seetings = os.path.join(path_to_data_dir,
                                           '_camera_settings.json')
    if not os.path.exists(path_to_camera_seetings):
        raise FileNotFoundError(path_to_camera_seetings)
    intrinsic_settings = json.load(open(
        path_to_camera_seetings))['camera_settings'][0]['intrinsic_settings']
    matrix_camera = np.zeros((3, 3))
    matrix_camera[0, 0] = intrinsic_settings['fx']
    matrix_camera[1, 1] = intrinsic_settings['fy']
    matrix_camera[0, 2] = max(intrinsic_settings['cx'],
                              intrinsic_settings['cy'])
    matrix_camera[1, 2] = max(intrinsic_settings['cx'],
                              intrinsic_settings['cy'])
    matrix_camera[2, 2] = 1

    try:
        dist_coeffs = np.array(
            json.load(open(path_to_camera_seetings))['camera_settings'][0]
            ["distortion_coefficients"])
    except KeyError:
        dist_coeffs = np.zeros((4, 1))

    # dataloader
    val_dataset = Dataset(path_to_data=path_to_data_dir,
                          class_name=class_name,
                          split='val',
                          img_prefix=img_prefix)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=0,
                                drop_last=False)

    correct = 0
    wrong = 0
    # set threshold (cm)
    threshold = 3.0

    for batch_index, (images, _, _, location_targets,
                      ratio) in tqdm(enumerate(val_dataloader)):
        images = images.cuda()
        output, _ = model(images)
        line, vertex = output[0], output[1]
        line, vertex = line.squeeze(), vertex.squeeze()
        objects, peaks = find_objects(vertex, line)
        location_predictions = []
        if len(objects) > 0:
            for obj in objects:  # renamed from `object` to avoid shadowing the builtin
                cuboid2d_points = obj[1] + [
                    (obj[0][0] * 8, obj[0][1] * 8)
                ]
                cuboid3d_points = np.array(cuboid3d_points)
                location = None
                quaternion = None
                obj_2d_points = []
                obj_3d_points = []

                for i in range(8):
                    check_point_2d = cuboid2d_points[i]
                    # Ignore invalid points
                    if check_point_2d is None:
                        continue
                    elif (check_point_2d[0] < 0 or check_point_2d[1] < 0
                          or check_point_2d[0] >= Config.crop_size / Config.stride
                          or check_point_2d[1] >= Config.crop_size / Config.stride):
                        continue
                    else:
                        check_point_2d = (check_point_2d[0] * Config.stride *
                                          ratio, check_point_2d[1] *
                                          Config.stride * ratio)
                    obj_2d_points.append(check_point_2d)
                    obj_3d_points.append(cuboid3d_points[i])
                projected_points = obj[1]
                vertexes = projected_points.copy()
                centroid = tuple([
                    int(point * Config.stride * ratio) for point in obj[0]
                ])
                obj_2d_points = np.array(obj_2d_points, dtype=np.float32)
                obj_3d_points = np.array(obj_3d_points, dtype=np.float32)
                valid_point_count = len(obj_2d_points)
                if valid_point_count >= 4:
                    ret, rvec, tvec = cv2.solvePnP(
                        obj_3d_points,
                        obj_2d_points,
                        matrix_camera,
                        dist_coeffs,
                        flags=cv2.SOLVEPNP_ITERATIVE)
                    if ret:
                        location = list(x[0] for x in tvec)
                        quaternion = convert_rvec_to_quaternion(rvec)

                        projected_points, _ = cv2.projectPoints(
                            cuboid3d_points, rvec, tvec, matrix_camera,
                            dist_coeffs)
                        projected_points = np.squeeze(projected_points)
                        # If location Z is negative, the object is behind the camera, so flip both location and rotation
                        x, y, z = location
                        if z < 0:
                            # Get the opposite location
                            location = [-x, -y, -z]
                            # Change the rotation by 180 degree
                            rotate_angle = np.pi
                            # from_axis_rotation expects a unit axis, so normalize first.
                            rotate_quaternion = Quaternion.from_axis_rotation(
                                np.array(location) / np.linalg.norm(location),
                                rotate_angle)
                            quaternion = rotate_quaternion.cross(quaternion)
                        vertexes = [tuple(p) for p in projected_points]
                    location_predictions.append(location)
        location_predictions = np.array(location_predictions)
        if len(location_targets) == 0:
            wrong += len(location_predictions)
        else:
            location_targets = location_targets.cpu().data.numpy()[0]
            for location_target in location_targets:
                distances = [
                    np.sqrt(
                        np.sum(
                            np.square(location_target -
                                      location_prediction / 10.0)))
                    for location_prediction in location_predictions
                ]
                if len(distances) == 0:
                    wrong += 1
                elif min(distances) > threshold:
                    wrong += 1
                else:
                    correct += 1

    print('Object: {} Accuracy: {}%'.format(
        class_name, correct / (wrong + correct) * 100.0))
Example #13
def main(args):
    output_dir = args.output_dir
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    path_to_data_dir = args.path_to_data_dir
    if not os.path.exists(path_to_data_dir):
        raise FileNotFoundError(path_to_data_dir)
    path_to_checkpoint = args.checkpoint
    if not os.path.exists(path_to_checkpoint):
        raise FileNotFoundError(path_to_checkpoint)
    class_name = args.class_name
    fps = args.fps
    img_prefix = args.img_prefix

    # load pre-trained model
    model = get_model(trunk='vgg19')
    model = model.cuda()
    use_vgg(model, './model', 'vgg19')
    print("=> Load pre-trained model from {}".format(path_to_checkpoint))
    model.load_state_dict(torch.load(path_to_checkpoint))
    model.eval()

    # parameter of object size for pnp solver
    print("=> Load {} object size".format(class_name))
    path_to_object_seetings = os.path.join(path_to_data_dir,
                                           '_object_settings.json')
    if not os.path.exists(path_to_object_seetings):
        raise FileNotFoundError(path_to_object_seetings)
    object_list = json.load(open(path_to_object_seetings))['exported_objects']
    object_size = None
    for obj in object_list:
        if obj['class'].find(class_name) != -1:
            object_size = obj['cuboid_dimensions']
    if not object_size:
        raise ValueError("Object size is none")
    _cuboid3d = Cuboid3d(object_size)
    cuboid3d_points = np.array(_cuboid3d.get_vertices())

    # parameter of camera for pnp solver
    path_to_camera_seetings = os.path.join(path_to_data_dir,
                                           '_camera_settings.json')
    if not os.path.exists(path_to_camera_seetings):
        raise FileNotFoundError(path_to_camera_seetings)
    intrinsic_settings = json.load(open(
        path_to_camera_seetings))['camera_settings'][0]['intrinsic_settings']
    matrix_camera = np.zeros((3, 3))
    matrix_camera[0, 0] = intrinsic_settings['fx']
    matrix_camera[1, 1] = intrinsic_settings['fy']
    matrix_camera[0, 2] = max(intrinsic_settings['cx'],
                              intrinsic_settings['cy'])
    matrix_camera[1, 2] = max(intrinsic_settings['cx'],
                              intrinsic_settings['cy'])
    matrix_camera[2, 2] = 1
    try:
        dist_coeffs = np.array(
            json.load(open(path_to_camera_seetings))['camera_settings'][0]
            ["distortion_coefficients"])
    except KeyError:
        dist_coeffs = np.zeros((4, 1))
    path_to_sequences = sorted(
        glob.glob(os.path.join(path_to_data_dir, '*.{}'.format(img_prefix))))

    for img_path in path_to_sequences:
        original_img = crop(cv2.imread(img_path))
        ratio = max(original_img.shape[:2]) / Config.crop_size
        img = cv2.resize(original_img, (Config.crop_size, Config.crop_size))
        img = preprocess(img).float()
        img = torch.unsqueeze(img, 0)
        out, _ = model(img.cuda())
        line, vertex = out[0].squeeze(), out[1].squeeze()
        objects, peaks = find_objects(vertex, line)
        original_img = cv2.putText(original_img,
                                   "Class name: {}".format(class_name),
                                   (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1,
                                   (255, 255, 255), 2)

        if len(objects) > 0:
            for obj in objects:  # renamed from `object` to avoid shadowing the builtin
                cuboid2d_points = obj[1] + [
                    (obj[0][0] * 8, obj[0][1] * 8)
                ]
                cuboid3d_points = np.array(cuboid3d_points)
                location = None
                quaternion = None
                obj_2d_points = []
                obj_3d_points = []

                for i in range(8):
                    check_point_2d = cuboid2d_points[i]
                    # Ignore invalid points
                    if check_point_2d is None:
                        continue
                    elif (check_point_2d[0] < 0 or check_point_2d[1] < 0
                          or check_point_2d[0] >= Config.crop_size / Config.stride
                          or check_point_2d[1] >= Config.crop_size / Config.stride):
                        continue
                    else:
                        check_point_2d = (check_point_2d[0] * Config.stride *
                                          ratio, check_point_2d[1] *
                                          Config.stride * ratio)
                    obj_2d_points.append(check_point_2d)
                    obj_3d_points.append(cuboid3d_points[i])
                centroid = tuple([
                    int(point * Config.stride * ratio) for point in obj[0]
                ])
                # cv2.circle takes a color before the thickness; -1 thickness draws a filled dot.
                original_img = cv2.circle(original_img, centroid, 5, (255, 255, 255), -1)
                obj_2d_points = np.array(obj_2d_points, dtype=float)
                obj_3d_points = np.array(obj_3d_points, dtype=float)
                valid_point_count = len(obj_2d_points)
                if valid_point_count >= 5:
                    ret, rvec, tvec = cv2.solvePnP(
                        obj_3d_points,
                        obj_2d_points,
                        matrix_camera,
                        dist_coeffs,
                        flags=cv2.SOLVEPNP_ITERATIVE)
                    if ret:
                        location = list(x[0] for x in tvec)
                        quaternion = convert_rvec_to_quaternion(rvec)

                        projected_points, _ = cv2.projectPoints(
                            cuboid3d_points, rvec, tvec, matrix_camera,
                            dist_coeffs)
                        projected_points = np.squeeze(projected_points)
                        # If location Z is negative, the object is behind the camera, so flip both location and rotation
                        x, y, z = location
                        original_img = cv2.putText(
                            original_img,
                            "Location Prediction: x: {:.2f} y: {:.2f} z: {:.2f}"
                            .format(x / 10, y / 10, z / 10), (50, 150),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
                        print(
                            "Location Prediction: x: {:.2f} y: {:.2f} z: {:.2f}"
                            .format(x / 10, y / 10, z / 10))
                        if z < 0:
                            # Get the opposite location
                            location = [-x, -y, -z]

                            # Change the rotation by 180 degree
                            rotate_angle = np.pi
                            # from_axis_rotation expects a unit axis, so normalize first.
                            rotate_quaternion = Quaternion.from_axis_rotation(
                                np.array(location) / np.linalg.norm(location),
                                rotate_angle)
                            quaternion = rotate_quaternion.cross(quaternion)
                        vertexes = [tuple(p) for p in projected_points]
                        plot(original_img, vertexes)
            if args.save:
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir, exist_ok=True)
                output_path = os.path.join(output_dir, img_path.split('/')[-1])
                print('=> Save {}'.format(output_path))
                cv2.imwrite(output_path, original_img)
            if args.plot:
                original_img = cv2.resize(original_img, (600, 600))
                cv2.imshow('prediction', original_img)
                cv2.waitKey(int(1000 / fps))
Example #14
    def solve_pnp(self,
                  cuboid2d_points,
                  pnp_algorithm=None,
                  fail_if_projected_diff_exceeds=50,
                  fail_if_projected_value_exceeds=1e5):
        """
        Detects the rotation and translation
        of a cuboid object from the 2D locations
        of its vertices in the image

        Inputs:
        - cuboid2d_points:  list of XY tuples
          ...

        Outputs:
        - location in 3D
        - pose in 3D (as quaternion)
        - projected points:  np.ndarray of np.ndarrays

        """

        # Fallback to default PnP algorithm based on OpenCV version
        if pnp_algorithm is None:
            if CuboidPNPSolver.cv2majorversion == 2:
                pnp_algorithm = cv2.CV_ITERATIVE
            elif CuboidPNPSolver.cv2majorversion == 3:
                pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
                # Alternative algorithms:
                # pnp_algorithm = SOLVE_PNP_P3P
                # pnp_algorithm = SOLVE_PNP_EPNP
            else:
                pnp_algorithm = cv2.SOLVEPNP_EPNP

        location = None
        quaternion = None
        projected_points = cuboid2d_points

        cuboid3d_points = np.array(self._cuboid3d.get_vertices())
        obj_2d_points = []
        obj_3d_points = []

        for i in range(CuboidVertexType.TotalVertexCount):
            check_point_2d = cuboid2d_points[i]
            # Ignore invalid points
            if check_point_2d is None:
                continue
            obj_2d_points.append(check_point_2d)
            obj_3d_points.append(cuboid3d_points[i])

        obj_2d_points = np.array(obj_2d_points, dtype=float)
        obj_3d_points = np.array(obj_3d_points, dtype=float)

        valid_point_count = len(obj_2d_points)

        # Can only do PnP if we have at least min_required_points valid points
        is_points_valid = valid_point_count >= self.min_required_points

        if is_points_valid:

            ret, rvec, tvec = cv2.solvePnP(obj_3d_points,
                                           obj_2d_points,
                                           self._camera_intrinsic_matrix,
                                           self._dist_coeffs,
                                           flags=pnp_algorithm)

            if ret:
                location = list(x[0] for x in tvec)
                quaternion = self.convert_rvec_to_quaternion(rvec)

                projected_points, _ = cv2.projectPoints(
                    cuboid3d_points, rvec, tvec, self._camera_intrinsic_matrix,
                    self._dist_coeffs)
                projected_points = np.squeeze(projected_points)

                success = self.__check_pnp_result(
                    cuboid2d_points, projected_points,
                    fail_if_projected_diff_exceeds,
                    fail_if_projected_value_exceeds)

                # If location Z is negative (object behind the camera) or the check failed, flip location and rotation
                x, y, z = location
                if z < 0 or not success:
                    # Get the opposite location
                    location = [-x, -y, -z]

                    # Change the rotation by 180 degrees
                    rotate_angle = np.pi
                    rotate_quaternion = Quaternion.from_axis_rotation(
                        np.array(location) / np.linalg.norm(location),
                        rotate_angle)
                    quaternion = rotate_quaternion.cross(quaternion)
                    # The flipped pose is then discarded: the solve is treated as a failure.
                    location = None
                    quaternion = None
        #             print("PNP solution is behind the camera (Z < 0) => flip the location and rotation")
        #         else:
        #             print("solvePNP found good results - location: {} - rotation: {} !!!".format(location, quaternion))
        #     else:
        #         print('Error:  solvePnP return false ****************************************')
        # else:
        #     print("Need at least 4 valid points in order to run PNP. Currently: {}".format(valid_point_count))
        return location, quaternion, projected_points
Example #15
 def pitya(self, pit: float, ya: float) -> None:
     """Sets the pitch and yaw simultaneously, saves a second round of vector normalisation?"""
     self._pitch = clip(pit, -pi * 0.4999, pi * 0.4999)
     self._yaw = ya % (2 * pi)
     self.dir = (Quaternion.from_axis_rotation(self.ure, self._yaw) *
                 Quaternion.from_axis_rotation(self.xre, self._pitch) * self.zre)
Example #16
 def yaw(self, value: float) -> None:
     """Sets the yref rotation."""
     self._yaw = value % (2 * pi)
     self.dir = (Quaternion.from_axis_rotation(self.ure, self._yaw) *
                 Quaternion.from_axis_rotation(self.xre, self._pitch) * self.zre)
Example #17
 def pitch(self, value: float) -> None:
     """Sets the myx rotation."""
     self._pitch = clip(value, -pi * 0.4999, pi * 0.4999)
     self.dir = (Quaternion.from_axis_rotation(self.ure, self._yaw) *
                 Quaternion.from_axis_rotation(self.xre, self._pitch) * self.zre)
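All three setters above rebuild dir the same way: a yaw about the up reference axis composed with a pitch about the x reference axis, applied to the forward reference. A standalone sketch with hypothetical world axes standing in for ure, xre and zre:

from math import pi
import numpy as np
from pyrr import Quaternion, Vector3

ure = np.array([0.0, 1.0, 0.0])  # up reference
xre = np.array([1.0, 0.0, 0.0])  # x (right) reference
zre = Vector3([0.0, 0.0, 1.0])   # forward reference

yaw, pitch = pi / 2, pi / 6
direction = (Quaternion.from_axis_rotation(ure, yaw) *
             Quaternion.from_axis_rotation(xre, pitch)) * zre
print(direction)  # a unit direction vector for the given yaw and pitch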