Code Example #1
File: misc.py  Project: fubowen1229/bop_toolkit
# Imports used by this excerpt (not shown on the original page).
import numpy as np

from bop_toolkit_lib import transform  # Provides rotation_matrix().


def get_symmetry_transformations(model_info, max_sym_disc_step):
    """Returns a set of symmetry transformations for an object model.

    :param model_info: See files models_info.json provided with the datasets.
    :param max_sym_disc_step: The maximum fraction of the object diameter which
      the vertex that is the furthest from the axis of continuous rotational
      symmetry travels between consecutive discretized rotations.
    :return: The set of symmetry transformations.
    """
    # Discrete symmetries.
    trans_disc = [{'R': np.eye(3), 't': np.array([[0, 0, 0]]).T}]  # Identity.
    if 'symmetries_discrete' in model_info:
        for sym in model_info['symmetries_discrete']:
            sym_4x4 = np.reshape(sym, (4, 4))
            R = sym_4x4[:3, :3]
            t = sym_4x4[:3, 3].reshape((3, 1))
            trans_disc.append({'R': R, 't': t})

    # Discretized continuous symmetries.
    trans_cont = []
    if 'symmetries_continuous' in model_info:
        for sym in model_info['symmetries_continuous']:
            axis = np.array(sym['axis'])
            offset = np.array(sym['offset']).reshape((3, 1))

            # (PI * diam.) / (max_sym_disc_step * diam.) = discrete_steps_count
            discrete_steps_count = int(np.ceil(np.pi / max_sym_disc_step))

            # Discrete step in radians.
            discrete_step = 2.0 * np.pi / discrete_steps_count

            for i in range(1, discrete_steps_count):
                R = transform.rotation_matrix(i * discrete_step, axis)[:3, :3]
                t = -R.dot(offset) + offset
                trans_cont.append({'R': R, 't': t})

    # Combine the discrete and the discretized continuous symmetries.
    trans = []
    for tran_disc in trans_disc:
        if len(trans_cont):
            for tran_cont in trans_cont:
                R = tran_cont['R'].dot(tran_disc['R'])
                t = tran_cont['R'].dot(tran_disc['t']) + tran_cont['t']
                trans.append({'R': R, 't': t})
        else:
            trans.append(tran_disc)

    return trans
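
A minimal usage sketch for the function above, with a hand-written model_info entry describing a single continuous symmetry about the z axis (illustrative values only; real entries come from a dataset's models_info.json):

# Illustrative model_info; real entries come from models_info.json.
model_info = {'symmetries_continuous': [{'axis': [0, 0, 1], 'offset': [0, 0, 0]}]}
syms = get_symmetry_transformations(model_info, max_sym_disc_step=0.01)
for sym in syms:
    # Each entry holds a 3x3 rotation 'R' and a 3x1 translation 't'.
    assert sym['R'].shape == (3, 3) and sym['t'].shape == (3, 1)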
Code Example #2
def sample_views(
      min_n_views, radius=1.0, azimuth_range=(0, 2 * math.pi),
      elev_range=(-0.5 * math.pi, 0.5 * math.pi), mode='hinterstoisser'):
  """Viewpoint sampling from a view sphere.

  :param min_n_views: The min. number of points to sample on the whole sphere.
  :param radius: Radius of the sphere.
  :param azimuth_range: Azimuth range from which the viewpoints are sampled.
  :param elev_range: Elevation range from which the viewpoints are sampled.
  :param mode: Type of sampling (options: 'hinterstoisser' or 'fibonacci').
  :return: A tuple (views, pts_level): views is a list of dicts, each with a
    3x3 rotation matrix 'R' and a 3x1 translation vector 't'; pts_level gives
    the sphere-sampling level at which each point was generated (0 for the
    fibonacci mode).
  """
  # Get points on a sphere.
  if mode == 'hinterstoisser':
    pts, pts_level = hinter_sampling(min_n_views, radius=radius)
  elif mode == 'fibonacci':
    n_views = min_n_views
    if n_views % 2 != 1:
      n_views += 1

    pts = fibonacci_sampling(n_views, radius=radius)
    pts_level = [0 for _ in range(len(pts))]
  else:
    raise ValueError('Unknown view sampling mode.')

  views = []
  for pt in pts:
    # Azimuth from (0, 2 * pi).
    azimuth = math.atan2(pt[1], pt[0])
    if azimuth < 0:
      azimuth += 2.0 * math.pi

    # Elevation from (-0.5 * pi, 0.5 * pi).
    a = np.linalg.norm(pt)
    b = np.linalg.norm([pt[0], pt[1], 0])
    elev = math.acos(b / a)
    if pt[2] < 0:
      elev = -elev

    if not (azimuth_range[0] <= azimuth <= azimuth_range[1] and
            elev_range[0] <= elev <= elev_range[1]):
      continue

    # Rotation matrix.
    # Adopted from gluLookAt function (uses OpenGL coordinate system):
    # [1] http://stackoverflow.com/questions/5717654/glulookat-explanation
    # [2] https://www.opengl.org/wiki/GluLookAt_code
    f = -np.array(pt)  # Forward direction.
    f /= np.linalg.norm(f)
    u = np.array([0.0, 0.0, 1.0])  # Up direction.
    s = np.cross(f, u)  # Side direction.
    if np.count_nonzero(s) == 0:
      # f and u are parallel, i.e. we are looking along or against Z axis.
      s = np.array([1.0, 0.0, 0.0])
    s /= np.linalg.norm(s)
    u = np.cross(s, f)  # Recompute up.
    R = np.array([[s[0], s[1], s[2]],
                  [u[0], u[1], u[2]],
                  [-f[0], -f[1], -f[2]]])

    # Convert from OpenGL to OpenCV coordinate system.
    R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]
    R = R_yz_flip.dot(R)

    # Translation vector.
    t = -R.dot(np.array(pt).reshape((3, 1)))

    views.append({'R': R, 't': t})

  return views, pts_level
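
A short usage sketch for this sampler, assuming the snippet's imports (math, numpy) and the hinter_sampling helper are available; the exact number of returned views depends on that helper, so only shapes are checked here:

# Sample at least 100 views on the upper hemisphere of a sphere with radius 0.6.
views, pts_level = sample_views(
    min_n_views=100, radius=0.6,
    azimuth_range=(0, 2 * math.pi),
    elev_range=(0, 0.5 * math.pi),
    mode='hinterstoisser')
for view in views:
    # The camera looks at the origin: R.dot(pt) + t == 0 for the sampled point.
    assert view['R'].shape == (3, 3) and view['t'].shape == (3, 1)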
Code Example #3
def sample_views(min_n_views,
                 radius=1,
                 azimuth_range=(0, 2 * math.pi),
                 elev_range=(-0.5 * math.pi, 0.5 * math.pi)):
    '''
    Viewpoint sampling from a view sphere.

    :param min_n_views: Minimum required number of views on the whole view sphere.
    :param radius: Radius of the view sphere.
    :param azimuth_range: Azimuth range from which the viewpoints are sampled.
    :param elev_range: Elevation range from which the viewpoints are sampled.
    :return: A tuple (views, pts_level): views is a list of dicts, each with a
             3x3 rotation matrix 'R' and a 3x1 translation vector 't'; pts_level
             gives the sphere-sampling level at which each point was generated.
    '''

    # Get points on a sphere
    if True:
        pts, pts_level = hinter_sampling(min_n_views, radius=radius)
    else:
        pts = fibonacci_sampling(min_n_views + 1, radius=radius)
        pts_level = [0 for _ in range(len(pts))]

    views = []
    for pt in pts:
        # Azimuth from (0, 2 * pi)
        azimuth = math.atan2(pt[1], pt[0])
        if azimuth < 0:
            azimuth += 2.0 * math.pi

        # Elevation from (-0.5 * pi, 0.5 * pi)
        a = np.linalg.norm(pt)
        b = np.linalg.norm([pt[0], pt[1], 0])
        elev = math.acos(b / a)
        if pt[2] < 0:
            elev = -elev

        # if hemisphere and (pt[2] < 0 or pt[0] < 0 or pt[1] < 0):
        if not (azimuth_range[0] <= azimuth <= azimuth_range[1]
                and elev_range[0] <= elev <= elev_range[1]):
            continue

        # Rotation matrix
        # The code was adopted from gluLookAt function (uses OpenGL coordinate system):
        # [1] http://stackoverflow.com/questions/5717654/glulookat-explanation
        # [2] https://www.opengl.org/wiki/GluLookAt_code
        f = -np.array(pt)  # Forward direction
        f /= np.linalg.norm(f)
        u = np.array([0.0, 0.0, 1.0])  # Up direction
        s = np.cross(f, u)  # Side direction
        if np.count_nonzero(s) == 0:
            # f and u are parallel, i.e. we are looking along or against Z axis
            s = np.array([1.0, 0.0, 0.0])
        s /= np.linalg.norm(s)
        u = np.cross(s, f)  # Recompute up
        R = np.array([[s[0], s[1], s[2]], [u[0], u[1], u[2]],
                      [-f[0], -f[1], -f[2]]])

        # Convert from OpenGL to OpenCV coordinate system
        R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]
        R = R_yz_flip.dot(R)

        # Translation vector
        t = -R.dot(np.array(pt).reshape((3, 1)))

        views.append({'R': R, 't': t})

    return views, pts_level
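
Both versions of sample_views delegate point generation to hinter_sampling / fibonacci_sampling, which are not shown on this page. Below is a minimal sketch of a Fibonacci-lattice sphere sampler as one plausible implementation of fibonacci_sampling; the helper actually shipped with these projects may differ in details:

import math
import numpy as np

def fibonacci_sampling_sketch(n_pts, radius=1.0):
    # Spread n_pts points quasi-uniformly over a sphere using the golden angle.
    golden_angle = math.pi * (3.0 - math.sqrt(5.0))
    pts = []
    for i in range(n_pts):
        z = 1.0 - 2.0 * (i + 0.5) / n_pts       # z coordinate in (-1, 1).
        r = math.sqrt(max(0.0, 1.0 - z * z))    # Radius of the horizontal slice.
        phi = golden_angle * i                  # Azimuth of the i-th point.
        pts.append([radius * r * math.cos(phi),
                    radius * r * math.sin(phi),
                    radius * z])
    return np.array(pts)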
Code Example #4
def set_axis_angle(self, theta, axis):
    mat = tf.rotation_matrix(theta, axis)
    self.rotation = mat[:3, :3]
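
The host class of this method is not shown. A self-contained sketch with a hypothetical Pose class, assuming tf refers to the transformations module (its rotation_matrix returns a 4x4 homogeneous matrix, of which only the 3x3 rotation block is kept):

import numpy as np
import transformations as tf  # Assumed; any module with a 4x4 rotation_matrix works.

class Pose(object):
    def __init__(self):
        self.rotation = np.eye(3)  # 3x3 rotation matrix.

    def set_axis_angle(self, theta, axis):
        mat = tf.rotation_matrix(theta, axis)
        self.rotation = mat[:3, :3]

pose = Pose()
pose.set_axis_angle(0.5 * np.pi, [0, 0, 1])  # 90 degrees about the z axis.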
Code Example #5
for line in f:
    camera = np.array([float(i) for i in line.split()])
    forw = camera[3:6]  # Forward direction
    forw /= np.linalg.norm(forw)
    up = camera[6:9]  # Up direction
    side = np.cross(forw, up)  # Side direction
    if np.count_nonzero(side) == 0:
        # forw and up are parallel, i.e. we are looking along or against the Z axis
        side = np.array([1.0, 0.0, 0.0])
    side /= np.linalg.norm(side)
    up = np.cross(side, forw)  # Recompute up
    R = np.array([[side[0], side[1], side[2]], [up[0], up[1], up[2]],
                  [-forw[0], -forw[1], -forw[2]]])

    # Convert from OpenGL to OpenCV coordinate system
    R_yz_flip = transform.rotation_matrix(math.pi, [1, 0, 0])[:3, :3]
    R = R_yz_flip.dot(R)

    # Translation vector
    t = -R.dot(camera[0:3].reshape((3, 1)))
    Rt_gt = np.concatenate((R, t), axis=1)
    proj_2d_gt = compute_projection(points3D, Rt_gt.dot(objRT.transpose()),
                                    internal_calibration)
    [xrange, yrange] = np.max(proj_2d_gt, axis=1) - np.min(proj_2d_gt, axis=1)
    proj_2d_gt = np.divide(proj_2d_gt,
                           np.array([width, height])[:, np.newaxis])

    name = ""
    for i in range(0, 6 - len(str(idx))):
        name += "0"
    name += str(idx)
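
compute_projection, points3D, objRT and the other names used in this loop are defined elsewhere in that project. A common implementation of such a projection helper (assumed here, not taken from the snippet's source) multiplies the 3x3 intrinsics by the 3x4 pose and dehomogenizes:

def compute_projection_sketch(points_3D, transformation, internal_calibration):
    # points_3D: 4xN homogeneous object points, transformation: 3x4 [R|t] pose,
    # internal_calibration: 3x3 camera matrix K. Returns 2xN pixel coordinates.
    camera_pts = internal_calibration.dot(transformation).dot(points_3D)
    return camera_pts[:2, :] / camera_pts[2, :]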
Code Example #6
p = {
    # See dataset_params.py for options.
    'dataset': 'itodd',

    # Type of the renderer (used for the VSD pose error function).
    'renderer_type': 'python',  # Options: 'cpp', 'python'.

    # See misc.get_symmetry_transformations().
    'max_sym_disc_step': 0.01,

    'views': [{
        'R': tr.rotation_matrix(0.5 * np.pi, [1, 0, 0]).dot(
            tr.rotation_matrix(-0.5 * np.pi, [0, 0, 1])).dot(
                tr.rotation_matrix(0.1 * np.pi, [0, 1, 0]))[:3, :3],
        't': np.array([[0, 0, 500]]).T
    }],

    # Folder containing the BOP datasets.
    'datasets_path': config.datasets_path,

    # Folder for output visualisations.
    'vis_path': os.path.join(config.output_path, 'vis_object_symmetries'),

    # Path templates for output images.
    'vis_rgb_tpath':