import math

import cupy as cp
import numpy as np
import torch

# The plain functions (look_at, perspective, vertices_to_faces) are the
# chainer/cupy implementations; the *_th variants are their PyTorch
# ports. Exact import paths depend on the repository layout, e.g.
# something like:
#   from neural_renderer import look_at, perspective, vertices_to_faces
#   from neural_renderer_th import (look_at_th, perspective_th,
#                                   vertices_to_faces_th)
#   from . import utils


def test_compare_preprocess_simple():
    # Prepare chainer arrays
    viewing_angle = 30
    eye = [0, 0, -(1. / math.tan(math.radians(viewing_angle)) + 1)]
    vertices = np.array([[0.8, 0.8, 1.], [0.0, -0.5, 1.], [0.2, -0.4, 1.]])
    faces = np.array([[0, 1, 2]])

    vertices_ch = cp.array(vertices, 'float32')
    faces_ch = cp.array(faces, 'int32')
    vertices_ch, faces_ch = utils.to_minibatch((vertices_ch, faces_ch))

    # Prepare torch arrays
    vertices_th, faces_th = utils.to_minibatch_th((vertices, faces))

    look_at_vertices = look_at(vertices_ch, eye)
    perspective_vertices = perspective(look_at_vertices, angle=viewing_angle)
    faces_2 = vertices_to_faces(perspective_vertices, faces_ch)

    look_at_vertices_th = look_at_th(vertices_th, eye)
    perspective_vertices_th = perspective_th(look_at_vertices_th,
                                             angle=viewing_angle)
    faces_2_th = vertices_to_faces_th(perspective_vertices_th, faces_th)
    assert np.mean(np.abs(vertices_ch.get() - vertices_th.numpy())) == 0
    assert np.mean(
        np.abs(look_at_vertices.data.get() -
               look_at_vertices_th.numpy())) < 1e-5
    assert np.mean(
        np.abs(perspective_vertices.data.get() -
               perspective_vertices_th.numpy())) < 1e-5
    assert np.mean(np.abs(faces_2.data.get() - faces_2_th.numpy())) < 1e-5
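# The tests place the camera at z = -(1 / tan(viewing_angle) + 1). A
# minimal sketch of that computation; the rationale in the comments is
# an assumption inferred from the tests, not documented behaviour:
import math

viewing_angle = 30
# 1 / tan(angle) is the distance at which a half-width-1 plane exactly
# fills the field of view; the extra +1 backs the camera off so
# geometry sitting near z = 1 stays in front of it.
eye_z = -(1. / math.tan(math.radians(viewing_angle)) + 1)
print(eye_z)  # approx. -2.732

# The indented methods below appear to come from the PyTorch port's
# renderer class; they reuse the same *_th helpers.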
    def render(self, vertices, faces, textures):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                [faces, faces[:, :, faces.new([2, 1, 0]).long()]], dim=1)
            textures = torch.cat(
                [textures, textures.permute(0, 1, 4, 3, 2, 5)], dim=1)

        # lighting
        faces_lighting = vertices_to_faces_th(vertices, faces)
        textures = lighting_th(faces_lighting, textures,
                               self.light_intensity_ambient,
                               self.light_intensity_directional,
                               self.light_color_ambient,
                               self.light_color_directional,
                               self.light_direction)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = look_at_th(vertices, self.eye)
        elif self.camera_mode == 'look':
            # Not ported yet; the chainer version calls
            # look(vertices, self.eye, self.camera_direction) here.
            raise NotImplementedError

        # perspective transformation
        if self.perspective:
            vertices = perspective_th(vertices, angle=self.viewing_angle)

        # rasterization
        faces = vertices_to_faces_th(vertices, faces)
        images = self.rasterize_rgb(faces, textures)
        return images
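# A hypothetical usage sketch for the render methods here. The class
# name `Renderer` and its constructor are assumptions, not the
# library's confirmed API; only the attributes the methods reference
# (fill_back, camera_mode, eye, perspective, viewing_angle, lighting
# parameters) are taken from the code itself.
renderer = Renderer()                         # hypothetical constructor
renderer.camera_mode = 'look_at'
renderer.eye = [0, 0, -2.732]                 # matches the test setup
# Third return value assumed to be textures, as in the teapot test below.
vertices, faces, textures = utils.load_teapot_batch_th()
images = renderer.render(vertices, faces, textures)
# vertices: [batch, n_vertices, 3]; faces: [batch, n_faces, 3];
# textures: [batch, n_faces, t, t, t, 3], judging by the permute above.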
def test_perspective_th():
    v_in = [1, 2, 10]
    v_out = [np.sqrt(3) / 10, 2 * np.sqrt(3) / 10, 10]
    vertices = np.array(v_in, 'float32')
    vertices = torch.Tensor(vertices[None, None, :])
    transformed = perspective_th(vertices)
    assert (torch.Tensor(v_out) - transformed).norm().item() < 1e-5
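# The expected values in test_perspective_th follow from the
# divide-by-depth projection x' = x / (z * tan(angle)) with a default
# angle of 30 degrees (the formula is inferred from the test itself,
# not from library docs). Since tan(30°) = 1/sqrt(3):
import math
import numpy as np

angle = 30
t = math.tan(math.radians(angle))
x, y, z = 1., 2., 10.
print(np.array([x / (z * t), y / (z * t), z]))
# -> [sqrt(3)/10, 2*sqrt(3)/10, 10], matching v_out above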
def preprocess_th(vertices_th, faces_th, viewing_angle=30, perspective=True):
    eye = [0, 0, -(1. / math.tan(math.radians(viewing_angle)) + 1)]
    look_at_vertices_th = look_at_th(vertices_th, eye)
    if perspective:
        perspective_vertices_th = perspective_th(look_at_vertices_th,
                                                 angle=viewing_angle)
    else:
        perspective_vertices_th = look_at_vertices_th
    faces_th = vertices_to_faces_th(perspective_vertices_th, faces_th)
    return faces_th
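# A minimal usage sketch for preprocess_th on the single-triangle input
# from test_compare_preprocess_simple, assuming the same `utils` helper
# is importable:
import numpy as np

vertices = np.array([[0.8, 0.8, 1.], [0.0, -0.5, 1.], [0.2, -0.4, 1.]],
                    'float32')
faces = np.array([[0, 1, 2]], 'int32')
vertices_th, faces_th = utils.to_minibatch_th((vertices, faces))
faces_proj = preprocess_th(vertices_th, faces_th, viewing_angle=30)
# faces_proj[b, f] holds the three projected xyz vertices of face f,
# ready for rasterization.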
    def render_silhouettes(self, vertices, faces):
        if self.fill_back:
            faces = torch.cat(
                [faces, faces[:, :, faces.new([2, 1, 0]).long()]], dim=1)
        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = look_at_th(vertices, self.eye)
        else:
            raise NotImplementedError

        # perspective transformation
        if self.perspective:
            vertices = perspective_th(vertices, angle=self.viewing_angle)

        # rasterization
        faces = vertices_to_faces_th(vertices, faces)
        images = self.rasterize_sil(faces)
        return images
    def render_depth(self, vertices, faces):
        # fill back
        if self.fill_back:
            faces = torch.cat(
                [faces, faces[:, :, faces.new([2, 1, 0]).long()]], dim=1)

        # viewpoint transformation
        if self.camera_mode == 'look_at':
            vertices = look_at_th(vertices, self.eye)
        elif self.camera_mode == 'look':
            raise NotImplementedError

        # perspective transformation
        if self.perspective:
            vertices = perspective_th(vertices, angle=self.viewing_angle)

        # rasterization
        faces = vertices_to_faces_th(vertices, faces)
        images = rasterize_depth(faces, self.image_size, self.anti_aliasing)
        return images
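# All three render paths share the same pipeline: optional back-face
# duplication -> per-face lighting (RGB only) -> look_at viewpoint
# transform -> optional perspective division -> vertices_to_faces_th ->
# rasterization (RGB, silhouette, or depth).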
def test_compare_preprocess_teapot():
    vertices, faces, textures = utils.load_teapot_batch()
    viewing_angle = 30
    eye = [0, 0, -(1. / math.tan(math.radians(viewing_angle)) + 1)]
    look_at_vertices = look_at(vertices, eye)
    perspective_vertices = perspective(look_at_vertices, angle=viewing_angle)
    faces_2 = vertices_to_faces(perspective_vertices, faces)

    vertices_th, faces_th, _ = utils.load_teapot_batch_th()
    # `eye` computed above is reused; the value is identical.
    look_at_vertices_th = look_at_th(vertices_th, eye)
    perspective_vertices_th = perspective_th(look_at_vertices_th,
                                             angle=viewing_angle)
    faces_2_th = vertices_to_faces_th(perspective_vertices_th, faces_th)
    assert np.mean(np.abs(vertices.get() - vertices_th.numpy())) == 0
    assert np.mean(
        np.abs(look_at_vertices.data.get() -
               look_at_vertices_th.numpy())) < 1e-5
    assert np.mean(
        np.abs(perspective_vertices.data.get() -
               perspective_vertices_th.numpy())) < 1e-5
    assert np.mean(np.abs(faces_2.data.get() - faces_2_th.numpy())) < 1e-5