Exemplo n.º 1
0
 def render(self):
     """Render the scene from every (theta, phi) viewpoint on a sphere.

     For each spherical angle pair the camera (and light) is placed at
     ``self.distance`` from ``self.world_origin`` and one RGBA + depth
     pass is rendered.

     Returns:
         List of dicts with keys ``'image'``, ``'alpha'``, ``'depth'``
         and ``'matrices'`` (stacked flattened world_to_camera and
         camera_to_world).
     """
     samples = []
     for theta in self.thetas:
         sin_theta, cos_theta = np.sin(theta), np.cos(theta)
         for phi in self.phis:
             # Spherical -> cartesian camera position. NOTE(review):
             # y and z are deliberately swapped in the array below,
             # matching the original coordinate convention.
             x = self.distance * sin_theta * np.cos(phi)
             y = self.distance * sin_theta * np.sin(phi)
             z = self.distance * cos_theta
             camera_to_world, world_to_camera = compute_modelview_matrices(
                 np.array([x, z, y]), self.world_origin)
             self.scene.set_pose(self.camera, camera_to_world)
             self.scene.set_pose(self.light, camera_to_world)
             image, depth = self.renderer.render(self.scene,
                                                 flags=self.RGBA)
             image, alpha = split_alpha_channel(image)
             matrices = np.vstack([world_to_camera.flatten(),
                                   camera_to_world.flatten()])
             samples.append({'image': image,
                             'alpha': alpha,
                             'depth': depth,
                             'matrices': matrices})
     return samples
Exemplo n.º 2
0
 def _change_scene(self):
     """Move camera and light to a freshly sampled camera origin.

     Returns:
         Tuple ``(camera_to_world, world_to_camera)`` of pose matrices.
     """
     origin = self._sample_camera_origin()
     camera_to_world, world_to_camera = compute_modelview_matrices(
         origin, self.world_origin, self.roll, self.shift)
     # Camera and light share the same pose.
     for node in (self.camera, self.light):
         self.scene.set_pose(node, camera_to_world)
     return camera_to_world, world_to_camera
Exemplo n.º 3
0
 def render(self):
     """Render one RGBA frame with a sampled camera pose and light intensity.

     Returns:
         Tuple ``(image, alpha)``: the RGB image and its alpha channel.
     """
     origin, light_intensity = self._sample_parameters()
     camera_to_world, _world_to_camera = compute_modelview_matrices(
         origin, self.world_origin, self.roll, self.shift)
     self.light.light.intensity = light_intensity
     # Camera and light share the same pose.
     for node in (self.camera, self.light):
         self.scene.set_pose(node, camera_to_world)
     image, _depth = self.renderer.render(self.scene, flags=self.RGBA)
     image, alpha = split_alpha_channel(image)
     return image, alpha
Exemplo n.º 4
0
 def render(self):
     """Render an RGBA image plus a flat-shaded RGB mask of the scene.

     Two passes are rendered from the same sampled pose: an RGBA pass
     with the pixel mesh hidden, and a flat pass with the object mesh
     hidden.

     Returns:
         Tuple ``(image, alpha, RGB_mask)``.
     """
     origin, light_intensity = self._sample_parameters()
     camera_to_world, _world_to_camera = compute_modelview_matrices(
         origin, self.world_origin, self.roll, self.shift)
     self.light.light.intensity = light_intensity
     # Camera and light share the same pose.
     for node in (self.camera, self.light):
         self.scene.set_pose(node, camera_to_world)
     # RGBA pass: hide the pixel mesh, render, then restore visibility.
     self.pixel_mesh.mesh.is_visible = False
     image, _depth = self.renderer.render(self.scene, self.flags_RGBA)
     self.pixel_mesh.mesh.is_visible = True
     image, alpha = split_alpha_channel(image)
     # Flat mask pass: hide the object mesh, render, then restore it.
     self.mesh.mesh.is_visible = False
     RGB_mask, _ = self.renderer.render(self.scene, self.flags_FLAT)
     self.mesh.mesh.is_visible = True
     return image, alpha, RGB_mask
Exemplo n.º 5
0
parser.add_argument('--background_wildcard',
                    type=str,
                    help='Wildcard for background images',
                    default=os.path.join(
                        root_path,
                        '.keras/paz/datasets/voc-backgrounds/*.png'))
args = parser.parse_args()

# loading background image paths
image_paths = glob.glob(args.background_wildcard)
if not image_paths:
    raise ValueError('Background images not found. Provide path to png images')

# setting symmetries: build a canonical camera pose looking at the origin
translation = np.array([0.0, 0.0, 0.33])
# `y` holds the (unused) world_to_camera matrix returned with the pose
camera_pose, y = compute_modelview_matrices(translation, np.zeros((3)))
# rotate the camera frame 90 degrees about z to align conventions
align_z = build_rotation_matrix_z(np.pi / 2)
camera_pose[:3, :3] = np.matmul(align_z, camera_pose[:3, :3])
# axis-aligned sampling box for object translations (meters)
min_corner = [-0.05, -0.02, -0.05]
max_corner = [+0.05, +0.02, +0.01]
# object is symmetric under a half-turn about x
angles = [0.0, np.pi]
symmetries = np.array([build_rotation_matrix_x(angle) for angle in angles])

# setting rendering function
H, W, num_channels = image_shape = [args.image_size, args.image_size, 3]
renderer = CanonicalPosePixelMaskRenderer(args.obj_path, camera_pose,
                                          min_corner, max_corner, symmetries,
                                          [H, W], args.y_fov, args.light)

# building full processor
inputs_to_shape = {'input_1': [H, W, num_channels]}  # inputs RGB
Exemplo n.º 6
0
def test_compute_modelview_matrices_random(origin_B, target_B, transform_B):
    """world_to_camera equals the expected transform and camera_to_world
    is its inverse."""
    camera_to_world, world_to_camera = compute_modelview_matrices(
        origin_B, target_B)
    recovered_pose = np.linalg.inv(world_to_camera)
    assert np.allclose(world_to_camera, transform_B)
    assert np.allclose(recovered_pose, camera_to_world)