Example 1
    def test_add_pointcloud_batch_instancer(self, instancer_out_dir,
                                            pointcloud):
        writer = timelapse.Timelapse(instancer_out_dir)

        data = {
            0: {
                'pointcloud_list': [pointcloud],
                'colors': None
            },
            10: {
                'pointcloud_list': [pointcloud + 100.],
                'colors': None
            },
        }
        for iteration, params in data.items():
            writer.add_pointcloud_batch(iteration=iteration,
                                        category='test',
                                        **params)

        # Verify
        filename = os.path.join(instancer_out_dir, 'test', 'pointcloud_0.usd')
        for iteration, params in data.items():
            pointcloud_in = io.usd.import_pointcloud(
                filename, scene_path='/pointcloud_0', time=iteration)[0]

            assert torch.allclose(pointcloud_in, params['pointcloud_list'][0])
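These snippets come from a pytest class, so the fixtures (instancer_out_dir, pointcloud) and imports are defined elsewhere. A minimal standalone sketch of the same round trip, assuming the library is Kaolin with the module paths kaolin.visualize.timelapse and kaolin.io, and with the output directory and point cloud made up:

import os
import torch
from kaolin import io
from kaolin.visualize import timelapse

out_dir = './timelapse_demo'       # hypothetical output directory
pointcloud = torch.rand(100, 3)    # hypothetical (N, 3) point cloud

# Log the same point cloud at two iterations under one category.
writer = timelapse.Timelapse(out_dir)
writer.add_pointcloud_batch(iteration=0, category='demo',
                            pointcloud_list=[pointcloud], colors=None)
writer.add_pointcloud_batch(iteration=10, category='demo',
                            pointcloud_list=[pointcloud + 100.], colors=None)

# Each batch element is written as pointcloud_<idx>.usd in the category
# directory, with one USD time sample per logged iteration.
usd_path = os.path.join(out_dir, 'demo', 'pointcloud_0.usd')
points_at_10 = io.usd.import_pointcloud(usd_path,
                                        scene_path='/pointcloud_0',
                                        time=10)[0]
assert torch.allclose(points_at_10, pointcloud + 100.)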
Example 2
    def test_add_voxelgrid_batch(self, out_dir, voxelgrid):
        writer = timelapse.Timelapse(out_dir)

        data = {
            0: {
                'voxelgrid_list': [voxelgrid]
            },
            10: {
                'voxelgrid_list': [
                    voxelgrid * (torch.rand_like(voxelgrid.float()) < 0.5)
                ]
            },
        }
        for iteration, params in data.items():
            writer.add_voxelgrid_batch(iteration=iteration,
                                       category='test',
                                       **params)

        # Verify
        filename = os.path.join(out_dir, 'test', 'voxelgrid_0.usd')
        for iteration, params in data.items():
            voxelgrid_in = io.usd.import_voxelgrid(filename,
                                                   scene_path='/voxelgrid_0',
                                                   time=iteration)

            assert torch.equal(voxelgrid_in, params['voxelgrid_list'][0])
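The iteration-10 entry above logs a sparser version of the grid by masking out roughly half of its cells, and because an occupancy grid round-trips exactly, the check uses torch.equal rather than allclose. A small self-contained illustration of that masking, with an arbitrary resolution:

import torch

voxelgrid = torch.ones(32, 32, 32, dtype=torch.bool)  # hypothetical dense occupancy grid
mask = torch.rand_like(voxelgrid.float()) < 0.5       # keep each cell with probability 0.5
sparser = voxelgrid * mask                            # elementwise AND: about half the cells remain
print(sparser.float().mean())                         # prints a value near 0.5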
Example 3
    def test_add_pointcloud_batch_color(self, out_dir, pointcloud_color):
        writer = timelapse.Timelapse(out_dir)

        pointcloud, color = pointcloud_color

        data = {
            0: {
                'pointcloud_list': [pointcloud],
                'colors': [color],
                'points_type': 'usd_geom_points'
            },
            10: {
                'pointcloud_list': [pointcloud + 100.],
                'colors': [color],
                'points_type': 'usd_geom_points'
            },
        }
        for iteration, params in data.items():
            writer.add_pointcloud_batch(iteration=iteration,
                                        category='test',
                                        **params)

        # Verify
        filename = os.path.join(out_dir, 'test', 'pointcloud_0.usd')
        for iteration, params in data.items():
            pointcloud_in, color_in, normals_in = io.usd.import_pointcloud(
                filename, scene_path='/pointcloud_0', time=iteration)

            assert torch.allclose(pointcloud_in, params['pointcloud_list'][0])

            assert torch.allclose(color_in, params['colors'][0])
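Compared with the instancer variant in Example 1, this test passes per-point colors and selects points_type='usd_geom_points', and import_pointcloud then yields a (points, colors, normals) triple. A hedged sketch of that colored round trip, with the directory and tensors invented:

import os
import torch
from kaolin import io
from kaolin.visualize import timelapse

out_dir = './timelapse_demo_color'   # hypothetical
points = torch.rand(100, 3)
colors = torch.rand(100, 3)          # per-point RGB in [0, 1]

writer = timelapse.Timelapse(out_dir)
writer.add_pointcloud_batch(iteration=0, category='demo',
                            pointcloud_list=[points], colors=[colors],
                            points_type='usd_geom_points')

points_in, colors_in, normals_in = io.usd.import_pointcloud(
    os.path.join(out_dir, 'demo', 'pointcloud_0.usd'),
    scene_path='/pointcloud_0', time=0)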
Example 4
    def test_parsing(self, timelapse_sample_dir, output_dir2, meshes):
        shutil.copytree(timelapse_sample_dir, output_dir2)

        parser = timelapse.TimelapseParser(output_dir2)
        expected_keys = [
            ('mesh', 'ground_truth', 0), ('mesh', 'ground_truth', 1),
            ('mesh', 'output', 0), ('mesh', 'output', 1),
            ('pointcloud', 'input', 0), ('pointcloud', 'input', 1),
            ('pointcloud', 'output', 0), ('pointcloud', 'output', 1),
            ('voxelgrid', 'output', 0), ('voxelgrid', 'output', 1)
        ]
        expected_keys.sort()
        assert sorted(parser.filepaths.keys()) == expected_keys
        for k in expected_keys:
            assert os.path.exists(parser.filepaths[k])

        assert parser.num_mesh_categories() == 2
        assert parser.num_pointcloud_categories() == 2
        assert parser.num_voxelgrid_categories() == 1

        expected_categories = {
            "mesh": [
                timelapse.TimelapseParser.CategoryInfo(
                    'ground_truth', ids=[0, 1], end_time=0).serializable(),
                timelapse.TimelapseParser.CategoryInfo(
                    'output', ids=[0, 1], end_time=100).serializable()
            ],
            "pointcloud": [
                timelapse.TimelapseParser.CategoryInfo(
                    'input', ids=[0, 1], end_time=0).serializable(),
                timelapse.TimelapseParser.CategoryInfo(
                    'output', ids=[0, 1], end_time=100).serializable()
            ],
            "voxelgrid": [
                timelapse.TimelapseParser.CategoryInfo(
                    'output', ids=[0, 1], end_time=100).serializable()
            ]
        }
        assert set(expected_categories.keys()) == set(parser.dir_info.keys())
        for k, v in expected_categories.items():
            expected = v
            actual = parser.dir_info[k]
            assert len(expected) == len(actual)
            for i in range(len(expected)):
                # Only check expected properties
                for ck, cv in expected[i].items():
                    assert (ck in actual[i])
                    assert cv == actual[i][ck]

        # Now we add another iteration
        writer = timelapse.Timelapse(output_dir2)
        writer.add_mesh_batch(iteration=200,
                              category='output',
                              vertices_list=[m.vertices for m in meshes],
                              faces_list=[m.faces for m in meshes])
        assert parser.check_for_updates()
        assert parser.get_category_info('mesh', 'output')['end_time'] == 200

        # Now let's delete a category
        shutil.rmtree(os.path.join(output_dir2, 'output'))
        assert parser.check_for_updates()
        assert parser.num_mesh_categories() == 1
        assert parser.num_pointcloud_categories() == 1
        assert parser.num_voxelgrid_categories() == 0
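Outside of the test, TimelapseParser is the read side that a visualization front end would poll while training keeps writing new iterations. A minimal polling sketch, assuming the same module path and a hypothetical timelapse directory written by the examples above:

from kaolin.visualize import timelapse

parser = timelapse.TimelapseParser('./timelapse_demo')   # hypothetical directory

# filepaths is keyed by (type, category, object_id) tuples such as
# ('pointcloud', 'demo', 0) and maps each key to a USD file on disk.
for (kind, category, obj_id), path in parser.filepaths.items():
    print(kind, category, obj_id, '->', path)

# check_for_updates() rescans the directory; get_category_info() then
# reports the newest logged iteration as 'end_time'.
if parser.check_for_updates():
    info = parser.get_category_info('pointcloud', 'demo')
    print('latest iteration:', info['end_time'])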
Example 5
    def test_add_mesh_batch(self, out_dir, meshes, material_values,
                            material_textures):
        writer = timelapse.Timelapse(out_dir)
        data = {
            0: {
                'vertices_list': [m.vertices for m in meshes],
                'faces_list': [m.faces for m in meshes],
                'uvs_list': [m.uvs for m in meshes],
                'face_uvs_idx_list': [m.face_uvs_idx for m in meshes],
                'face_normals_list': [m.face_normals for m in meshes],
                'materials_list': [{
                    'values': material_values,
                    'textures': material_textures
                }]
            },
            10: {
                'vertices_list': [m.vertices / 2. for m in meshes],
                'faces_list': [m.faces for m in meshes],
                'materials_list': [{
                    'values': material_values,
                    'textures': material_textures
                }]
            },
        }
        for iteration, params in data.items():
            writer.add_mesh_batch(iteration=iteration,
                                  category='test',
                                  **params)

        # Check that category directory is created
        assert os.path.exists(os.path.join(out_dir, 'test'))

        # Check that data at each iteration is correct
        texture_dir = os.path.join(out_dir, 'test', 'textures')
        assert os.path.exists(texture_dir)
        for iteration in data.keys():
            filename = os.path.join(out_dir, 'test', 'mesh_0.usd')
            mesh_in = io.usd.import_mesh(filename,
                                         time=iteration,
                                         with_normals=True)
            # Verify mesh properties
            assert torch.allclose(data[iteration]['vertices_list'][0],
                                  mesh_in.vertices)
            assert torch.equal(data[iteration]['faces_list'][0], mesh_in.faces)
            # UVs, face_uvs_idx and face normals are only authored at
            # iteration 0; USD holds the last written sample, so later
            # iterations are compared against the iteration-0 data.
            if not data[iteration].get('face_uvs_idx_list'):
                i = 0
            else:
                i = iteration
            assert torch.allclose(data[i]['uvs_list'][0].view(-1, 2),
                                  mesh_in.uvs.view(-1, 2))
            # assert torch.equal(data[i]['face_uvs_idx_list'][0], mesh_in.face_uvs_idx)
            assert torch.allclose(data[i]['face_normals_list'][0],
                                  mesh_in.face_normals)

            materials = data[iteration]['materials_list'][0]
            # Verify materials textures exist
            for attr in ['diffuse', 'specular', 'roughness', 'metallic']:
                assert os.path.exists(
                    os.path.join(texture_dir,
                                 f'mesh_0_textures_{iteration}_{attr}.png'))

            # Verify material properties
            for variant_name, material_data in materials.items():
                mat = io.materials.PBRMaterial().read_from_usd(
                    filename, f'/mesh_0/{variant_name}', time=iteration)
                assert pytest.approx(mat.diffuse_color,
                                     1e-5) == material_data.diffuse_color
                assert pytest.approx(mat.specular_color,
                                     1e-5) == material_data.specular_color
                assert pytest.approx(mat.roughness_value,
                                     1e-5) == material_data.roughness_value
                assert pytest.approx(mat.metallic_value,
                                     1e-5) == material_data.metallic_value

                if material_data.diffuse_texture is not None:
                    assert torch.allclose(mat.diffuse_texture,
                                          material_data.diffuse_texture,
                                          atol=1e-2)
                    assert torch.allclose(mat.specular_texture,
                                          material_data.specular_texture,
                                          atol=1e-2)
                    assert torch.allclose(mat.roughness_texture,
                                          material_data.roughness_texture,
                                          atol=1e-2)
                    assert torch.allclose(mat.metallic_texture,
                                          material_data.metallic_texture,
                                          atol=1e-2)
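The materials_list argument in this example is a list with one entry per mesh in the batch; each entry maps a USD variant name ('values', 'textures') to a material object whose fields the verification loop reads back. A sketch of how such an entry might be built, assuming the fixtures are kaolin.io.materials.PBRMaterial instances (the constructor arguments and values below are invented to match the attributes the test checks):

import torch
from kaolin import io

# One variant carrying scalar/color values, one carrying textures.
material_values = io.materials.PBRMaterial(
    diffuse_color=(0.8, 0.1, 0.1),
    specular_color=(0.1, 0.1, 0.1),
    roughness_value=0.4,
    metallic_value=0.0,
)
material_textures = io.materials.PBRMaterial(
    diffuse_texture=torch.rand(3, 256, 256),
    specular_texture=torch.rand(3, 256, 256),
    roughness_texture=torch.rand(1, 256, 256),
    metallic_texture=torch.rand(1, 256, 256),
)

# One dict per mesh in the batch; the keys become the variant names that
# the test reads back from '/mesh_0/values' and '/mesh_0/textures'.
materials_list = [{
    'values': material_values,
    'textures': material_textures,
}]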