Code Example #1
File: room.py Project: sxcgc/python-rtx
group = rtx.ObjectGroup()
geometry = rtx.PlainGeometry(size, size)
geometry.set_rotation((0, math.pi / 2, 0))
geometry.set_position((-10, 0, 0))
material = rtx.EmissiveMaterial(3, visible=False)
mapping = rtx.SolidColorMapping((1, 1, 1))
light = rtx.Object(geometry, material, mapping)
group.add(light)

group.set_rotation((-math.pi / 3, math.pi / 2, 0))
scene.add(group)

screen_width = 64
screen_height = 64

rt_args = rtx.RayTracingArguments()
rt_args.num_rays_per_pixel = 2048
rt_args.max_bounce = 3
rt_args.next_event_estimation_enabled = False
rt_args.supersampling_enabled = True

cuda_args = rtx.CUDAKernelLaunchArguments()
cuda_args.num_threads = 64
cuda_args.num_rays_per_thread = 128

renderer = rtx.Renderer()

camera = rtx.PerspectiveCamera(eye=(0, 0, 6),
                               center=(0, 0, 0),
                               up=(0, 1, 0),
                               fov_rad=math.pi / 3,
                               aspect_ratio=screen_width / screen_height)
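
The excerpt stops after constructing the camera. Based on the other examples in this collection, the rendering step that follows would use the same renderer.render call they all share (a sketch; np is numpy, and scene is the rtx scene built earlier in room.py):

render_buffer = np.zeros((screen_height, screen_width, 3), dtype=np.float32)  # float32 HDR output buffer
renderer.render(scene, camera, rt_args, cuda_args, render_buffer)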
Code Example #2
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Texture
    wall_texture_filename_array = [
        "textures/wall_texture_5.jpg",
    ]
    floor_texture_filename_array = [
        "textures/floor_texture_1.png",
    ]

    # Load MNIST images
    mnist_image_array = load_mnist_images()

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = True
    rt_args.next_event_estimation_enabled = True

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 16

    renderer = rtx.Renderer()
    render_buffer = np.zeros(
        (screen_height, screen_width, 3), dtype=np.float32)

    perspective_camera = rtx.PerspectiveCamera(
        fov_rad=math.pi / 3, aspect_ratio=screen_width / screen_height)
    orthogonal_camera = rtx.OrthographicCamera()

    plt.tight_layout()

    scene = build_scene(
        random.sample(mnist_image_array, 6), wall_texture_filename_array,
        floor_texture_filename_array)

    view_radius = 3
    rotation = 0

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    for _ in range(args.num_views_per_scene):
        eye = (view_radius * math.cos(rotation), -0.125,
               view_radius * math.sin(rotation))
        center = (0, 0, 0)
        perspective_camera.look_at(eye, center, up=(0, 1, 0))

        renderer.render(scene, perspective_camera, rt_args, cuda_args,
                        render_buffer)
        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)
        im1 = axis_perspective.imshow(
            image, interpolation="none", animated=True)

        eye = (view_radius * math.cos(rotation),
               view_radius * math.sin(math.pi / 6),
               view_radius * math.sin(rotation))
        center = (0, 0, 0)
        orthogonal_camera.look_at(eye, center, up=(0, 1, 0))

        renderer.render(scene, orthogonal_camera, rt_args, cuda_args,
                        render_buffer)
        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)
        im2 = axis_orthogonal.imshow(
            image, interpolation="none", animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        rotation += math.pi / 36

    ani = animation.ArtistAnimation(
        fig, ims, interval=1000 / 24, blit=True, repeat_delay=0)  # interval is in milliseconds

    ani.save('mnist_dice.gif', writer="imagemagick")
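
Every example in this collection repeats the same post-processing of the render buffer. For reference, here is that pattern factored into a helper (a sketch; the function name is ours, not the project's):

import numpy as np
import cv2

def postprocess(render_buffer):
    # Clip the HDR buffer to [0, 1] and apply gamma 1/2.2 as an sRGB approximation.
    image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
    # Quantize to 8-bit and smooth residual noise with an edge-preserving bilateral filter.
    image = np.uint8(image * 255)
    return cv2.bilateralFilter(image, 3, 25, 25)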
Code Example #3
def main():
    os.makedirs(args.output_directory, exist_ok=True)  # tolerate an existing directory

    last_file_number = args.initial_file_number + args.total_scenes // args.num_scenes_per_file - 1
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        files = os.listdir(args.output_directory)
        for name in files:
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render

    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)
    camera_distance = 2

    for _ in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            camera_position = np.array(
                (rand_position_xz[0], wall_height / 2, rand_position_xz[1]))
            center = np.array((0, wall_height / 2, 0))

            # Compute yaw and pitch
            camera_direction = camera_position - center
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera.look_at(tuple(camera_position), tuple(center), up=(0, 1, 0))
            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)
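
The file-number scan at the top of this example decides where to resume an interrupted run: it inspects the .h5 files already written and continues after the highest one within this run's range. The same logic as a pure function (a hypothetical helper, for illustration only):

import os

def next_file_number(output_directory, initial_file_number, last_file_number):
    next_number = initial_file_number
    for name in os.listdir(output_directory):
        if not name.endswith(".h5"):
            continue
        number = int(name[:-len(".h5")])
        # Only files inside the range this run is responsible for count.
        if initial_file_number <= number <= last_file_number:
            next_number = max(next_number, number + 1)
    return next_number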
Code Example #4
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        colors.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    perspective_camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                               aspect_ratio=screen_width /
                                               screen_height)
    orthographic_camera = rtx.OrthographicCamera()
    camera_distance = 2

    plt.tight_layout()

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    current_rad = 0
    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    ims = []

    for _ in range(total_frames):
        # Perspective camera
        camera_position = (camera_distance * math.cos(current_rad),
                           wall_height / 2,
                           camera_distance * math.sin(current_rad))
        center = (0, wall_height / 2, 0)

        perspective_camera.look_at(camera_position, center, up=(0, 1, 0))
        renderer.render(scene, perspective_camera, rt_args, cuda_args,
                        render_buffer)

        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)

        # Orthographic camera
        offset_y = 1
        camera_position = (2 * math.cos(current_rad),
                           2 * math.sin(math.pi / 6) + offset_y,
                           2 * math.sin(current_rad))
        center = (0, offset_y, 0)

        orthographic_camera.look_at(camera_position, center, up=(0, 1, 0))
        renderer.render(scene, orthographic_camera, rt_args, cuda_args,
                        render_buffer)

        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)
        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        # plt.pause(1e-8)

        current_rad += rad_step

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # milliseconds per frame
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
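
Both cameras in this example orbit the room on a horizontal circle, advancing by rad_step each frame. The position math, isolated (an illustrative helper, not part of the project):

import math

def orbit_position(radius, height, rad):
    # A point on a horizontal circle of the given radius at a fixed height,
    # parameterized by the rotation angle in radians.
    return (radius * math.cos(rad), height, radius * math.sin(rad))

The perspective camera uses orbit_position(camera_distance, wall_height / 2, current_rad); the orthographic one orbits at radius 2 with the fixed height 2 * math.sin(math.pi / 6) + offset_y.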
Code Example #5
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Texture
    wall_texture_filename_array = [
        "textures/wall_texture_1.png",
        "textures/wall_texture_2.jpg",
        "textures/wall_texture_3.jpg",
        "textures/wall_texture_4.jpg",
        "textures/wall_texture_5.jpg",
        "textures/wall_texture_6.jpg",
        "textures/wall_texture_7.jpg",
    ]
    floor_texture_filename_array = [
        "textures/floor_texture_1.png",
    ]

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 1
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    grid_size = 8
    wall_height = 3

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = True
    rt_args.next_event_estimation_enabled = True

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 16

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    dataset = gqn.archiver.Archiver(
        directory=args.output_directory,
        total_observations=args.total_observations,
        num_observations_per_file=min(args.num_observations_per_file,
                                      args.total_observations),
        image_size=(args.image_size, args.image_size),
        num_views_per_scene=args.num_views_per_scene,
        initial_file_number=args.initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)

    for _ in tqdm(range(args.total_observations)):
        scene = build_scene(color_array, wall_texture_filename_array,
                            floor_texture_filename_array, grid_size,
                            wall_height)
        scene_data = gqn.archiver.SceneData((args.image_size, args.image_size),
                                            args.num_views_per_scene)

        for _ in range(args.num_views_per_scene):
            rotation = random.uniform(0, math.pi * 2)
            radius = grid_size - 5
            eye = (radius * math.cos(rotation), -0.125,
                   radius * math.sin(rotation))
            center = (0, -0.125, 0)
            camera.look_at(eye, center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            yaw = gqn.math.yaw(eye, center)
            pitch = gqn.math.pitch(eye, center)
            scene_data.add(image, eye, math.cos(yaw), math.sin(yaw),
                           math.cos(pitch), math.sin(pitch))

            # plt.imshow(image, interpolation="none")
            # plt.pause(1e-8)

        dataset.add(scene_data)
Code Example #6
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = False

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    camera = rtx.OrthographicCamera()

    fig = plt.figure(figsize=(3, 3))
    ims = []

    scene = build_scene(color_array)

    view_radius = 3
    rotation = 0

    for _ in range(args.num_views_per_scene):
        eye = (view_radius * math.cos(rotation),
               view_radius * math.sin(math.pi / 6),
               view_radius * math.sin(rotation))
        center = (0, 0, 0)
        camera.look_at(eye, center, up=(0, 1, 0))

        renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

        # Convert to sRGB
        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)

        im = plt.imshow(image, interpolation="none", animated=True)
        ims.append([im])

        plt.pause(1e-8)
        rotation += math.pi / 36

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # milliseconds per frame
                                    blit=True,
                                    repeat_delay=0)

    ani.save('shepard_matzler.gif', writer="imagemagick")
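
Most examples build their palette the same way: evenly spaced hues at fixed saturation, converted with colorsys.hsv_to_rgb, plus an alpha of 1. Note that hsv_to_rgb takes value, not lightness, as its third argument, despite the variable name used in these loops. Also, dividing by num_colors - 1 makes hue 0 and hue 1 both red; examples #3 and #4 divide by num_colors instead, which avoids the duplicate. As a helper (a sketch; the name is ours):

import colorsys

def make_palette(num_colors, saturation=0.9, value=1.0):
    # Evenly spaced hues give maximally distinct RGBA colors.
    return [colorsys.hsv_to_rgb(n / (num_colors - 1), saturation, value) + (1,)
            for n in range(num_colors)]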
Code Example #7
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = False

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    dataset = gqn.archiver.Archiver(
        directory=args.output_directory,
        total_observations=args.total_observations,
        num_observations_per_file=min(args.num_observations_per_file,
                                      args.total_observations),
        image_size=(args.image_size, args.image_size),
        num_views_per_scene=args.num_views_per_scene,
        frames_per_rotation=args.frames_per_rotation,
        initial_file_number=args.initial_file_number)

    camera = rtx.OrthographicCamera()

    for _ in tqdm(range(args.total_observations)):
        scene = build_scene(color_array)
        scene_data = gqn.archiver.SceneData((args.image_size, args.image_size),
                                            args.num_views_per_scene,
                                            args.frames_per_rotation)

        view_radius = 3
        angle_rad = 0

        for _ in range(args.frames_per_rotation):
            eye = rotate_viewpoint(angle_rad)
            eye = tuple(view_radius * (eye / np.linalg.norm(eye)))
            center = (0, 0, 0)
            camera.look_at(eye, center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add_orig(image)
            angle_rad += 2 * math.pi / args.frames_per_rotation

        for _ in range(args.num_views_per_scene):
            eye = np.random.normal(size=3)
            eye = tuple(view_radius * (eye / np.linalg.norm(eye)))
            center = (0, 0, 0)
            camera.look_at(eye, center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            # plt.imshow(image, interpolation="none")
            # plt.pause(1e-8)

            yaw = gqn.math.yaw(eye, center)
            pitch = gqn.math.pitch(eye, center)
            scene_data.add(image, eye, math.cos(yaw), math.sin(yaw),
                           math.cos(pitch), math.sin(pitch))

        dataset.add(scene_data)
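
The tuples passed to scene_data.add encode each viewpoint as the camera position plus the cosine and sine of yaw and pitch, i.e. the 7-dimensional viewpoint vector used by GQN. The encoding in isolation (an illustrative helper):

import math
import numpy as np

def encode_viewpoint(eye, yaw, pitch):
    # (x, y, z, cos yaw, sin yaw, cos pitch, sin pitch)
    return np.array([*eye, math.cos(yaw), math.sin(yaw),
                     math.cos(pitch), math.sin(pitch)], dtype=np.float32)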
Code Example #8
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.ambient_light_intensity = 0.05

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    camera = rtx.OrthographicCamera()

    fig = plt.figure(figsize=(3, 3))
    ims = []

    camera_distance = 1
    current_rad = 0
    rad_step = math.pi / 18
    total_frames = int(math.pi * 2 / rad_step)

    for num_cubes in range(1, 8):
        scene = build_scene(num_cubes, color_array)

        for _ in range(total_frames):
            camera_position = (camera_distance * math.sin(current_rad),
                               camera_distance * math.sin(math.pi / 6),
                               camera_distance * math.cos(current_rad))
            center = (0, 0, 0)
            camera.look_at(camera_position, center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            im = plt.imshow(image, interpolation="none", animated=True)
            ims.append([im])

            # plt.pause(1e-8)

            current_rad += rad_step

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # milliseconds per frame
                                    blit=True,
                                    repeat_delay=0)

    ani.save('shepard_matzler.gif', writer="imagemagick")
Code Example #9
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.ambient_light_intensity = 0.05

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    camera = rtx.OrthographicCamera()

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(args.num_cubes, color_array)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)

        camera_distance = 1

        for _ in range(args.num_observations_per_scene):
            # Generate random point on a sphere
            camera_position = np.random.normal(size=3)
            camera_position = camera_distance * camera_position / np.linalg.norm(
                camera_position)
            # Compute yaw and pitch
            yaw, pitch = compute_yaw_and_pitch(camera_position)

            center = (0, 0, 0)
            camera.look_at(tuple(camera_position), center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

        archiver.add(scene_data)
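
The camera placement here draws a standard normal vector and normalizes it; because the multivariate Gaussian is isotropic, this yields a direction distributed uniformly over the sphere. Isolated (an illustrative helper):

import numpy as np

def random_point_on_sphere(radius=1.0):
    # An isotropic Gaussian sample, normalized, is uniform on the unit sphere.
    v = np.random.normal(size=3)
    return radius * v / np.linalg.norm(v)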
Code Example #10
def main():
    os.makedirs(os.path.join(args.dataset_path, "test_data"), exist_ok=True)  # tolerate an existing directory

    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 512
    rt_args.max_bounce = 2
    rt_args.supersampling_enabled = False

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    camera = rtx.OrthographicCamera()

    # Not sure whether enumerate will behave correctly here...
    original_data = c_gqn.data.Dataset(args.dataset_path)
    for i, subset in enumerate(original_data):
        iterator = c_gqn.data.Iterator(subset, batch_size=1)

        for j, data_indices in enumerate(iterator):
            _images, viewpoints, _original_images = subset[data_indices]

            images = []
            scene = build_scene(color_array)
            for viewpoint in viewpoints[0]:
                eye = tuple(viewpoint[0:3])

                center = (0, 0, 0)
                camera.look_at(eye, center, up=(0, 1, 0))

                renderer.render(scene, camera, rt_args, cuda_args,
                                render_buffer)

                # Convert to sRGB
                image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
                image = np.uint8(image * 255)
                image = cv2.bilateralFilter(image, 3, 25, 25)

                images.append(image)

            view_radius = 3
            angle_rad = 0
            original_images = []
            for _ in range(args.frames_per_rotation):
                eye = rotate_viewpoint(angle_rad)
                eye = tuple(view_radius * (eye / np.linalg.norm(eye)))
                center = (0, 0, 0)
                camera.look_at(eye, center, up=(0, 1, 0))

                renderer.render(scene, camera, rt_args, cuda_args,
                                render_buffer)

                # Convert to sRGB
                original_image = np.power(np.clip(render_buffer, 0, 1),
                                          1.0 / 2.2)
                original_image = np.uint8(original_image * 255)
                original_image = cv2.bilateralFilter(original_image, 3, 25, 25)

                original_images.append(original_image)
                angle_rad += 2 * math.pi / args.frames_per_rotation

            np.save(
                os.path.join(args.dataset_path, "test_data",
                             str(i) + "_" + str(j) + ".npy"),
                [images, original_images])
            print('saved:  ' + str(i) + "_" + str(j) + ".npy")
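
np.save on the ragged pair [images, original_images] relies on NumPy implicitly building an object array, which recent NumPy versions reject at creation time and which needs allow_pickle=True to load back. A more robust variant (a hypothetical change, not the project's code) stacks each list into a proper array and saves both under named keys:

path = os.path.join(args.dataset_path, "test_data", "{}_{}.npz".format(i, j))
np.savez(path, images=np.stack(images), original_images=np.stack(original_images))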
Code Example #11
def main():
    os.makedirs(args.output_directory, exist_ok=True)  # tolerate an existing directory

    # Load MNIST images
    mnist_images = load_mnist_images()

    # Set GPU device
    rtx.set_device(args.gpu_device)

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)
    camera_distance = 2

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            camera_position = np.array(
                (rand_position_xz[0], wall_height / 2, rand_position_xz[1]))
            center = np.array((0, wall_height / 2, 0))

            # Compute yaw and pitch
            camera_direction = camera_position - center
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera.look_at(tuple(camera_position), tuple(center), up=(0, 1, 0))
            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)
Code Example #12
def main():
    os.makedirs(args.figure_directory, exist_ok=True)  # tolerate an existing directory

    #### Model ####
    xp = np
    using_gpu = args.gpu_device >= 0
    if using_gpu:
        cuda.get_device(args.gpu_device).use()
        xp = cp

    hyperparams = HyperParameters(snapshot_directory=args.snapshot_path)
    model = Model(hyperparams, snapshot_directory=args.snapshot_path)
    if using_gpu:
        model.to_gpu()
    print(hyperparams)

    #### Renderer ####
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    color_array = []
    for n in range(args.num_colors):
        hue = n / (args.num_colors - 1)
        saturation = 0.9
        lightness = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, lightness)
        color_array.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 2048
    rt_args.max_bounce = 4
    rt_args.supersampling_enabled = False

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    camera = rtx.OrthographicCamera()

    #### Figure ####
    plt.style.use("dark_background")
    fig = plt.figure(figsize=(8, 4))
    fig.suptitle("GQN")

    axis_observation = fig.add_subplot(1, 2, 1)
    axis_observation.axis("off")
    axis_observation.set_title("Observation")

    axis_generation = fig.add_subplot(1, 2, 2)
    axis_generation.axis("off")
    axis_generation.set_title("Generation")

    for scene_index in range(1, 100):
        scene = build_scene(color_array)

        eye_scale = 3
        total_frames_per_rotation = 48
        artist_frame_array = []

        observation_viewpoint_angle_rad = 0
        for k in range(5):
            eye = tuple(p * eye_scale for p in [
                math.cos(observation_viewpoint_angle_rad),
                math.sin(observation_viewpoint_angle_rad), 0
            ])
            center = (0, 0, 0)
            camera.look_at(eye, center, up=(0, 1, 0))

            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Convert to sRGB
            frame = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            frame = np.uint8(frame * 255)
            frame = cv2.bilateralFilter(frame, 3, 25, 25)

            observation_viewpoint_angle_rad += math.pi / 20

            yaw = gqn.math.yaw(eye, center)
            pitch = gqn.math.pitch(eye, center)
            observed_viewpoint = np.array(
                eye + (math.cos(yaw), math.sin(yaw), math.cos(pitch),
                       math.sin(pitch)),
                dtype=np.float32)
            observed_viewpoint = observed_viewpoint[None, None, ...]

            observed_image = frame.astype(np.float32)
            observed_image = preprocess_images(observed_image, add_noise=False)
            observed_image = observed_image[None, None, ...]
            observed_image = observed_image.transpose((0, 1, 4, 2, 3))

            if using_gpu:
                observed_viewpoint = to_gpu(observed_viewpoint)
                observed_image = to_gpu(observed_image)

            representation = model.compute_observation_representation(
                observed_image, observed_viewpoint)

            query_viewpoint_angle_rad = 0
            for t in range(total_frames_per_rotation):
                artist_array = []

                query_viewpoint = rotate_query_viewpoint(
                    query_viewpoint_angle_rad, 1, xp)
                # query_viewpoint = rotate_query_viewpoint(math.pi / 6, 1, xp)
                generated_image = model.generate_image(query_viewpoint,
                                                       representation)
                generated_image = make_uint8(generated_image[0])

                artist_array.append(
                    axis_observation.imshow(frame,
                                            interpolation="none",
                                            animated=True))
                artist_array.append(
                    axis_generation.imshow(generated_image, animated=True))

                query_viewpoint_angle_rad += 2 * math.pi / total_frames_per_rotation
                artist_frame_array.append(artist_array)

        # Build and save the animation once per scene, after all observation
        # and query frames have been collected.
        anim = animation.ArtistAnimation(fig,
                                         artist_frame_array,
                                         interval=1000 / 24,  # milliseconds per frame
                                         blit=True,
                                         repeat_delay=0)
        anim.save("{}/shepard_matzler_uncertainty_{}.mp4".format(
            args.figure_directory, scene_index),
                  writer="ffmpeg",
                  fps=12)
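
The reshaping around preprocess_images above converts a single (H, W, C) frame into the (batch, views, channels, height, width) layout the model consumes. The same transformation in isolation (an illustrative helper):

import numpy as np

def to_model_input(image_hwc):
    # (H, W, C) -> (1, 1, H, W, C) -> (1, 1, C, H, W)
    x = image_hwc[None, None, ...]
    return x.transpose((0, 1, 4, 2, 3))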