Example No. 1
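(All examples are main() functions excerpted from scene-rendering dataset generators: module-level imports such as os, math, colorsys, numpy as np, cv2, matplotlib.pyplot as plt and tqdm, plus project helpers like build_scene, place_objects, compute_yaw_and_pitch, Archiver, SceneData and the parsed args, are defined elsewhere in each source script. The pyrender-based examples appear to rely on a project-local fork of pyrender: upstream pyrender provides neither a pyrender.objects module nor a RenderFlags.ANTI_ALIASING flag.)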
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-2.5, 2.5, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            look_at = np.array([rand_lookat_xz[0], 1, rand_lookat_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera.look_at(tuple(camera_position),
                           tuple(look_at),
                           up=(0, 1, 0))
            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Approximate linear-to-sRGB conversion (gamma 1/2.2)
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)
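All six examples rely on compute_yaw_and_pitch, which is defined elsewhere in the scripts. A minimal sketch of what it plausibly computes, assuming yaw is the heading of the direction's xz-projection and pitch its elevation above the xz plane (the exact sign and wrapping conventions belong to the original helper):

import math

import numpy as np

def compute_yaw_and_pitch(direction):
    # Hypothetical reconstruction, not the original implementation.
    x, y, z = direction
    yaw = math.atan2(x, z) % (2 * math.pi)  # heading around the y axis
    pitch = math.asin(y / np.linalg.norm(direction))  # elevation angle
    return yaw, pitch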
Example No. 2
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append(np.array((red, green, blue, 1)))

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_objects(scene,
                      colors,
                      objects,
                      max_num_objects=args.max_num_objects,
                      discrete_position=args.discrete_position,
                      rotate_object=args.rotate_object)
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            rand_position_xz = np.random.uniform(-3, 3, size=2)
            rand_lookat_xz = np.random.uniform(-6, 6, size=2)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])

            # Compute yaw and pitch
            camera_direction = rand_position_xz - rand_lookat_xz
            camera_direction = np.array(
                [camera_direction[0], 0, camera_direction[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
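Examples 2, 3 and 6 orient the pyrender camera with genearte_camera_quaternion (spelled as in the source). A plausible sketch, assuming it composes yaw about the y axis with pitch about the x axis and returns the (x, y, z, w) unit quaternion that pyrender's Node.rotation expects:

import math

import numpy as np

def quaternion_multiply(a, b):
    # Hamilton product for quaternions stored as (x, y, z, w).
    ax, ay, az, aw = a
    bx, by, bz, bw = b
    return np.array([
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by - ax * bz + ay * bw + az * bx,
        aw * bz + ax * by - ay * bx + az * bw,
        aw * bw - ax * bx - ay * by - az * bz,
    ])

def genearte_camera_quaternion(yaw, pitch):
    # Hypothetical reconstruction: pitch about x, then yaw about y.
    q_pitch = np.array([math.sin(pitch / 2), 0, 0, math.cos(pitch / 2)])
    q_yaw = np.array([0, math.sin(yaw / 2), 0, math.cos(yaw / 2)])
    return quaternion_multiply(q_yaw, q_pitch)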
Example No. 3
def main():
    # Colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append(np.array((red, green, blue, 1)))

    floor_textures = [
        "../textures/lg_floor_d.tga",
        "../textures/lg_style_01_floor_blue_d.tga",
        "../textures/lg_style_01_floor_orange_bright_d.tga",
    ]

    wall_textures = [
        "../textures/lg_style_01_wall_cerise_d.tga",
        "../textures/lg_style_01_wall_green_bright_d.tga",
        "../textures/lg_style_01_wall_red_bright_d.tga",
        "../textures/lg_style_02_wall_yellow_d.tga",
        "../textures/lg_style_03_wall_orange_bright_d.tga",
    ]

    objects = [
        pyrender.objects.Capsule,
        pyrender.objects.Cylinder,
        pyrender.objects.Icosahedron,
        pyrender.objects.Box,
        pyrender.objects.Sphere,
    ]

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    plt.tight_layout()  # only takes effect once the figure and axes exist
    ims = []

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  objects,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    camera_distance = 5
    perspective_camera = PerspectiveCamera(yfov=math.pi / 4)
    perspective_camera_node = Node(camera=perspective_camera,
                                   translation=np.array([0, 1, 1]))
    orthographic_camera = OrthographicCamera(xmag=3, ymag=3)
    orthographic_camera_node = Node(camera=orthographic_camera)

    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)
    current_rad = 0
    for _ in range(total_frames):
        scene.add_node(perspective_camera_node)

        # Perspective camera
        camera_xz = camera_distance * np.array(
            (math.sin(current_rad), math.cos(current_rad)))
        # Compute yaw and pitch
        camera_direction = np.array([camera_xz[0], 0, camera_xz[1]])
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        perspective_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        camera_position = np.array([camera_xz[0], 1, camera_xz[1]])
        perspective_camera_node.translation = camera_position

        # Rendering
        flags = RenderFlags.SHADOWS_DIRECTIONAL
        if args.anti_aliasing:
            flags |= RenderFlags.ANTI_ALIASING
        image = renderer.render(scene, flags=flags)[0]
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)
        scene.remove_node(perspective_camera_node)

        # Orthographic camera
        scene.add_node(orthographic_camera_node)
        camera_direction = camera_distance * np.array(
            (math.sin(current_rad), math.sin(math.pi / 6),
             math.cos(current_rad)))
        yaw, pitch = compute_yaw_and_pitch(camera_direction)

        orthographic_camera_node.rotation = genearte_camera_quaternion(
            yaw, pitch)
        orthographic_camera_node.translation = np.array(
            [camera_direction[0], 4, camera_direction[2]])

        image = renderer.render(scene, flags=flags)[0]

        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        plt.pause(1e-8)

        current_rad += rad_step
        scene.remove_node(orthographic_camera_node)

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # milliseconds per frame (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
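If ImageMagick is not installed, matplotlib's built-in Pillow writer can produce the same GIF; an alternative to the call above, not what the source uses:

ani.save(filename, writer="pillow", fps=24)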
Example No. 4
def main():
    # Set GPU device
    rtx.set_device(args.gpu_device)

    # Initialize colors
    colors = []
    for n in range(args.num_colors):
        hue = n / args.num_colors
        saturation = 1
        value = 1
        red, green, blue = colorsys.hsv_to_rgb(hue, saturation, value)
        colors.append((red, green, blue, 1))

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    perspective_camera = rtx.PerspectiveCamera(
        fov_rad=math.pi / 3,
        aspect_ratio=screen_width / screen_height)
    orthographic_camera = rtx.OrthographicCamera()
    camera_distance = 2

    scene = build_scene(floor_textures,
                        wall_textures,
                        fix_light_position=args.fix_light_position)
    place_objects(scene,
                  colors,
                  min_num_objects=args.num_objects,
                  max_num_objects=args.num_objects,
                  discrete_position=args.discrete_position,
                  rotate_object=args.rotate_object)

    current_rad = 0
    rad_step = math.pi / 36
    total_frames = int(math.pi * 2 / rad_step)

    fig = plt.figure(figsize=(6, 3))
    axis_perspective = fig.add_subplot(1, 2, 1)
    axis_orthogonal = fig.add_subplot(1, 2, 2)
    plt.tight_layout()  # only takes effect once the figure and axes exist
    ims = []

    for _ in range(total_frames):
        # Perspective camera
        # wall_height is a module-level constant defined elsewhere in the script
        camera_position = (camera_distance * math.cos(current_rad),
                           wall_height / 2,
                           camera_distance * math.sin(current_rad))
        center = (0, wall_height / 2, 0)

        perspective_camera.look_at(camera_position, center, up=(0, 1, 0))
        renderer.render(scene, perspective_camera, rt_args, cuda_args,
                        render_buffer)

        # Approximate linear-to-sRGB conversion (gamma 1/2.2)
        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)
        im1 = axis_perspective.imshow(image,
                                      interpolation="none",
                                      animated=True)

        # Orthographic camera
        offset_y = 1
        camera_position = (2 * math.cos(current_rad),
                           2 * math.sin(math.pi / 6) + offset_y,
                           2 * math.sin(current_rad))
        center = (0, offset_y, 0)

        orthographic_camera.look_at(camera_position, center, up=(0, 1, 0))
        renderer.render(scene, orthographic_camera, rt_args, cuda_args,
                        render_buffer)

        # Approximate linear-to-sRGB conversion (gamma 1/2.2)
        image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
        image = np.uint8(image * 255)
        image = cv2.bilateralFilter(image, 3, 25, 25)
        im2 = axis_orthogonal.imshow(image,
                                     interpolation="none",
                                     animated=True)
        ims.append([im1, im2])

        # plt.pause(1e-8)

        current_rad += rad_step

    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000 / 24,  # milliseconds per frame (~24 fps)
                                    blit=True,
                                    repeat_delay=0)
    filename = "rooms"
    if args.discrete_position:
        filename += "_discrete_position"
    if args.rotate_object:
        filename += "_rotate_object"
    if args.fix_light_position:
        filename += "_fix_light_position"
    filename += ".gif"
    ani.save(filename, writer="imagemagick")
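The render-buffer post-processing recurs in every rtx example. A small helper capturing the same three steps, as a refactoring sketch rather than code from the source:

import cv2
import numpy as np

def tonemap(render_buffer):
    # Clip to [0, 1] and apply the approximate sRGB gamma (1 / 2.2).
    image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
    # Quantize to 8 bits, then smooth raytracing noise while keeping edges.
    image = np.uint8(image * 255)
    return cv2.bilateralFilter(image, 3, 25, 25)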
Example No. 5
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    last_file_number = (args.initial_file_number +
                        args.total_scenes // args.num_scenes_per_file - 1)
    initial_file_number = args.initial_file_number
    if os.path.isdir(args.output_directory):
        for name in os.listdir(args.output_directory):
            if not name.endswith(".h5"):
                continue
            number = int(name.replace(".h5", ""))
            if number > last_file_number:
                continue
            if number < initial_file_number:
                continue
            initial_file_number = number + 1
    total_scenes_to_render = args.total_scenes - args.num_scenes_per_file * (
        initial_file_number - args.initial_file_number)

    assert args.num_scenes_per_file <= total_scenes_to_render
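    # Worked example (hypothetical numbers): with initial_file_number=1,
    # total_scenes=2000 and num_scenes_per_file=500, files 1.h5 .. 4.h5 are
    # expected; if 1.h5 and 2.h5 already exist, rendering resumes at file 3
    # and total_scenes_to_render = 2000 - 500 * 2 = 1000.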

    # Load MNIST images
    mnist_images = load_mnist_images()

    # Set GPU device
    rtx.set_device(args.gpu_device)

    screen_width = args.image_size
    screen_height = args.image_size

    # Setting up a raytracer
    rt_args = rtx.RayTracingArguments()
    rt_args.num_rays_per_pixel = 1024
    rt_args.max_bounce = 3
    rt_args.supersampling_enabled = args.anti_aliasing
    rt_args.next_event_estimation_enabled = True
    rt_args.ambient_light_intensity = 0.1

    cuda_args = rtx.CUDAKernelLaunchArguments()
    cuda_args.num_threads = 64
    cuda_args.num_rays_per_thread = 32

    renderer = rtx.Renderer()
    render_buffer = np.zeros((screen_height, screen_width, 3),
                             dtype=np.float32)

    archiver = Archiver(
        directory=args.output_directory,
        num_scenes_per_file=args.num_scenes_per_file,
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=initial_file_number)

    camera = rtx.PerspectiveCamera(fov_rad=math.pi / 3,
                                   aspect_ratio=screen_width / screen_height)
    camera_distance = 2

    for _ in tqdm(range(total_scenes_to_render)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Sample camera position
            # Normalizing a 2-D Gaussian sample gives a uniformly random
            # direction, placing the camera on a circle of radius camera_distance
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)
            # wall_height is a module-level constant defined elsewhere in the script
            camera_position = np.array(
                (rand_position_xz[0], wall_height / 2, rand_position_xz[1]))
            center = np.array((0, wall_height / 2, 0))

            # Compute yaw and pitch
            camera_direction = camera_position - center
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera.look_at(tuple(camera_position), tuple(center), up=(0, 1, 0))
            renderer.render(scene, camera, rt_args, cuda_args, render_buffer)

            # Approximate linear-to-sRGB conversion (gamma 1/2.2)
            image = np.power(np.clip(render_buffer, 0, 1), 1.0 / 2.2)
            image = np.uint8(image * 255)
            image = cv2.bilateralFilter(image, 3, 25, 25)

            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)
Example No. 6
def main():
    os.makedirs(args.output_directory, exist_ok=True)

    # Load MNIST images
    mnist_images = load_mnist_images()

    renderer = OffscreenRenderer(viewport_width=args.image_size,
                                 viewport_height=args.image_size)

    archiver = Archiver(
        directory=args.output_directory,
        total_scenes=args.total_scenes,
        num_scenes_per_file=min(args.num_scenes_per_file, args.total_scenes),
        image_size=(args.image_size, args.image_size),
        num_observations_per_scene=args.num_observations_per_scene,
        initial_file_number=args.initial_file_number)

    for _ in tqdm(range(args.total_scenes)):
        scene = build_scene(floor_textures,
                            wall_textures,
                            fix_light_position=args.fix_light_position)
        place_dice(scene,
                   mnist_images,
                   discrete_position=args.discrete_position,
                   rotate_dice=args.rotate_dice)

        camera_distance = 4
        camera = PerspectiveCamera(yfov=math.pi / 4)
        camera_node = Node(camera=camera, translation=np.array([0, 1, 1]))
        scene.add_node(camera_node)
        scene_data = SceneData((args.image_size, args.image_size),
                               args.num_observations_per_scene)
        for _ in range(args.num_observations_per_scene):
            # Normalizing a 2-D Gaussian sample gives a uniformly random
            # direction, placing the camera on a circle of radius camera_distance
            rand_position_xz = np.random.normal(size=2)
            rand_position_xz = camera_distance * rand_position_xz / np.linalg.norm(
                rand_position_xz)

            # Compute yaw and pitch
            camera_direction = np.array(
                [rand_position_xz[0], 0, rand_position_xz[1]])
            yaw, pitch = compute_yaw_and_pitch(camera_direction)

            camera_node.rotation = genearte_camera_quaternion(yaw, pitch)
            camera_position = np.array(
                [rand_position_xz[0], 1, rand_position_xz[1]])
            camera_node.translation = camera_position

            # Rendering
            flags = RenderFlags.SHADOWS_DIRECTIONAL
            if args.anti_aliasing:
                flags |= RenderFlags.ANTI_ALIASING
            image = renderer.render(scene, flags=flags)[0]
            scene_data.add(image, camera_position, math.cos(yaw),
                           math.sin(yaw), math.cos(pitch), math.sin(pitch))

            if args.visualize:
                plt.clf()
                plt.imshow(image)
                plt.pause(1e-10)

        archiver.add(scene_data)

    renderer.delete()
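Every example archives cos/sin pairs of yaw and pitch instead of the raw angles, which avoids the 2π wrap-around in the stored data; the angle is recoverable when needed. A usage sketch:

import math

def recover_angle(cos_value, sin_value):
    # Inverse of the (cos, sin) encoding passed to scene_data.add above.
    return math.atan2(sin_value, cos_value)  # angle in (-pi, pi]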