Code example #1
0
def render_scene(
    args,
    num_objects=5,
    output_index=0,
    output_split='none',
    output_image_dir='../output/images/',
    output_image='render.png',
    output_scene='render.json',
    output_blendfile=None,
):
    """Render multiple views of a randomly generated CLEVR-style scene.

    Loads the base blendfile, configures the CYCLES renderer, places
    ``num_objects`` random objects, renders ``args.imgs_per_scene`` images
    from sampled camera poses (files named ``s{scene}v{view}.png`` inside
    ``output_image_dir``), and dumps the ground-truth scene structure to
    ``output_scene`` as JSON.

    Args:
        args: parsed command-line namespace (paths, resolution, GPU flag,
            sample counts, jitter amounts, ``imgs_per_scene``, ...).
        num_objects: number of random objects to place in the scene.
        output_index: index of this scene; used in output filenames.
        output_split: dataset split name recorded in the scene struct.
        output_image_dir: directory that receives the rendered PNGs.
        output_image: base image path; its basename is recorded in the
            scene struct.
        output_scene: path of the JSON file receiving the scene struct.
        output_blendfile: if not None, the scene is also saved as a .blend
            after each rendered view.
    """

    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        print(bpy.app.version)
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            print(bpy.context.preferences.addons['cycles'])
            cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
            # get_devices() is called for its side effect: it populates the
            # device list before the compute device type is switched.
            cuda_devices, opencl_devices = cycles_prefs.get_devices()
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions.
    # BUGFIX: compare with `< (2, 80, 0)` rather than `<= (2, 79, 0)` --
    # 2.79 point releases (e.g. 2.79b reports version (2, 79, 7)) still use
    # the old `radius` keyword; `size` only exists from 2.80 on.
    if bpy.app.version < (2, 80, 0):
        bpy.ops.mesh.primitive_plane_add(radius=5)
    else:
        bpy.ops.mesh.primitive_plane_add(size=5)

    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']

    print('Camera location', camera.location)
    print('Camera matrix world', camera.matrix_world)

    plane_normal = plane.data.vertices[0].normal
    # Quaternion-vector multiplication changed from `*` to `@` in 2.80
    # (see the version-check note above for why `< (2, 80, 0)` is used).
    if bpy.app.version < (2, 80, 0):
        cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
        cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
        cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    else:
        cam_behind = camera.matrix_world.to_quaternion() @ Vector((0, 0, -1))
        cam_left = camera.matrix_world.to_quaternion() @ Vector((-1, 0, 0))
        cam_up = camera.matrix_world.to_quaternion() @ Vector((0, 1, 0))

    # Project the camera axes onto the ground plane to get the scene's
    # cardinal directions.
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    objects, blender_objects = add_random_objects(scene_struct, num_objects,
                                                  args, camera)

    # Sample one camera pose per image to render.
    poses = utils.sample_poses(args.imgs_per_scene, 10)

    # Render object masks: enable the object-index pass and give every
    # scene object a distinct pass index.
    bpy.context.scene.use_nodes = True
    bpy.context.scene.view_layers["RenderLayer"].use_pass_object_index = True
    for i, obj in enumerate(bpy.context.scene.objects):
        obj.pass_index = i
    print(bpy.context.object.pass_index)

    # Render the scene once per sampled pose and dump the scene structure.
    for i in range(args.imgs_per_scene):
        # os.path.join tolerates output_image_dir with or without a
        # trailing slash (the original string concatenation did not).
        render_args.filepath = os.path.join(
            output_image_dir, 's{}v{}.png'.format(output_index, i))

        # Write the sampled 4x4 pose into the camera's world matrix.
        for j in range(4):
            for k in range(4):
                bpy.data.objects['Camera'].matrix_world[j][k] = poses[i][j, k]

        # Rendering sporadically fails inside Blender; retry a bounded
        # number of times instead of looping forever on a persistent error
        # (matches the retry policy used elsewhere in this file).
        num_render_trials = 10
        while num_render_trials > 0:
            try:
                bpy.ops.render.render(write_still=True)
                break
            except Exception as e:
                num_render_trials -= 1
                print(e)

        if output_blendfile is not None:
            bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)

    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)
    scene_struct['poses'] = [pose.tolist() for pose in poses]
    with open(output_scene, 'w') as f:
        json.dump(scene_struct, f, indent=2)
Code example #2
0
def render_scene(
        args,
        num_objects=5,
        output_index=0,
        output_split='none',
        output_image='render.png',
        output_scene='render_json',
        output_blendfile=None):
    """Set up and (optionally) render an animated CLEVR-style scene.

    Loads the base blendfile, configures CYCLES for video output
    (AVI_JPEG, ``args.num_frames`` frames at ``args.fps``), builds a new
    scene via ``setup_scene`` or reloads a previously saved one, optionally
    adds random camera motion, and renders the animation with a bounded
    number of retries.

    Args:
        args: parsed command-line namespace.
        num_objects: number of random objects when building a fresh scene.
        output_index: scene index forwarded to ``setup_scene``.
        output_split: dataset split name forwarded to ``setup_scene``.
        output_image: output path for the rendered animation.
        output_scene: scene-struct path forwarded to ``setup_scene``.
        output_blendfile: if the file exists, the scene is loaded from it;
            otherwise a fresh scene is built and saved there.
    """
    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    bpy.ops.screen.frame_jump(end=False)
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    render_args.image_settings.file_format = 'AVI_JPEG'
    # Video params
    bpy.context.scene.frame_start = 0
    bpy.context.scene.frame_end = args.num_frames  # same as kinetics
    render_args.fps = args.fps

    if args.cpu is False:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'
            # If more than one GPU is visible, restrict it with
            # CUDA_VISIBLE_DEVICES before launching; toggling
            # cycles_prefs.devices here proved ineffective.

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.cpu is False:
        bpy.context.scene.cycles.device = 'GPU'

    if output_blendfile is not None and os.path.exists(output_blendfile):
        logging.info('Loading pre-defined BLEND file from {}'.format(
            output_blendfile))
        bpy.ops.wm.open_mainfile(filepath=output_blendfile)
    else:
        setup_scene(
            args, num_objects, output_index, output_split,
            output_image, output_scene)
    print_camera_matrix()
    if args.random_camera:
        add_random_camera_motion(args.num_frames)
    if output_blendfile is not None and not os.path.exists(output_blendfile):
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
    max_num_render_trials = 10
    if args.render:
        while max_num_render_trials > 0:
            if args.suppress_blender_logs:
                # Blender writes render progress to the C-level stdout, so
                # file descriptor 1 itself must be redirected; sys.stdout
                # alone is not enough.
                old = os.dup(1)
                sys.stdout.flush()
                os.close(1)
                os.open('/dev/null', os.O_WRONLY)
            render_error = None
            try:
                bpy.ops.render.render(animation=True)
            except Exception as e:
                render_error = e
            finally:
                # BUGFIX: restore fd 1 unconditionally. The original only
                # restored it on success, so a failing render left stdout
                # redirected to /dev/null for every subsequent retry (the
                # error message itself was swallowed) and leaked the
                # duplicated descriptor.
                if args.suppress_blender_logs:
                    os.close(1)
                    os.dup(old)
                    os.close(old)
            if render_error is None:
                break
            max_num_render_trials -= 1
            print(render_error)
Code example #3
0
def render_scene(
    args,
    num_objects=5,
    output_index=0,
    output_split='none',
    output_image='render.png',
    output_scene='render_json',
    output_blendfile=None,
):
    """Render one random CLEVR-style scene plus an instance-segmentation map.

    Builds the scene as in the original CLEVR generator, then wires a
    compositor file-output node that writes the object-index pass as an
    OPEN_EXR instance map ('inst...') alongside a PNG image ('rgb...').

    NOTE(review): this variant uses pre-2.80 Blender APIs throughout
    (``primitive_plane_add(radius=...)``, ``scene.render.layers``,
    quaternion multiplication with ``*``) -- it will not run on 2.80+.

    Args:
        args: parsed command-line namespace.
        num_objects: number of random objects to place.
        output_index: image index recorded in the scene struct.
        output_split: dataset split name recorded in the scene struct.
        output_image: render path; its basename is recorded in the struct.
        output_scene: path of the JSON scene-struct dump.
        output_blendfile: if not None, also save the scene as a .blend.
    """

    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Add random jitter to camera position
    if args.camera_jitter > 0:
        for i in range(3):
            bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    objects, blender_objects = add_random_objects(scene_struct, num_objects,
                                                  args, camera)

    # Instance id is stored as a 32-bit float in the IndexOB pass; if there
    # are N objects, ids 0..N-1 are assigned in iteration order.
    for i, o in enumerate(blender_objects):
        o.pass_index = i

    # Add a compositor file-output node that writes the object-index pass
    # and the rendered image side by side.
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    bpy.context.scene.render.layers["RenderLayer"].use_pass_object_index = True
    node = tree.nodes.new(type="CompositorNodeOutputFile")
    # NOTE(review): hard-coded output directory; should probably be derived
    # from output_image instead.
    node.base_path = '../output/images'
    node.format.file_format = 'OPEN_EXR'

    # Slot 0: instance segmentation (object-index pass as EXR).
    node.file_slots[0].path = 'inst'
    tree.links.new(tree.nodes["Render Layers"].outputs['IndexOB'],
                   node.inputs[0])

    # Slot 1: rendered image as PNG (overrides the node-level EXR format).
    node.layer_slots.new('Image')
    node.file_slots[1].path = 'rgb'
    node.file_slots[1].use_node_format = False
    node.file_slots[1].format.file_format = 'PNG'
    tree.links.new(tree.nodes["Render Layers"].outputs['Image'],
                   node.inputs[1])

    # Render the scene and dump the scene data structure. The file-output
    # node writes the images, so write_still is not needed here.
    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)
    # Rendering sporadically fails inside Blender; retry a bounded number
    # of times instead of looping forever on a persistent error.
    num_render_trials = 10
    while num_render_trials > 0:
        try:
            bpy.ops.render.render()
            break
        except Exception as e:
            num_render_trials -= 1
            print(e)

    with open(output_scene, 'w') as f:
        json.dump(scene_struct, f, indent=2)

    if output_blendfile is not None:
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
Code example #4
0
def render_semantic_change(
    args,
    default_config,
    output_index=0,
    output_split='none',
    output_image='render.png',
    output_scene='render_json',
    output_blendfile=None,
    change_type='random',
):
    """Re-render a default scene after applying one semantic change.

    Rebuilds the default scene's camera/lamp jitters from
    ``default_config``, applies a semantic change of ``change_type`` to the
    default objects via ``apply_change``, renders the result, and dumps the
    ground-truth scene structure to ``output_scene`` as JSON.

    Args:
        args: parsed command-line namespace.
        default_config: scene struct of the default (unchanged) scene; its
            'camera_jitters', '*_light_jitters' and 'objects' entries are
            reused so only the semantic change differs.
        output_index: image index recorded in the scene struct.
        output_split: dataset split name recorded in the scene struct.
        output_image: render path; its basename is recorded in the struct.
        output_scene: path of the JSON scene-struct dump.
        output_blendfile: if not None, also save the scene as a .blend.
        change_type: kind of semantic change forwarded to ``apply_change``.

    Returns:
        True on success, False if no valid change could be applied.
    """

    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Randomly jitter the camera around the default scene's camera jitter so
    # the changed image does not align pixel-perfectly with the default one.
    default_camera_jitters = default_config['camera_jitters']
    if args.camera_jitter > 0:
        for i in range(3):
            rand_camera_jitter = rand(args.camera_jitter)
            bpy.data.objects['Camera'].location[i] += (
                default_camera_jitters[i] + rand_camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Reuse the default scene's exact lamp jitters so lighting matches the
    # unchanged image.
    default_key_jitters = default_config['key_light_jitters']
    default_back_jitters = default_config['back_light_jitters']
    default_fill_jitters = default_config['fill_light_jitters']
    for i in range(3):
        bpy.data.objects['Lamp_Key'].location[i] += default_key_jitters[i]
    for i in range(3):
        bpy.data.objects['Lamp_Back'].location[i] += default_back_jitters[i]
    for i in range(3):
        bpy.data.objects['Lamp_Fill'].location[i] += default_fill_jitters[i]

    # Now make some semantic changes to default objects
    default_objects = default_config['objects']
    sc_objects, sc_blend_objects, success = \
      apply_change(default_objects, scene_struct, args, camera, change_type)
    if not success:
        print(
            'Could not semantically change the given scene for change type: %s'
            % change_type)
        return False

    # Render the scene and dump the scene data structure
    scene_struct['objects'] = sc_objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)
    # Rendering sporadically fails inside Blender; retry a bounded number
    # of times instead of looping forever on a persistent error.
    num_render_trials = 10
    while num_render_trials > 0:
        try:
            bpy.ops.render.render(write_still=True)
            break
        except Exception as e:
            num_render_trials -= 1
            print(e)

    with open(output_scene, 'w') as f:
        json.dump(scene_struct, f, indent=2)

    if output_blendfile is not None:
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)

    return True
Code example #5
0
def render_scene(args,
                 num_objects=5,
                 output_index=0,
                 output_split='none',
                 output_image='render.png',
                 output_scene='render_json',
                 output_blendfile=None,
                 img_template='image%d.png'):
    """Render a random CLEVR-style scene under several light angles, plus a
    segmentation render for each view.

    For each of ``steps`` area-light angles the scene is rendered once
    normally and once with every object's material swapped for a
    color-ramp segmentation material ('<name>_segm.<ext>'), after which
    the original materials are restored.

    Args:
        args: parsed command-line namespace.
        num_objects: number of random objects to place.
        output_index: scene index; per-view image indices are
            ``output_index * steps + view``.
        output_split: dataset split name recorded in the scene struct.
        output_image: initial render path (overwritten per view).
        output_scene: path of the JSON scene-struct dump (rewritten per
            view).
        output_blendfile: if not None, the scene is saved as a .blend
            after each view.
        img_template: ``%d``-style template for per-view image paths.
    """

    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # NOTE(review): hard-coded absolute path -- this only works on the
    # original author's machine and should come from args/config.
    node_path = '/home/bozidar/uni/prac/repos/clevr-dataset-gen/image_generation/data/NodeGroup.blend'
    # Link the segmentation node-group material from the library blendfile.
    with bpy.data.libraries.load(node_path) as (data_from, data_to):
        data_to.objects = data_from.objects
        data_to.materials = data_from.materials
        data_to.node_groups = data_from.node_groups
    node_mat = data_to.materials[0]
    node_group_elems = data_to.node_groups[0].nodes[
        "ColorRamp"].color_ramp.elements

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    # (the `radius` keyword became `size` in Blender 2.80).
    if bpy.app.version < (2, 80, 0):
        bpy.ops.mesh.primitive_plane_add(radius=5)
    else:
        bpy.ops.mesh.primitive_plane_add(size=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Add random jitter to camera position
    if args.camera_jitter > 0:
        for i in range(3):
            bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    # Quaternion-vector multiplication changed from `*` to `@` in 2.80.
    if bpy.app.version < (2, 80, 0):
        cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
        cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
        cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    else:
        cam_behind = camera.matrix_world.to_quaternion() @ Vector((0, 0, -1))
        cam_left = camera.matrix_world.to_quaternion() @ Vector((-1, 0, 0))
        cam_up = camera.matrix_world.to_quaternion() @ Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    objects, blender_objects = add_random_objects(scene_struct, num_objects,
                                                  args, camera)

    # Build one segmentation material (and record its ramp color) per
    # label: label 0 is the ground, labels 1..n are the objects.
    n = len(objects)
    node_mat.node_tree.nodes['Group'].inputs[1].default_value = n
    segm_mat = []
    segm_color = []
    for label in range(n + 1):
        node_mat.node_tree.nodes['Group'].inputs[0].default_value = label
        segm_mat.append(node_mat.copy())
        segm_color.append(list(node_group_elems[label].color))
    print(segm_mat)
    print(segm_color)

    angles = [-50, 90]
    steps = 5
    for view, a in enumerate(np.linspace(*angles, steps)):
        # Rotate the area light around the Y axis for this view.
        r = mathutils.Euler((0.0, math.radians(a), 0.0), 'XYZ')
        bpy.data.objects['Area'].rotation_euler = r

        scene_struct['image_index'] = output_index * steps + view
        render_args.filepath = img_template % (output_index * steps + view)

        print('---------------------------')
        print(objects)
        print('---------------------------')
        print(bpy.data.objects.items())
        print('---------------------------')

        # Render the scene and dump the scene data structure. Rendering
        # sporadically fails inside Blender; retry a bounded number of
        # times instead of looping forever on a persistent error.
        scene_struct['objects'] = objects
        scene_struct['relationships'] = compute_all_relationships(scene_struct)
        num_render_trials = 10
        while num_render_trials > 0:
            try:
                bpy.ops.render.render(write_still=True)
                break
            except Exception as e:
                num_render_trials -= 1
                print(e)

        with open(output_scene, 'w') as f:
            json.dump(scene_struct, f, indent=2)

        if output_blendfile is not None:
            print('===============================>', output_blendfile)
            bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)

        # Segmentation rendering: same view, filename gets a '_segm' suffix.
        s = render_args.filepath
        ind = s.rindex('.')
        render_args.filepath = s[:ind] + '_segm' + s[ind:]

        prev_mat = []

        bpy.data.objects['Ground'].data.materials.clear()
        bpy.data.objects['Ground'].data.materials.append(segm_mat[0])
        # BUGFIX: the original reused `i` here, shadowing the index of the
        # enclosing view loop; use a distinct name.
        for obj_i in range(n):
            # Negative indexing assumes the n most recently added objects
            # sit at the end of bpy.data.objects -- TODO confirm ordering.
            prev_mat.append(bpy.data.objects[obj_i - n].data.materials[0])
            scene_name = bpy.data.objects[obj_i - n].name
            index = -1
            for obj in objects:
                if obj['scene_name'] == scene_name:
                    index = obj['index']
                    obj['segm_color'] = segm_color[obj['index'] + 1]

            bpy.data.objects[obj_i - n].data.materials.clear()
            bpy.data.objects[obj_i - n].data.materials.append(
                segm_mat[index + 1])

        num_render_trials = 10
        while num_render_trials > 0:
            try:
                bpy.ops.render.render(write_still=True)
                break
            except Exception as e:
                num_render_trials -= 1
                print(e)

        # Restore the original materials for the next view.
        bpy.data.objects['Ground'].data.materials.clear()
        for obj_i in range(n):
            bpy.data.objects[obj_i - n].data.materials.clear()
            bpy.data.objects[obj_i - n].data.materials.append(prev_mat[obj_i])
Code example #6
0
def render_scene(args,
    num_objects=5,
    num_images=0,
    output_split='none',
    image_template='render.png',
    scene_template='render_json',
    arr_template='arr',
    output_blendfile=None,
    directions={1: 'no', 2: 'no', 3: 'no', 4: 'no', 5: 'no', 6: 'no'}
  ):
  """Render a sequence of frames in which one object moves along a trajectory.

  Sets up a CLEVR-style Blender scene (clears leftover generated objects,
  loads materials, configures CYCLES, jitters camera and lamps, records the
  six axis-aligned scene directions), places two objects at randomized
  positions, computes an "off/on/off" trajectory for object 1 relative to
  object 0 on the first frame, and then for each of ``num_images + 20``
  frames moves the object one trajectory step, renders the frame, and saves:
    * the RGB image (``image_template`` formatted with the frame index),
    * a per-frame scene-graph JSON (``scene_template``), and
    * an array of RGB + depth + instance-mask channels (``arr_template``),
      written via ``utils.save_arr``.

  Args:
    args: parsed command-line namespace. Read here: material_dir, width,
      height, render_tile_size, use_gpu, render_num_samples,
      render_min_bounces, render_max_bounces, camera_jitter,
      key_light_jitter, back_light_jitter, fill_light_jitter, start_idx.
    num_objects: object count forwarded to ``add_random_objects``.
    num_images: nominal frame count; 20 extra frames are rendered to cover
      the two 10-frame "hold" segments spliced into the trajectory below.
    output_split: dataset split name recorded in the scene struct.
    image_template / scene_template / arr_template: printf-style templates,
      formatted with the global frame index ``scene_idx + args.start_idx``.
    output_blendfile: unused in this variant; kept for interface parity with
      the other render_scene variants.
    directions: unused in this variant; kept for interface parity.
      NOTE(review): mutable default dict — harmless while unused, but should
      become a None default if this parameter ever goes live.
  """

  # Delete any generated objects left over from a previous scene so the
  # blendfile starts clean (only named primitives are targeted).
  for object_name in bpy.data.objects.keys():
    if 'Sphere' in object_name or\
       'Cylinder' in object_name or\
       'Cube' in object_name or\
       'Duck' in object_name or\
       'Peg' in object_name or\
       'Disk' in object_name or\
       'Bowl' in object_name:
       utils.delete_object_by_name(object_name)

  # Load materials
  utils.load_materials(args.material_dir)

  # Set render arguments so we can get pixel coordinates later.
  # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
  # cannot be used.
  render_args = bpy.context.scene.render
  render_args.engine = "CYCLES"
  render_args.resolution_x = args.width
  render_args.resolution_y = args.height
  render_args.resolution_percentage = 100
  render_args.tile_x = args.render_tile_size
  render_args.tile_y = args.render_tile_size
  if args.use_gpu == 1:
    # Blender changed the API for enabling CUDA at some point
    if bpy.app.version < (2, 78, 0):
      bpy.context.user_preferences.system.compute_device_type = 'CUDA'
      bpy.context.user_preferences.system.compute_device = 'CUDA_0'
    else:
      cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
      cycles_prefs.compute_device_type = 'CUDA'

  # Some CYCLES-specific stuff
  bpy.data.worlds['World'].cycles.sample_as_light = True
  bpy.context.scene.cycles.blur_glossy = 2.0
  bpy.context.scene.cycles.samples = args.render_num_samples
  bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
  bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
  if args.use_gpu == 1:
    bpy.context.scene.cycles.device = 'GPU'

  # This will give ground-truth information about the scene and its objects
  scene_struct = {
      'split': output_split,
      'objects': [],
      'directions': {},
  }

  # Put a plane on the ground so we can compute cardinal directions
  bpy.ops.mesh.primitive_plane_add(radius=5)
  plane = bpy.context.object

  def rand(L):
    # Uniform sample in [-L, L].
    return 2.0 * L * (random.random() - 0.5)

  # Add random jitter to camera position
  if args.camera_jitter > 0:
    for i in range(3):
      bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

  # Figure out the left, up, and behind directions along the plane and record
  # them in the scene structure
  camera = bpy.data.objects['Camera']
  plane_normal = plane.data.vertices[0].normal
  cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
  cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
  cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
  plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
  plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
  plane_up = cam_up.project(plane_normal).normalized()

  # Delete the plane; we only used it for normals anyway. The base scene file
  # contains the actual ground plane.
  utils.delete_object(plane)

  # Save all six axis-aligned directions in the scene struct
  scene_struct['directions']['behind'] = tuple(plane_behind)
  scene_struct['directions']['front'] = tuple(-plane_behind)
  scene_struct['directions']['left'] = tuple(plane_left)
  scene_struct['directions']['right'] = tuple(-plane_left)
  scene_struct['directions']['above'] = tuple(plane_up)
  scene_struct['directions']['below'] = tuple(-plane_up)

  # Add random jitter to lamp positions
  if args.key_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
  if args.back_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
  if args.fill_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)

  # Populated on the first iteration, reused on every subsequent frame.
  objects = []
  blender_objects = []
  direction_vec = []
  traj = []

  # +20 accounts for the two 10-frame holds spliced into the trajectory below.
  for scene_idx in range(num_images + 20):

    image_path = image_template % (scene_idx + args.start_idx)
    render_args.filepath = image_path

    scene_path = scene_template % (scene_idx + args.start_idx)
    arr_path = arr_template % (scene_idx + args.start_idx)

    if scene_idx == 0:
      # First frame: build the scene and precompute the whole trajectory.

      init_positions = []

      # x = random.uniform(-0.2, 0.2)
      # y = random.uniform(-0.2, 0.2)
      # Reference object position: magnitude in [1, 2] on each axis (either
      # sign), keeping it away from the scene origin, plus small jitter.
      poss_values = np.concatenate((np.linspace(-2, -1, 5, endpoint=True), 
                                    np.linspace(1, 2, 5, endpoint=True)))
      x = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
      y = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
      # z = random.uniform(0, 1)
      z = 0
      ref_obj_loc = np.array([x, y, z])
      init_positions.append(ref_obj_loc)

      # x = -2 + random.uniform(-0.2, 0.2)
      # y = -2 + random.uniform(-0.2, 0.2)
      poss_values = np.concatenate((np.linspace(-2, -1, 5, endpoint=True), 
                                    np.linspace(1, 2, 5, endpoint=True)))
      x = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
      y = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
      # z = random.uniform(0, 1)
      z = 0

      # Rejection-sample the target object so it starts at least 1 unit away
      # from the reference object.
      while np.linalg.norm(np.array([x,y,z]) - ref_obj_loc) < 1:
        x = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
        y = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
        z = 0

      tar_obj_loc = np.array([x, y, z])
      init_positions.append(tar_obj_loc)

      sizes = []
      inout = False
      curvy = False
      # Now make some random objects
      objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera, \
                                                    init_positions, sizes=sizes, inout=inout)




      # Trajectory start: current location of the moving object (index 1).
      init_position = np.array(blender_objects[1].location)

      # SQUARE AROUND
      # intermediate_positions = []
      # z = np.array(blender_objects[1].location)[2]
      # y += 4 + random.uniform(-0.2, 0.2) # x = -2, y = 2
      # intermediate_positions.append([x, y, z])
      # # ----------------------------------------
      # x += 4 + random.uniform(-0.2, 0.2) # x = 2, y = 2
      # intermediate_positions.append([x, y, z])
      # # ----------------------------------------
      # y -= 4 + random.uniform(-0.2, 0.2) # x = 2, y = -2
      # intermediate_positions.append([x, y, z])
      # # ----------------------------------------
      # x -= 4 # x = -2, y = -2
      # intermediate_positions.append([x, y, z])



      # OFF/ON/OFF: move object 1 onto the top of object 0, then off to a new
      # spot on the ground.
      curvy = True
      intermediate_positions = []
      # intermediate_positions.append([x, y, objects[1]['r']])
      # Waypoint 1: directly on top of object 0 (z offset = sum of radii).
      x = np.array(blender_objects[0].location)[0]
      y = np.array(blender_objects[0].location)[1]
      z = np.array(blender_objects[0].location)[2] + objects[0]['r'] + objects[1]['r']
      intermediate_positions.append([x, y, z])
      # intermediate_positions.append([x, y, z])
      # ----------------------------------------
      # x = 2 + random.uniform(-0.2, 0.2)
      # y = -2 + random.uniform(-0.2, 0.2)
      # Waypoint 2: a fresh ground position (z = object 1's radius), sampled
      # away from the scene center and at least 1 unit from object 0.
      poss_values = np.concatenate((np.linspace(-2, -1, 5, endpoint=True), 
                                    np.linspace(1, 2, 5, endpoint=True)))
      x = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
      y = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
      z = objects[1]['r']

      while np.linalg.norm(np.array([x,y,z]) - np.array(blender_objects[0].location)) < 1:
        x = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
        y = np.random.choice(poss_values) + random.uniform(-0.2, 0.2)
        z = objects[1]['r']

      intermediate_positions.append([x, y, z])
      # intermediate_positions.append([x, y, z])



      # UP/ABOVE-ACROSS/DOWN
      # intermediate_positions = []
      # z = np.array(blender_objects[0].location)[2] + objects[0]['r'] + 2*objects[1]['r']
      # intermediate_positions.append([x, y, z])
      # # ----------------------------------------
      # x = 2 + random.uniform(-0.2, 0.2)
      # y = 2 + random.uniform(-0.2, 0.2)
      # intermediate_positions.append([x, y, z])
      # # ----------------------------------------
      # # z = np.array(blender_objects[1].location)[2]
      # z = objects[1]['r']
      # intermediate_positions.append([x, y, z])


      # traj is a list of (object_index, position) steps. Splice in two
      # 10-frame holds: one after step 9 (pause on top of object 0) and one
      # at the end (pause at the final position) — hence num_images + 20.
      traj = calculate_trajectory(init_position, intermediate_positions, args, curvy=curvy)
      traj = traj[:9] + [(1, x) for x in [traj[9][1]]*10] + traj[9:] + [(1, x) for x in [traj[-1][1]]*10]

    else:
      # Subsequent frames: advance the moving object one trajectory step and
      # refresh its recorded coordinates/rotation.
      move_obj_idx, pos = traj[scene_idx-1]

      move_object(blender_objects[move_obj_idx], pos)
      
      pixel_coords = utils.get_camera_coords(camera, blender_objects[move_obj_idx].location)
      objects[move_obj_idx]['pixel_coords'] = pixel_coords

      # <Vector (-1.6002, -1.5445, 1.9500)>
      objects[move_obj_idx]['3d_coords'] = list(blender_objects[move_obj_idx].location)
      
      # <Euler (x=0.0000, y=0.0000, z=139.0579), order='XYZ'>
      objects[move_obj_idx]['rotation'] = blender_objects[move_obj_idx].rotation_euler[2]






    ### get b_box
    box_dict = get_b_box.main(bpy.context, blender_objects)
    for _id in box_dict:
      objects[_id]['bbox'] = box_dict[_id]

    # Render the scene and dump the scene data structure
    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)


    ############ ADDED ############
    # Route the Render Layers output into the compositor Viewer node so the
    # rendered pixels can be read back from bpy.data.images['Viewer Node'].
    tree = bpy.context.scene.node_tree
    links = tree.links
    rl = tree.nodes['Render Layers']
    v = tree.nodes['Viewer']

    links.new(rl.outputs[0], v.inputs[0])
    # Retry rendering until it succeeds (renders occasionally raise).
    while True:
      try:
        bpy.ops.render.render(write_still=True)
        break
      except Exception as e:
        print(e)
    links.remove(links[0])

    # get viewer pixels
    rgb_pixels = bpy.data.images['Viewer Node'].pixels
    rgb_pixels = np.array(rgb_pixels[:])
    # Gamma-correct (1/2.2) and clamp to [0, 1]; drop the alpha channel.
    rgb_pixels = np.power(rgb_pixels, 1/2.2)
    rgb_pixels[rgb_pixels > 1] = 1
    rgb_pixels = rgb_pixels.reshape(args.height, args.width, 4)[...,:3]

    # Output socket 2 of Render Layers — presumably the depth pass; verify
    # against the node setup in the blendfile.
    links.new(rl.outputs[2], v.inputs[0])
    render_shadeless(blender_objects, lights_off=False)
    links.remove(links[0])

    # get viewer pixels
    depth_pixels = bpy.data.images['Viewer Node'].pixels
    depth_pixels = np.array(depth_pixels[:])
    # Keep a single channel, preserving the trailing axis (H, W, 1).
    depth_pixels = depth_pixels.reshape(args.height, args.width, 4)[...,0, None]


    # Shadeless render gives flat per-object colors, used as instance masks.
    links.new(rl.outputs[0], v.inputs[0])
    render_shadeless(blender_objects)
    links.remove(links[0])

    # get viewer pixels
    mask_pixels = bpy.data.images['Viewer Node'].pixels
    mask_pixels = np.array(mask_pixels[:])
    mask_pixels = mask_pixels.reshape(args.height, args.width, 4)[...,:3]

    # Stack into (H, W, 7): RGB (3) + depth (1) + mask (3); flip vertically
    # since the Viewer pixel buffer is stored bottom-to-top.
    pixels = np.concatenate((rgb_pixels, depth_pixels, mask_pixels), axis=2)
    pixels = np.flipud(pixels)

    utils.save_arr(pixels, arr_path)  
    ############ ADDED ############

    # Dump the per-frame scene graph.
    with open(scene_path, 'w') as f:
      json.dump(scene_struct, f, indent=2)
# Code example #7
def render_scene(args,
    num_objects=5,
    output_index=0,
    output_split='none',
    output_image='render.png',
    output_scene='render_json',
    output_masks=None,
    output_blendfile=None,
  ):
  """Render a single randomized CLEVR-style scene and dump its ground truth.

  Opens the base blendfile, configures CYCLES rendering (optionally on GPU),
  optionally wires a compositor chain that writes a normalized depth map,
  jitters camera and lamps, records the six axis-aligned scene directions,
  places random objects, renders one image, and writes the scene graph JSON.
  Optionally crops the output, renders per-object masks, and saves the
  resulting blendfile.

  Args:
    args: parsed command-line namespace. Read here: base_scene_blendfile,
      material_dir, width, height, render_tile_size, use_gpu,
      render_num_samples, render_min_bounces, render_max_bounces,
      output_depth, camera_jitter, key/back/fill_light_jitter,
      no_background, background_intensities, crop.
    num_objects: object count forwarded to ``add_random_objects``.
    output_index: image index recorded in the scene struct.
    output_split: dataset split name recorded in the scene struct.
    output_image: path of the rendered PNG.
    output_scene: path of the scene-graph JSON.
    output_masks: if not None, path passed to ``render_masks``.
    output_blendfile: if not None, path to save the .blend file to.
  """

  # Load the main blendfile
  bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

  # Load materials
  utils.load_materials(args.material_dir)

  # Set render arguments so we can get pixel coordinates later.
  # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
  # cannot be used.
  render_args = bpy.context.scene.render
  render_args.engine = "CYCLES"
  render_args.filepath = output_image
  render_args.resolution_x = args.width
  render_args.resolution_y = args.height
  render_args.resolution_percentage = 100
  render_args.tile_x = args.render_tile_size
  render_args.tile_y = args.render_tile_size
  if args.use_gpu == 1:
    # Blender changed the API for enabling CUDA at some point
    if bpy.app.version < (2, 78, 0):
      bpy.context.user_preferences.system.compute_device_type = 'CUDA'
      bpy.context.user_preferences.system.compute_device = 'CUDA_0'
    else:
      cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
      cycles_prefs.compute_device_type = 'CUDA'

  # Some CYCLES-specific stuff
  bpy.data.worlds['World'].cycles.sample_as_light = True
  bpy.context.scene.cycles.blur_glossy = 2.0
  bpy.context.scene.cycles.samples = args.render_num_samples
  bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
  bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
  if args.use_gpu == 1:
    bpy.context.scene.cycles.device = 'GPU'

  if args.output_depth:
    # Following is based on stanford-shapenet-renderer
    # Compositor chain: Render Layers depth -> Normalize -> File Output,
    # written next to the image as '<output_image>_depth'.
    bpy.context.scene.use_nodes = True
    tree = bpy.context.scene.node_tree
    render_layers = tree.nodes.new('CompositorNodeRLayers')
    depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
    depth_file_output.label = 'Depth Output'
    depth_file_output.file_slots[0].path = '../../' + output_image + '_depth'
    map = tree.nodes.new(type="CompositorNodeNormalize")  # thus, most distant points have pixel intensity of one, and nearest zero
    tree.links.new(render_layers.outputs['Depth'], map.inputs[0])
    tree.links.new(map.outputs[0], depth_file_output.inputs[0])

  # This will give ground-truth information about the scene and its objects
  scene_struct = {
      'split': output_split,
      'image_index': output_index,
      'image_filename': os.path.basename(output_image),
      'objects': [],
      'directions': {},
  }

  # Put a plane on the ground so we can compute cardinal directions
  bpy.ops.mesh.primitive_plane_add(radius=5)
  plane = bpy.context.object

  def rand(L):
    # Uniform sample in [-L, L].
    return 2.0 * L * (random.random() - 0.5)

  # Add random jitter to camera position
  if args.camera_jitter > 0:
    for i in range(3):
      bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

  # Figure out the left, up, and behind directions along the plane and record
  # them in the scene structure
  camera = bpy.data.objects['Camera']
  plane_normal = plane.data.vertices[0].normal
  cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
  cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
  cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
  plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
  plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
  plane_up = cam_up.project(plane_normal).normalized()

  # Delete the plane; we only used it for normals anyway. The base scene file
  # contains the actual ground plane.
  utils.delete_object(plane)

  # Save all six axis-aligned directions in the scene struct
  scene_struct['directions']['behind'] = tuple(plane_behind)
  scene_struct['directions']['front'] = tuple(-plane_behind)
  scene_struct['directions']['left'] = tuple(plane_left)
  scene_struct['directions']['right'] = tuple(-plane_left)
  scene_struct['directions']['above'] = tuple(plane_up)
  scene_struct['directions']['below'] = tuple(-plane_up)

  # Add random jitter to lamp positions
  if args.key_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
  if args.back_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
  if args.fill_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)

  # Now make some random objects
  objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera)

  if args.no_background:
    # This must come after add_random_objects, as that also changes the ground layer
    utils.set_layer(bpy.data.objects['Ground'], 2)
  else:
    # Note that in base_scene, the ground has no material (hence uses blender's default)
    # Give the ground a grayscale material with a randomly chosen intensity.
    bpy.data.materials.new(name='Ground_Material')
    ground_mat = bpy.data.materials['Ground_Material']
    background_intensity = args.background_intensities[random.randrange(len(args.background_intensities))]
    ground_mat.diffuse_color = [background_intensity] * 3
    bpy.data.objects['Ground'].data.materials.append(ground_mat)

  # Render the scene and dump the scene data structure
  scene_struct['objects'] = objects
  scene_struct['relationships'] = compute_all_relationships(scene_struct)
  # Retry rendering until it succeeds (renders occasionally raise).
  while True:
    try:
      bpy.ops.render.render(write_still=True)
      break
    except Exception as e:
      print(e)

  if args.crop:
    maybe_crop(output_image)
    if args.output_depth:
      # Cropping the separately-written depth map is not supported.
      raise NotImplementedError

  if output_masks is not None:
    render_masks(blender_objects, output_masks)

  with open(output_scene, 'w') as f:
    json.dump(scene_struct, f, indent=2)

  if output_blendfile is not None:
    bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
# Code example #8
def render_scene(args,
                 num_objects=5,
                 num_images=0,
                 output_split='none',
                 image_template='render.png',
                 scene_template='render_json',
                 arr_template='arr',
                 output_blendfile=None,
                 directions={
                     1: 'no',
                     2: 'no',
                     3: 'no',
                     4: 'no',
                     5: 'no'
                 }):
    """Render a sequence of frames in which object 0 moves linearly.

    Sets up a CLEVR-style Blender scene (clears leftover generated objects,
    loads materials, configures CYCLES, jitters camera and lamps, records the
    six axis-aligned scene directions). On the first frame the ``directions``
    spec is translated into an initial and final position for object 0
    (relative to a fixed reference object at [0, 0, 1]); on each later frame
    object 0 is advanced by a fixed step along that line. Every frame saves:
      * the RGB image (``image_template`` formatted with the frame index),
      * a per-frame scene-graph JSON (``scene_template``), and
      * an array of RGB + depth + instance-mask channels (``arr_template``).

    Args:
      args: parsed command-line namespace. Read here: material_dir, width,
        height, render_tile_size, use_gpu, render_num_samples,
        render_min_bounces, render_max_bounces, camera_jitter,
        key/back/fill_light_jitter, start_idx, num_images.
      num_objects: total object count; objects beyond the moving and
        reference objects get random positions.
      num_images: number of frames to render.
      output_split: dataset split name recorded in the scene struct.
      image_template / scene_template / arr_template: printf-style
        templates formatted with ``scene_idx + args.start_idx``.
      output_blendfile: unused in this variant; kept for interface parity.
      directions: mapping from axis/relation id to a movement spec; a value
        of ``'no'`` means no movement on that axis. The branches below
        compare each element against the list ``['no']``, so live entries
        are presumably lists like ``['front']`` — verify against callers.
        NOTE(review): mutable default dict — not mutated here, but should
        become a None default if that ever changes.
    """

    # Delete any generated objects left over from a previous scene so the
    # blendfile starts clean (only named primitives are targeted).
    for object_name in bpy.data.objects.keys():
        if 'Sphere' in object_name or\
           'Cylinder' in object_name or\
           'Cube' in object_name or\
           'Duck' in object_name or\
           'Peg' in object_name or\
           'Disk' in object_name or\
           'Bowl' in object_name:
            utils.delete_object_by_name(object_name)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Add random jitter to camera position
    if args.camera_jitter > 0:
        for i in range(3):
            bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Populated on the first iteration, reused on every subsequent frame.
    objects = []
    blender_objects = []
    direction_vec = []

    for scene_idx in range(num_images):

        image_path = image_template % (scene_idx + args.start_idx)
        render_args.filepath = image_path

        scene_path = scene_template % (scene_idx + args.start_idx)
        arr_path = arr_template % (scene_idx + args.start_idx)

        if scene_idx == 0:
            # First frame: derive start/end positions from `directions`,
            # build the scene, and compute the per-frame step vector.

            init_position = []
            final_position = []
            # Fixed reference object location used by the close/far logic.
            ref_obj_loc = np.array([0, 0, 1])

            for axis, movement in directions.items():
                # X
                if axis == 1:
                    if movement != ['no']:
                        if movement[0] == 'front':
                            init_position.append(-3)
                            final_position.append(2)
                        else:
                            init_position.append(2)
                            final_position.append(-3)
                    else:
                        # No X movement: same random coordinate at both ends.
                        tmp = random.choice(
                            np.linspace(-2, 2, 10, endpoint=True))
                        init_position.append(tmp)
                        final_position.append(tmp)

                # Y
                # NOTE(review): labeled Y but tests axis == 0, which never
                # matches the default 1-based keys — confirm intended key.
                if axis == 0:
                    if movement != ['no']:
                        if movement[0] == 'left':
                            init_position.append(-2)
                            final_position.append(2)
                        else:
                            init_position.append(2)
                            final_position.append(-2)
                    else:
                        tmp = random.choice(
                            np.linspace(-2, 2, 10, endpoint=True))
                        init_position.append(tmp)
                        final_position.append(tmp)

                # Z
                if axis == 2:
                    if movement != ['no']:
                        if movement[0] == 'below':
                            init_position.append(0)
                            final_position.append(2)
                        else:
                            init_position.append(2)
                            final_position.append(0)
                    else:
                        tmp = random.choice(
                            np.linspace(0, 2, 10, endpoint=True))
                        init_position.append(tmp)
                        final_position.append(tmp)

                # CLOSE/FAR
                # Distance threshold separating "close" from "far" samples;
                # also read later by the axis-4 (off/on) block below.
                close_far_thresh = 3
                if axis == 3:
                    if movement != ['no']:
                        # Rejection-sample one point farther than the
                        # threshold from the reference object ...
                        far_sample = []
                        close_sample = []
                        while far_sample == [] or \
                            np.linalg.norm(far_sample - ref_obj_loc) < close_far_thresh:

                            x = random.choice(
                                np.linspace(-2, 2, 10, endpoint=True))
                            y = random.choice(
                                np.linspace(-2, 2, 10, endpoint=True))
                            z = random.choice(
                                np.linspace(0.6, 2, 10, endpoint=True))
                            far_sample = np.array([x, y, z])

                        # ... and one point within the threshold but at
                        # least 1 unit away.
                        while close_sample == [] or \
                            np.linalg.norm(close_sample - ref_obj_loc) > close_far_thresh or \
                            np.linalg.norm(close_sample - ref_obj_loc) < 1:

                            x = random.choice(
                                np.linspace(-2, 2, 10, endpoint=True))
                            y = random.choice(
                                np.linspace(-2, 2, 10, endpoint=True))
                            z = random.choice(
                                np.linspace(0.6, 2, 10, endpoint=True))
                            close_sample = np.array([x, y, z])

                        if movement[0] == 'far':
                            # Start far, end close: move a random fraction
                            # (0.5-1.0) of the way toward the reference.
                            init_position = far_sample
                            final_position = []
                            while final_position == [] or \
                                np.linalg.norm(final_position - ref_obj_loc) > close_far_thresh or \
                                np.linalg.norm(final_position - ref_obj_loc) < 1:
                                final_position = init_position + random.uniform(
                                    0.5, 1) * (ref_obj_loc - init_position)
                                print(final_position)
                        else:
                            # Start close, end far: move away from the
                            # reference until beyond the threshold.
                            init_position = close_sample
                            final_position = []
                            while final_position == [] or \
                                np.linalg.norm(final_position - ref_obj_loc) < close_far_thresh:
                                final_position = init_position + random.uniform(
                                    0.5, 1) * (init_position - ref_obj_loc)

                    else:
                        pass

            # init_position[1] = -3
            # init_position[2] = 0
            # final_position[2] = 0
            print(init_position)
            print(final_position)
            # Object 0 is the mover, object 1 the reference; remaining
            # objects get uniformly random positions.
            init_positions = []
            init_positions.append(init_position)
            init_positions.append(ref_obj_loc)
            for i in range(2, num_objects):
                x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                z = random.choice(np.linspace(0, 2, 6, endpoint=True))
                init_positions.append([x, y, z])

            direction_vec = np.array(final_position) - np.array(init_position)

            # Now make some random objects
            objects, blender_objects = add_random_objects(
                scene_struct, num_objects, args, camera, init_positions)

            # Axis-4 spec ('off'/'on'): endpoints relative to object 1's
            # actual placed location; overrides the positions chosen above.
            # (The guard is redundant — we are already in scene_idx == 0.)
            if scene_idx == 0:
                movement = directions[4]
                if movement != ['no']:
                    far_sample = []
                    while far_sample == [] or \
                        np.linalg.norm(far_sample - ref_obj_loc) < close_far_thresh:

                        x = random.choice(np.linspace(-2, 2, 10,
                                                      endpoint=True))
                        y = random.choice(np.linspace(-2, 2, 10,
                                                      endpoint=True))
                        z = random.choice(
                            np.linspace(0.6, 2, 10, endpoint=True))
                        far_sample = np.array([x, y, z])

                    if movement[0] == 'off':
                        # Move from a far point to on top of object 1
                        # (jittered within its radius, z = sum of radii).
                        init_position = far_sample
                        final_position = np.array(blender_objects[1].location)
                        final_position[0] += random.uniform(
                            -objects[1]['r'], objects[1]['r'])
                        final_position[1] += random.uniform(
                            -objects[1]['r'], objects[1]['r'])
                        final_position[2] += objects[0]['r'] + objects[1]['r']

                        blender_objects[0].location[0] = init_position[0]
                        blender_objects[0].location[1] = init_position[1]
                        blender_objects[0].location[2] = init_position[2]
                    else:
                        # Move from on top of object 1 to a far point.
                        final_position = far_sample
                        init_position = np.array(blender_objects[1].location)
                        init_position[0] += random.uniform(
                            -objects[1]['r'], objects[1]['r'])
                        init_position[1] += random.uniform(
                            -objects[1]['r'], objects[1]['r'])
                        init_position[2] += objects[0]['r'] + objects[1]['r']

                        blender_objects[0].location[0] = init_position[0]
                        blender_objects[0].location[1] = init_position[1]
                        blender_objects[0].location[2] = init_position[2]

                else:
                    pass

            # Recompute in case the axis-4 block changed the endpoints.
            direction_vec = np.array(final_position) - np.array(init_position)

        else:
            # Subsequent frames: advance object 0 by one linear step and
            # refresh its recorded coordinates/rotation.
            move_obj_idxs = [0]

            step = direction_vec / args.num_images

            for move_obj_idx in move_obj_idxs:
                move_object(blender_objects[move_obj_idx], step)
                pixel_coords = utils.get_camera_coords(
                    camera, blender_objects[move_obj_idx].location)
                objects[move_obj_idx]['pixel_coords'] = pixel_coords

                # <Vector (-1.6002, -1.5445, 1.9500)>
                objects[move_obj_idx]['3d_coords'] = list(
                    blender_objects[move_obj_idx].location)

                # <Euler (x=0.0000, y=0.0000, z=139.0579), order='XYZ'>
                objects[move_obj_idx]['rotation'] = blender_objects[
                    move_obj_idx].rotation_euler[2]

        ### get b_box
        box_dict = get_b_box.main(bpy.context, blender_objects)
        for _id in box_dict:
            objects[_id]['bbox'] = box_dict[_id]

        # Render the scene and dump the scene data structure
        scene_struct['objects'] = objects
        scene_struct['relationships'] = compute_all_relationships(scene_struct)

        ############ ADDED ############
        # Route the Render Layers output into the compositor Viewer node so
        # pixels can be read back from bpy.data.images['Viewer Node'].
        tree = bpy.context.scene.node_tree
        links = tree.links
        rl = tree.nodes['Render Layers']
        v = tree.nodes['Viewer']

        links.new(rl.outputs[0], v.inputs[0])
        # Retry rendering until it succeeds (renders occasionally raise).
        while True:
            try:
                bpy.ops.render.render(write_still=True)
                break
            except Exception as e:
                print(e)
        links.remove(links[0])

        # get viewer pixels
        rgb_pixels = bpy.data.images['Viewer Node'].pixels
        rgb_pixels = np.array(rgb_pixels[:])
        # Gamma-correct (1/2.2) and clamp to [0, 1]; drop the alpha channel.
        rgb_pixels = np.power(rgb_pixels, 1 / 2.2)
        rgb_pixels[rgb_pixels > 1] = 1
        rgb_pixels = rgb_pixels.reshape(args.height, args.width, 4)[..., :3]

        # Output socket 2 of Render Layers — presumably the depth pass;
        # verify against the node setup in the blendfile.
        links.new(rl.outputs[2], v.inputs[0])
        render_shadeless(blender_objects, lights_off=False)
        links.remove(links[0])

        # get viewer pixels
        depth_pixels = bpy.data.images['Viewer Node'].pixels
        depth_pixels = np.array(depth_pixels[:])
        # Keep a single channel, preserving the trailing axis (H, W, 1).
        depth_pixels = depth_pixels.reshape(args.height, args.width, 4)[..., 0,
                                                                        None]

        # Shadeless render gives flat per-object colors, used as masks.
        links.new(rl.outputs[0], v.inputs[0])
        render_shadeless(blender_objects)
        links.remove(links[0])

        # get viewer pixels
        mask_pixels = bpy.data.images['Viewer Node'].pixels
        mask_pixels = np.array(mask_pixels[:])
        mask_pixels = mask_pixels.reshape(args.height, args.width, 4)[..., :3]

        # Stack into (H, W, 7): RGB (3) + depth (1) + mask (3); flip
        # vertically since the Viewer pixel buffer is stored bottom-to-top.
        pixels = np.concatenate((rgb_pixels, depth_pixels, mask_pixels),
                                axis=2)
        pixels = np.flipud(pixels)

        utils.save_arr(pixels, arr_path)
        ############ ADDED ############

        # Dump the per-frame scene graph.
        with open(scene_path, 'w') as f:
            json.dump(scene_struct, f, indent=2)
# Code example #9
def render_scene(args,
    num_objects=5,
    num_images=0,
    output_split='none',
    image_template='render.png',
    scene_template='render_json',
    arr_template='arr',
    output_blendfile=None,
    directions=None
  ):
  """Render an animated sequence of ``num_images`` frames with moving objects.

  For frame 0 this samples initial and final object positions that satisfy
  the spatial relation requested per axis in ``directions`` (front/behind,
  left/right, above/below, close/far, on/off, in/out), places the objects,
  and computes a per-frame trajectory.  Each later frame advances one object
  one step along that trajectory.  Every frame is rendered through the
  compositor's Viewer node to extract RGB, depth and segmentation-mask
  pixels, which are stacked and saved as one array alongside a JSON scene
  graph.

  Args:
    args: parsed command-line namespace (width, height, use_gpu, margins,
      min_dist, start_idx, max_objects, render settings, ...).
    num_objects: object count forwarded to ``add_random_objects``.
    num_images: number of frames to generate.
    output_split: dataset split name recorded in the scene struct.
    image_template: ``%``-style template for the rendered image path,
      formatted with the global frame index.
    scene_template: ``%``-style template for the per-frame JSON path.
    arr_template: ``%``-style template for the saved pixel-array path.
    output_blendfile: unused; kept for interface compatibility.
    directions: dict mapping axis index 0-5 to a one-element list such as
      ``['front']``, ``['far']`` or ``['no']``.  Defaults to no movement on
      every axis.  NOTE: the previous default was a shared mutable dict of
      plain strings with keys 1-6, which the comparisons below
      (``movement != ['no']``, ``movement[0]``, ``axis == 0``) cannot
      handle; it is replaced by a ``None`` sentinel.
  """
  if directions is None:
    directions = {axis: ['no'] for axis in range(6)}

  # Remove any shape objects left over from a previously generated scene.
  for object_name in bpy.data.objects.keys():
    if 'Sphere' in object_name or\
       'Cylinder' in object_name or\
       'Cube' in object_name or\
       'Duck' in object_name or\
       'Peg' in object_name or\
       'Disk' in object_name or\
       'Bowl' in object_name:
       utils.delete_object_by_name(object_name)

  # Load materials
  utils.load_materials(args.material_dir)

  # Set render arguments so we can get pixel coordinates later.
  # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
  # cannot be used.
  render_args = bpy.context.scene.render
  render_args.engine = "CYCLES"
  render_args.resolution_x = args.width
  render_args.resolution_y = args.height
  render_args.resolution_percentage = 100
  render_args.tile_x = args.render_tile_size
  render_args.tile_y = args.render_tile_size
  if args.use_gpu == 1:
    # Blender changed the API for enabling CUDA at some point
    if bpy.app.version < (2, 78, 0):
      bpy.context.user_preferences.system.compute_device_type = 'CUDA'
      bpy.context.user_preferences.system.compute_device = 'CUDA_0'
    else:
      cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
      cycles_prefs.compute_device_type = 'CUDA'

  # Some CYCLES-specific stuff
  bpy.data.worlds['World'].cycles.sample_as_light = True
  bpy.context.scene.cycles.blur_glossy = 2.0
  bpy.context.scene.cycles.samples = args.render_num_samples
  bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
  bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
  if args.use_gpu == 1:
    bpy.context.scene.cycles.device = 'GPU'

  # This will give ground-truth information about the scene and its objects
  scene_struct = {
      'split': output_split,
      'objects': [],
      'directions': {},
  }

  # Put a plane on the ground so we can compute cardinal directions
  bpy.ops.mesh.primitive_plane_add(radius=5)
  plane = bpy.context.object

  def rand(L):
    # Uniform sample in [-L, L].
    return 2.0 * L * (random.random() - 0.5)

  # Add random jitter to camera position
  if args.camera_jitter > 0:
    for i in range(3):
      bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

  # Figure out the left, up, and behind directions along the plane and record
  # them in the scene structure
  camera = bpy.data.objects['Camera']
  plane_normal = plane.data.vertices[0].normal
  cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
  cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
  cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
  plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
  plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
  plane_up = cam_up.project(plane_normal).normalized()

  # Delete the plane; we only used it for normals anyway. The base scene file
  # contains the actual ground plane.
  utils.delete_object(plane)

  # Save all six axis-aligned directions in the scene struct
  scene_struct['directions']['behind'] = tuple(plane_behind)
  scene_struct['directions']['front'] = tuple(-plane_behind)
  scene_struct['directions']['left'] = tuple(plane_left)
  scene_struct['directions']['right'] = tuple(-plane_left)
  scene_struct['directions']['above'] = tuple(plane_up)
  scene_struct['directions']['below'] = tuple(-plane_up)

  # Add random jitter to lamp positions
  if args.key_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
  if args.back_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
  if args.fill_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)

  objects = []
  blender_objects = []
  direction_vec = []
  traj = []

  for scene_idx in range(num_images):

    image_path = image_template % (scene_idx + args.start_idx)
    render_args.filepath = image_path

    scene_path = scene_template % (scene_idx + args.start_idx)
    arr_path = arr_template % (scene_idx + args.start_idx)

    if scene_idx == 0:
      # First frame: sample initial positions, place objects, and compute
      # the whole trajectory used by all subsequent frames.

      init_positions = []
      # ref_obj_loc = np.array([0, -1, 0])
      ref_obj_loc = np.array([0, 0, 0])
      init_positions.append(ref_obj_loc)
      sizes = [('large', 0.6)]
      # sizes = [('small', 0.3)]
      inout = False

      # NOTE(review): axis 1 ("X") compares against posit[1] while axis 0
      # ("Y") compares posit[0]; the per-position component order looks
      # swapped relative to the labels -- confirm against add_random_objects.
      for obj_idx in range(1, args.max_objects):
        init_position = []
        for axis, movement in directions.items():
          # X
          if axis == 1:
            if movement != ['no']:
              if movement[0] == 'front':
                init_position.append(-3)
              else:
                init_position.append(2)
            else:
              tmp = random.choice(np.linspace(-2, 2, 10, endpoint=True))
              while np.array([abs(tmp - posit[1]) < args.margin for posit in init_positions]).any():
                tmp = random.choice(np.linspace(-2, 2, 10, endpoint=True))
              init_position.append(tmp)

          # Y
          if axis == 0:
            if movement != ['no']:
              if movement[0] == 'left':
                init_position.append(-2)
              else:
                init_position.append(2)
            else:
              tmp = random.choice(np.linspace(-2, 2, 10, endpoint=True))
              while np.array([abs(tmp - posit[0]) < args.margin for posit in init_positions]).any():
                tmp = random.choice(np.linspace(-2, 2, 10, endpoint=True))
              init_position.append(tmp)

          # Z
          if axis == 2:
            if movement != ['no']:
              if movement[0] == 'below':
                init_position.append(0)
              else:
                init_position.append(2)
            else:
              tmp = random.choice(np.linspace(0, 2, 10, endpoint=True))
              # while np.array([abs(tmp - posit[2]) < args.margin for posit in init_positions]).any():
              while abs(tmp - init_positions[0][2]) < args.margin:
                tmp = random.choice(np.linspace(0, 2, 10, endpoint=True))
              init_position.append(tmp)

          # CLOSE/FAR
          close_far_thresh = 3
          if axis == 3:
            if movement != ['no']:
              far_sample = []
              close_sample = []
              while far_sample == [] or \
                  np.linalg.norm(far_sample - ref_obj_loc) < close_far_thresh or \
                  np.array([np.linalg.norm(far_sample - posit) < args.min_dist for posit in init_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    far_sample = np.array([x, y, z])

              while close_sample == [] or \
                  np.linalg.norm(close_sample - ref_obj_loc) > close_far_thresh or \
                  np.linalg.norm(close_sample - ref_obj_loc) < 1:

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    close_sample = np.array([x, y, z])

              if movement[0] == 'far':
                init_position = far_sample

              else:
                init_position = close_sample
            else:
              pass

          # ON/OFF
          close_far_thresh = 1
          if axis == 4:
            if movement != ['no']:
              # size_mapping = [('small', 0.35), ('large', 0.7)]
              size_mapping = [('small', 0.5)]
              off_sample = []
              on_sample = []
              while off_sample == [] or \
                  np.linalg.norm(off_sample - ref_obj_loc) < close_far_thresh or \
                  np.array([np.linalg.norm(off_sample - posit) < args.min_dist for posit in init_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    # z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    z = 0
                    off_sample = np.array([x, y, z])

              x = init_positions[-1][0] + random.uniform(-0.2, 0.2)
              y = init_positions[-1][1] + random.uniform(-0.2, 0.2)
              size_name, r = random.choice(size_mapping)
              sizes.append((size_name, r))
              z = init_positions[-1][2] + sizes[-1][1] * 2

              on_sample = np.array([x, y, z])

              if movement[0] == 'off':
                init_position = off_sample

              else:
                init_position = on_sample
            else:
              pass

          # IN/OUT
          close_far_thresh = 1
          if axis == 5:
            if movement != ['no']:
              inout = True
              out_sample = []
              in_sample = []
              while out_sample == [] or \
                  np.linalg.norm(out_sample - ref_obj_loc) < close_far_thresh or \
                  np.array([np.linalg.norm(out_sample - posit) < args.min_dist for posit in init_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    # y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = -3 + random.uniform(-0.2, 0.2)
                    # z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    z = 0
                    out_sample = np.array([x, y, z])

              # if obj_idx == 1:
              #   offset = -1
              # else:
              #   offset = 1

              offsets = [[-1, -1], [-1, 1], [1, -1], [1, 1]]

              # x = init_positions[0][0] + offset
              # y = init_positions[0][1] + random.choice(np.linspace(-0.2, 0.2, 2, endpoint=True))
              x, y = offsets[obj_idx - 1]
              x += random.uniform(-0.1, 0.1)
              y += random.uniform(-0.1, 0.1)

              sizes.append(('small', 0.3))
              z = init_positions[0][2]

              in_sample = np.array([x, y, z])

              if movement[0] == 'out':
                init_position = out_sample

              else:
                init_position = in_sample
            else:
              pass

        init_positions.append(init_position)

      # FACILITATE STACKING: place objects bottom-up when stacking "on".
      # (The first clause is redundant with the second; kept for clarity.)
      if directions[4][0] != 'no' and directions[4][0] == 'on':
        init_positions = init_positions[:1] + init_positions[1:][::-1]

      print(init_positions)
      # Now make some random objects
      objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera, \
                                                    init_positions, sizes=sizes, inout=inout)

      # Re-read the positions actually assigned by add_random_objects.
      init_positions = []
      for obj in blender_objects:
        init_positions.append(np.array(obj.location))

      final_positions = []
      ref_obj_loc = init_positions[0]
      final_positions.append(ref_obj_loc)

      curvy = False

      for obj_idx in range(1, args.max_objects):
        final_position = []
        for axis, movement in directions.items():
          # X
          if axis == 1:
            if movement != ['no']:
              if movement[0] == 'front':
                final_position.append(2)
              else:
                final_position.append(-3)
            else:
              final_position.append(np.array(blender_objects[obj_idx].location[1]))

          # Y
          if axis == 0:
            if movement != ['no']:
              if movement[0] == 'left':
                final_position.append(2)
              else:
                final_position.append(-2)
            else:
              final_position.append(np.array(blender_objects[obj_idx].location[0]))

          # Z
          if axis == 2:
            if movement != ['no']:
              if movement[0] == 'below':
                final_position.append(2)
              else:
                # Fixed: was objects[obj]['r'], indexing the objects list
                # with a leftover Blender object from the loop above.
                final_position.append(objects[obj_idx]['r'])
            else:
              final_position.append(np.array(blender_objects[obj_idx].location[2]))

          # CLOSE/FAR
          # NOTE(review): unlike X/Y/Z, here the label names the *start*:
          # 'far' ends at the close sample (and vice versa) -- confirm
          # intended trajectory semantics.
          close_far_thresh = 3
          if axis == 3:
            if movement != ['no']:
              far_sample = []
              close_sample = []
              while far_sample == [] or \
                  np.linalg.norm(far_sample - ref_obj_loc) < close_far_thresh or \
                  np.array([np.linalg.norm(far_sample - posit) < args.min_dist for posit in final_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    far_sample = np.array([x, y, z])

              while close_sample == [] or \
                  np.linalg.norm(close_sample - ref_obj_loc) > close_far_thresh or \
                  np.linalg.norm(close_sample - ref_obj_loc) < 1 or \
                  np.array([np.linalg.norm(close_sample - posit) < args.min_dist for posit in final_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    close_sample = np.array([x, y, z])

              if movement[0] == 'far':
                final_position = close_sample
              else:
                final_position = far_sample
            else:
              pass

          # ON/OFF
          close_far_thresh = 1
          if axis == 4:
            if movement != ['no']:
              curvy = True
              off_sample = []
              on_sample = []
              while off_sample == [] or \
                    np.linalg.norm(off_sample - ref_obj_loc) < close_far_thresh or \
                  np.array([np.linalg.norm(off_sample - posit) < args.min_dist for posit in final_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    # z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    z = 0
                    off_sample = np.array([x, y, z])

              x = final_positions[-1][0] + random.uniform(-0.2, 0.2)
              y = final_positions[-1][1] + random.uniform(-0.2, 0.2)

              r_prev = objects[obj_idx-1]['r']
              r = objects[obj_idx]['r']
              z = final_positions[-1][2] + r_prev + r

              on_sample = np.array([x, y, z])

              if movement[0] == 'off':
                final_position = on_sample

              else:
                final_position = off_sample
            else:
              pass

          # IN/OUT
          close_far_thresh = 1
          if axis == 5:
            if movement != ['no']:
              curvy = True
              out_sample = []
              in_sample = []
              while out_sample == [] or \
                  np.linalg.norm(out_sample - ref_obj_loc) < close_far_thresh or \
                  np.array([np.linalg.norm(out_sample - posit) < args.min_dist for posit in final_positions]).any():

                    x = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    # y = random.choice(np.linspace(-2, 2, 10, endpoint=True))
                    y = -3 + random.uniform(-0.2, 0.2)
                    # z = random.choice(np.linspace(0.6, 2, 10, endpoint=True))
                    z = 0
                    out_sample = np.array([x, y, z])

              # if obj_idx == 1:
              #   offset = -1
              # else:
              #   offset = 1
              # x = final_positions[0][0] + offset
              # y = final_positions[0][1] + random.choice(np.linspace(-0.2, 0.2, 2, endpoint=True))

              #offsets = [[-1, -1], [-1, 1], [1, -1], [1, 1]]
              offsets = [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]]

              x, y = offsets[obj_idx - 1]
              # x, y = [0, 0]
              x += random.uniform(-0.1, 0.1)
              y += random.uniform(-0.1, 0.1)

              r = objects[obj_idx]['r']
              z = final_positions[0][2] + r

              in_sample = np.array([x, y, z])

              if movement[0] == 'out':
                final_position = in_sample

              else:
                final_position = out_sample
            else:
              pass

        final_positions.append(final_position)

      traj = calculate_trajectory(init_positions, final_positions, args, curvy=curvy)

    else:
      # Later frames: advance one object along the precomputed trajectory.
      move_obj_idx, pos = traj[scene_idx-1]

      move_object(blender_objects[move_obj_idx], pos)

      pixel_coords = utils.get_camera_coords(camera, blender_objects[move_obj_idx].location)
      objects[move_obj_idx]['pixel_coords'] = pixel_coords

      # <Vector (-1.6002, -1.5445, 1.9500)>
      objects[move_obj_idx]['3d_coords'] = list(blender_objects[move_obj_idx].location)

      # <Euler (x=0.0000, y=0.0000, z=139.0579), order='XYZ'>
      objects[move_obj_idx]['rotation'] = blender_objects[move_obj_idx].rotation_euler[2]

    ### get b_box
    box_dict = get_b_box.main(bpy.context, blender_objects)
    for _id in box_dict:
      objects[_id]['bbox'] = box_dict[_id]

    # Render the scene and dump the scene data structure
    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)

    # Route render passes through the compositor Viewer node so we can read
    # pixels back from bpy.data.images['Viewer Node'].
    tree = bpy.context.scene.node_tree
    links = tree.links
    rl = tree.nodes['Render Layers']
    v = tree.nodes['Viewer']

    links.new(rl.outputs[0], v.inputs[0])
    while True:
      try:
        bpy.ops.render.render(write_still=True)
        break
      except Exception as e:
        print(e)
    links.remove(links[0])

    # get viewer pixels (RGB pass, gamma-corrected and clipped to [0, 1])
    rgb_pixels = bpy.data.images['Viewer Node'].pixels
    rgb_pixels = np.array(rgb_pixels[:])
    rgb_pixels = np.power(rgb_pixels, 1/2.2)
    rgb_pixels[rgb_pixels > 1] = 1
    rgb_pixels = rgb_pixels.reshape(args.height, args.width, 4)[...,:3]

    links.new(rl.outputs[2], v.inputs[0])
    render_shadeless(blender_objects, lights_off=False)
    links.remove(links[0])

    # get viewer pixels (depth pass; first channel only)
    depth_pixels = bpy.data.images['Viewer Node'].pixels
    depth_pixels = np.array(depth_pixels[:])
    depth_pixels = depth_pixels.reshape(args.height, args.width, 4)[...,0, None]

    links.new(rl.outputs[0], v.inputs[0])
    render_shadeless(blender_objects)
    links.remove(links[0])

    # get viewer pixels (flat-color segmentation mask)
    mask_pixels = bpy.data.images['Viewer Node'].pixels
    mask_pixels = np.array(mask_pixels[:])
    mask_pixels = mask_pixels.reshape(args.height, args.width, 4)[...,:3]

    # Stack to (H, W, 7): RGB + depth + mask, flipped to image orientation.
    pixels = np.concatenate((rgb_pixels, depth_pixels, mask_pixels), axis=2)
    pixels = np.flipud(pixels)

    utils.save_arr(pixels, arr_path)

    with open(scene_path, 'w') as f:
      json.dump(scene_struct, f, indent=2)
コード例 #10
0
def render_scene(args,
                 num_objects=5,
                 output_index=0,
                 output_split='none',
                 output_image='render.png',
                 output_scene='render_json',
                 output_arr='arr',
                 output_blendfile=None,
                 scene_idx=0):
    """Render a single scene and dump its image, pixel array and JSON graph.

    Clears leftover shape objects, (on the first call only) configures the
    CYCLES renderer, jitters camera and lamps, places random objects, then
    renders RGB, depth and segmentation-mask passes through the compositor
    Viewer node and saves them as one (H, W, 7) array plus a JSON scene
    structure.

    Args:
        args: parsed command-line namespace (width, height, use_gpu,
            jitter amounts, render settings, material_dir, ...).
        num_objects: number of random objects to place.
        output_index: image index recorded in the scene struct.
        output_split: dataset split name recorded in the scene struct.
        output_image: path of the rendered PNG.
        output_scene: path of the JSON scene dump.
        output_arr: path passed to utils.save_arr for the pixel array.
        output_blendfile: if not None, the .blend file is saved here.
        scene_idx: global scene counter; renderer setup runs only when 0.
    """

    # Remove any shape objects left over from a previously generated scene.
    for object_name in bpy.data.objects.keys():
        if 'Sphere' in object_name or\
           'Cylinder' in object_name or\
           'Cube' in object_name or\
           'Duck' in object_name or\
           'Peg' in object_name or\
           'Disk' in object_name or\
           'Bowl' in object_name or\
           'Tray' in object_name:
            utils.delete_object_by_name(object_name)

    # Load the main blendfile
    # bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # NOTE(review): dead assignment -- scene_struct is rebuilt below.
    scene_struct = {}
    render_args = bpy.context.scene.render
    render_args.filepath = output_image

    # One-time renderer setup; later calls reuse the configured scene.
    if scene_idx == 0:
        # Load materials
        utils.load_materials(args.material_dir)

        # Set render arguments so we can get pixel coordinates later.
        # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
        # cannot be used.
        render_args.engine = "CYCLES"
        render_args.resolution_x = args.width
        render_args.resolution_y = args.height
        render_args.resolution_percentage = 100
        render_args.tile_x = args.render_tile_size
        render_args.tile_y = args.render_tile_size
        if args.use_gpu == 1:
            # Blender changed the API for enabling CUDA at some point
            if bpy.app.version < (2, 78, 0):
                bpy.context.user_preferences.system.compute_device_type = 'CUDA'
                bpy.context.user_preferences.system.compute_device = 'CUDA_0'
            else:
                cycles_prefs = bpy.context.user_preferences.addons[
                    'cycles'].preferences
                cycles_prefs.compute_device_type = 'CUDA'

        # Some CYCLES-specific stuff
        bpy.data.worlds['World'].cycles.sample_as_light = True
        bpy.context.scene.cycles.blur_glossy = 2.0
        bpy.context.scene.cycles.samples = args.render_num_samples
        bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
        bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
        if args.use_gpu == 1:
            bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Add random jitter to camera position
    if args.camera_jitter > 0:
        for i in range(3):
            bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    objects, blender_objects = add_random_objects(scene_struct, num_objects,
                                                  args, camera, scene_idx)

    ### get b_box
    box_dict = get_b_box.main(bpy.context, blender_objects)
    for _id in box_dict:
        objects[_id]['bbox'] = box_dict[_id]

    # Render the scene and dump the scene data structure
    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)

    # Route render passes through the compositor Viewer node so pixels can
    # be read back from bpy.data.images['Viewer Node'].
    tree = bpy.context.scene.node_tree
    links = tree.links
    rl = tree.nodes['Render Layers']
    v = tree.nodes['Viewer']

    links.new(rl.outputs[0], v.inputs[0])
    # Retry until the render succeeds (transient Blender errors are printed).
    while True:
        try:
            bpy.ops.render.render(write_still=True)
            break
        except Exception as e:
            print(e)
    links.remove(links[0])

    # get viewer pixels (RGB pass, gamma-corrected and clipped to [0, 1])
    rgb_pixels = bpy.data.images['Viewer Node'].pixels
    rgb_pixels = np.array(rgb_pixels[:])
    rgb_pixels = np.power(rgb_pixels, 1 / 2.2)
    rgb_pixels[rgb_pixels > 1] = 1
    rgb_pixels = rgb_pixels.reshape(args.height, args.width, 4)[..., :3]

    links.new(rl.outputs[2], v.inputs[0])
    render_shadeless(blender_objects, lights_off=False)
    links.remove(links[0])

    # get viewer pixels (depth pass; first channel only)
    depth_pixels = bpy.data.images['Viewer Node'].pixels
    depth_pixels = np.array(depth_pixels[:])
    depth_pixels = depth_pixels.reshape(args.height, args.width, 4)[..., 0,
                                                                    None]

    links.new(rl.outputs[0], v.inputs[0])
    render_shadeless(blender_objects)
    links.remove(links[0])

    # get viewer pixels (flat-color segmentation mask)
    mask_pixels = bpy.data.images['Viewer Node'].pixels
    mask_pixels = np.array(mask_pixels[:])
    mask_pixels = mask_pixels.reshape(args.height, args.width, 4)[..., :3]

    # Stack to (H, W, 7): RGB + depth + mask, flipped to image orientation.
    pixels = np.concatenate((rgb_pixels, depth_pixels, mask_pixels), axis=2)
    pixels = np.flipud(pixels)

    utils.save_arr(pixels, output_arr)

    with open(output_scene, 'w') as f:
        json.dump(scene_struct, f, indent=2)

    if output_blendfile is not None:
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
コード例 #11
0
def render_scene(
    args,
    num_objects=5,
    output_index=0,
    output_split='none',
    output_image='render.png',
    output_scene='render_json',
    output_blendfile=None,
):
    """Render a scene from one or many camera viewpoints and dump its JSON.

    Loads the base blendfile, configures CYCLES, and builds a per-camera
    ``view_struct``.  With ``args.multi_view`` set, extra cameras are
    spawned on a circle of radius 10 around the scene (when
    ``args.random_views``) and each camera renders its own image; otherwise
    a single camera named 'cc' is used.  Objects (and text items) are
    placed with ``add_random_objects`` and the per-view scene graphs are
    written to ``output_scene``.

    Args:
        args: parsed command-line namespace (width, height, use_gpu,
            multi_view, random_views, shadow_less, save_blendfiles,
            jitter amounts, render settings, ...).
        num_objects: number of random objects to place.
        output_index: base image index; multi-view images increment it.
        output_split: dataset split name recorded per view.
        output_image: base path of the rendered PNG(s).
        output_scene: path of the JSON view-struct dump.
        output_blendfile: .blend save path used when args.save_blendfiles.
    """

    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    view_struct = {}
    if args.multi_view:
        cams = [obj for obj in bpy.data.objects if obj.type == 'CAMERA']
        poses = [cam.location for cam in cams]

        print(poses)
        if args.random_views:

            num_samples = 20
            # generate the points on a circle of radius r around the scene
            theta = [uniform(0, 2 * PI) for _ in range(num_samples)]
            r = 10
            x = [r * math.cos(t) for t in theta]
            y = [r * math.sin(t) for t in theta]

            scn = bpy.context.scene
            origin_empty = cams[0].constraints[0].target
            cams = [cams[0]]
            for i in range(num_samples):

                # create the first camera
                cam = bpy.data.cameras.new("cam" + str(i))

                # create the first camera object
                cam_obj = bpy.data.objects.new("cam" + str(i), cam)
                cam_obj.location = (x[i], y[i], 6.0)

                # Track the scene origin so every camera looks inward.
                m = cam_obj.constraints.new('TRACK_TO')
                m.target = origin_empty
                m.track_axis = 'TRACK_NEGATIVE_Z'
                m.up_axis = 'UP_Y'
                scn.objects.link(cam_obj)
                cams.append(cam_obj)
    else:
        cams = [obj for obj in bpy.data.objects if obj.name == 'cc']
    if args.multi_view:
        bpy.context.scene.update()

        for idx, cam in enumerate(cams):

            path_dir = bpy.context.scene.render.filepath
            path = ".".join(path_dir.split(".")[:-1]) + "_" + cam.name + ".png"
            # 6 total parameters defining the xyz location and xyz rotation (in radians) of the camera

            cam_params = list(cam.location[:]) + list(
                cam.matrix_world.to_euler('XYZ')[:])
            view_struct[cam.name] = {
                'split': output_split,
                'image_index': output_index + idx,
                'image_filename': os.path.basename(path.split("/")[-1]),
                'objects': [],
                'directions': {},
                'cam_params': cam_params
            }
    else:
        cam_params = list(cams[0].location[:]) + list(
            cams[0].rotation_euler[:])
        view_struct['cc'] = {
            'split': output_split,
            'image_index': output_index,
            'image_filename': os.path.basename(output_image),
            'objects': [],
            'directions': {},
            'cam_params': cam_params
        }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].  NOTE(review): relies on a module-level
        # `from random import random` (cf. `uniform`/`PI` above) -- confirm.
        return 2.0 * L * (random() - 0.5)

    # Add random jitter to camera position.
    # Fixed: the loop previously jittered cams[0] once per camera in the
    # list instead of jittering each camera, multiplying the jitter on the
    # first camera and leaving the others untouched.
    if args.camera_jitter > 0:
        for cam in cams:
            for i in range(3):
                cam.location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['cc']
    #Quaternion((0.781359076499939, 0.46651220321655273, 0.2125076949596405, 0.3559281527996063))
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    view_struct['cc']['directions']['behind'] = tuple(plane_behind)
    view_struct['cc']['directions']['front'] = tuple(-plane_behind)
    view_struct['cc']['directions']['left'] = tuple(plane_left)
    view_struct['cc']['directions']['right'] = tuple(-plane_left)
    view_struct['cc']['directions']['above'] = tuple(plane_up)
    view_struct['cc']['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    texts, blender_texts, objects, blender_objects = add_random_objects(
        view_struct, num_objects, args, cams)

    # Optionally disable shadow casting for every placed object.
    if args.shadow_less:
        for obj in blender_objects:
            bpy.context.scene.objects.active = obj
            bpy.context.object.cycles_visibility.shadow = False

    # Render the scene and dump the scene data structure
    for cam in cams:
        view_struct[cam.name]['objects'] = objects[cam.name]
        view_struct[cam.name]['texts'] = texts
        view_struct[cam.name]['relationships'] = compute_all_relationships(
            view_struct)
    # Retry until rendering succeeds (transient Blender errors are printed).
    while True:
        try:
            path_dir = bpy.context.scene.render.filepath  # save for restore
            if args.multi_view:
                for cam in cams:
                    bpy.context.scene.camera = cam
                    bpy.context.scene.render.filepath = ".".join(
                        path_dir.split(".")[:-1]) + "_" + cam.name + ".png"
                    bpy.ops.render.render(write_still=True)
                    bpy.context.scene.render.filepath = path_dir
            else:
                bpy.ops.render.render(write_still=True)
            break

        except Exception as e:
            print(e)

    with open(output_scene, 'w') as f:
        json.dump(view_struct, f, indent=2)

    if args.save_blendfiles:
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
コード例 #12
0
def render_scene(args,
                 num_objects=5,
                 output_index=0,
                 output_split='none',
                 output_image='render.png',
                 output_folder='render',
                 output_scene='render_json',
                 output_blendfile=None,
                 output_obj_folder=None,
                 num_imgs_per_scene=50):
    """Render one random scene from NeRF-style camera poses and export geometry.

    Opens the base blendfile, places ``num_objects`` random objects, then for
    each split (train/test/val) loads camera poses from
    ``<output_folder>/nerf_cams_<split>.npz`` and renders one image per pose.
    Each render also exports the scene as a Wavefront ``.obj`` (for PIFu) and
    dumps the ground-truth scene structure to ``output_scene``.

    Args:
        args: command-line namespace (blendfile paths, resolution, GPU flag,
            light-jitter amounts, render sampling options, ...).
        num_objects: number of random objects to place.
        output_index: index recorded in the scene struct.
        output_split: split name recorded in the scene struct.
        output_image: image filename template; must contain a ``%d`` slot.
        output_folder: folder holding per-split subfolders and the pose files.
        output_scene: path of the JSON scene-struct dump.
        output_blendfile: if not None, save the .blend after each render.
        output_obj_folder: folder receiving the exported ``.obj`` files.
        num_imgs_per_scene: number of viewpoints used to build ``t_list``.

    Returns:
        float: signed total mesh volume of the placed objects.
    """
    if not os.path.isdir(output_obj_folder):
        os.makedirs(output_obj_folder)

    # Load the main blendfile.
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
    # NOTE(review): assumes the ground plane is the active object right after
    # opening the blendfile — confirm against the base scene file.
    ground_plane = bpy.context.object

    # Load materials.
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.image_settings.color_mode = 'RGBA'  # keep alpha channel
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point.
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff.
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # Ground-truth information about the scene and its objects.
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'image_folder': os.path.basename(output_folder),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions.
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L).
        return 2.0 * L * (random.random() - 0.5)

    # Camera intrinsics; angle_x is the horizontal FOV that NeRF's
    # transforms.json expects as "camera_angle_x".
    camera = bpy.data.objects["Camera"]
    angle_x = camera.data.angle_x
    transforms = dict()
    transforms.update({"camera_angle_x": angle_x})

    # Figure out the left, up, and behind directions along the plane and
    # record them in the scene structure.
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene
    # file contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct.
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions.
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects. `objects` holds the scene properties
    # (shape, color, etc.); `blender_objects` the actual mesh objects.
    objects, blender_objects = add_random_objects(scene_struct, num_objects,
                                                  args, camera)

    # We want objects in .obj format for PIFu: for this we group rendered
    # objects together and export their geometry. Delete the real ground
    # plane first so it is not swept into the group.
    utils.delete_object(ground_plane)

    vol, vol_obj = 0.0, 0.0
    # Group the meshes and accumulate the signed volume of each object.
    bpy.ops.group.create(name="meshGroup")
    mesh_list = _register_meshes()
    for ob in mesh_list:
        bpy.context.scene.objects[ob.name].select = True
        bpy.ops.object.group_link(group="meshGroup")
        bm, is_negative = get_mesh_from_obj(ob)
        bpy.context.scene.objects[ob.name].select = False

        vol_obj = bm.calc_volume(signed=True)  # volume of a single object
        if is_negative:
            vol_obj = -vol_obj  # flip sign for inward-facing meshes
        bm.free()
        vol += vol_obj

    print("Volume of scene: ", vol)
    # NOTE(review): assumes the central sphere is the active object here,
    # as seen by bpy.context.object — confirm.
    target_obj = bpy.context.object
    base_dist = (target_obj.location.xy - camera.location.xy).length
    print("base dist", base_dist)

    t_list = np.linspace(0, 1, num_imgs_per_scene)
    # Frames for a NeRF transforms.json. The spiral-path generator that used
    # to fill this list is disabled (poses come from the .npz files instead),
    # so it stays empty.
    frames = []

    # NOTE: since each generated dataset instance is unique, train/test/val
    # images must all be generated in one go — otherwise the scene would
    # change between two folders. (Lego reference: train/val have 100 images
    # each; test has 200.)
    filetype_list = ['train', 'test', 'val']
    for filetype in filetype_list:
        save_folder = os.path.join(output_folder, filetype)
        if not os.path.exists(save_folder):
            os.makedirs(save_folder)
        print("output folder path", save_folder)
        pose_path = os.path.join(output_folder,
                                 'nerf_cams_{}.npz'.format(filetype))
        poses = np.load(pose_path)

        for j in range(len(poses['set_rot'])):
            img = os.path.join(save_folder, output_image) % j
            render_args.filepath = img
            rot = poses['set_rot'][j]
            t = poses['set_loc'][j]
            camera.location = Vector(t)
            camera.rotation_euler = Vector(rot)

            # Render the scene and dump the scene data structure.
            scene_struct['objects'] = objects
            scene_struct['relationships'] = compute_all_relationships(
                scene_struct)
            # Render with a transparent background. FIX: the original wrote
            # `bpy.types.CyclesRenderSettings.film_transparent = True`, which
            # only attaches a dormant attribute to the RNA class and never
            # affects the active scene; set it on the scene's cycles settings.
            bpy.context.scene.cycles.film_transparent = True
            while True:
                try:
                    bpy.ops.render.render(write_still=True)
                    break
                except Exception as e:
                    print(e)

            obj_file_template = '%%0%dd.obj' % 6  # num_digits hardcoded here
            print(" export scene obj path: ",
                  os.path.join(output_obj_folder, obj_file_template) % j)
            bpy.ops.export_scene.obj(
                filepath=os.path.join(output_obj_folder, obj_file_template) %
                j)

            with open(output_scene, 'w') as f:
                json.dump(scene_struct, f, indent=2)

            bpy.ops.object.select_all(action='DESELECT')

            if output_blendfile is not None:
                bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)

        # Record the (currently empty) frame list for transforms.json.
        transforms.update({"frames": frames})

    # NeRF-specific work: dump intrinsics + frames next to the images.
    # FIX: the original did `train_path = os.mkdir(train_path)`, which
    # rebinds train_path to None (os.mkdir returns None) and makes the
    # subsequent os.path.join raise TypeError.
    train_path = './train'
    if not os.path.isdir(train_path):
        os.mkdir(train_path)
    train_file = os.path.join(train_path, 'transforms_train.json')
    with open(train_file, 'w') as outfile:
        json.dump(transforms, outfile)

    return vol
# --- コード例 #13 (Code example #13) — score: 0 ---
def render_scene(args,
                 output_index=0,
                 output_split='none',
                 output_image='render.png',
                 output_scene='render_json',
                 output_blendfile=None,
                 change_cam_loc=False,
                 objects=None,
                 add_noise=False,
                 new_cam_loc=None,
                 new_rot=None):
    """Render a scene built from an explicit list of object descriptions.

    With ``add_noise=False``: open the base blendfile, configure CYCLES,
    optionally reposition the camera (``new_cam_loc`` / ``new_rot``), place
    ``objects`` via ``add_objects``, render ``output_image`` and dump the
    ground-truth structure to ``output_scene``.

    With ``add_noise=True``: generate perturbed copies of the layout. For
    each spacing factor in ``[0.5, 1, 2]`` the objects' x coordinates are
    remapped via ``new_stack_x`` and the function recurses with
    ``add_noise=False`` to render ``<image>_<factor>.png``.

    Fixed relative to the original:
      * ``objects`` defaulted to a shared mutable list (``[]``); now ``None``
        normalized to a fresh list per call.
      * the recursive call passed its arguments positionally, sending
        ``objects_args`` into ``change_cam_loc`` and ``False`` into
        ``objects``, so the perturbed layout was never actually rendered;
        it now passes keyword arguments.
    """
    if objects is None:
        objects = []

    if not add_noise:
        # Load the main blendfile and the material library.
        bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
        utils.load_materials(args.material_dir)

        # Set render arguments so we can get pixel coordinates later.
        # We use functionality specific to the CYCLES renderer so
        # BLENDER_RENDER cannot be used.
        render_args = bpy.context.scene.render
        render_args.engine = "CYCLES"
        render_args.filepath = output_image
        render_args.resolution_x = args.width
        render_args.resolution_y = args.height
        render_args.resolution_percentage = 100
        render_args.tile_x = args.render_tile_size
        render_args.tile_y = args.render_tile_size
        if args.use_gpu == 1:
            # Blender changed the API for enabling CUDA at some point.
            if bpy.app.version < (2, 78, 0):
                bpy.context.user_preferences.system.compute_device_type = 'CUDA'
                bpy.context.user_preferences.system.compute_device = 'CUDA_0'
            else:
                cycles_prefs = bpy.context.user_preferences.addons[
                    'cycles'].preferences
                cycles_prefs.compute_device_type = 'CUDA'

        # Some CYCLES-specific stuff.
        bpy.data.worlds['World'].cycles.sample_as_light = True
        bpy.context.scene.cycles.blur_glossy = 2.0
        bpy.context.scene.cycles.samples = args.render_num_samples
        bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
        bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
        if args.use_gpu == 1:
            bpy.context.scene.cycles.device = 'GPU'

        # Ground-truth information about the scene and its objects.
        scene_struct = {
            'split': output_split,
            'image_index': output_index,
            'image_filename': os.path.basename(output_image),
            'objects': [],
            'directions': {},
        }

        # Put a plane on the ground so we can compute cardinal directions.
        bpy.ops.mesh.primitive_plane_add(radius=5)
        plane = bpy.context.object

        camera = bpy.data.objects['Camera']
        if new_cam_loc is not None:
            camera.location = new_cam_loc
        if new_rot is not None:
            camera.rotation_euler = new_rot

        # Figure out the left, up, and behind directions along the plane
        # and record them in the scene structure.
        plane_normal = plane.data.vertices[0].normal
        cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
        cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
        cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
        plane_behind = (cam_behind -
                        cam_behind.project(plane_normal)).normalized()
        plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
        plane_up = cam_up.project(plane_normal).normalized()

        # Delete the plane; we only used it for normals anyway. The base
        # scene file contains the actual ground plane.
        utils.delete_object(plane)

        # Save all six axis-aligned directions in the scene struct.
        scene_struct['directions']['behind'] = tuple(plane_behind)
        scene_struct['directions']['front'] = tuple(-plane_behind)
        scene_struct['directions']['left'] = tuple(plane_left)
        scene_struct['directions']['right'] = tuple(-plane_left)
        scene_struct['directions']['above'] = tuple(plane_up)
        scene_struct['directions']['below'] = tuple(-plane_up)

        # Place the requested objects in the scene.
        blender_objects = add_objects(scene_struct, camera, objects)

        # Render the scene and dump the scene data structure, retrying on
        # transient render failures.
        scene_struct['objects'] = objects
        scene_struct['relationships'] = compute_all_relationships(scene_struct)

        while True:
            try:
                bpy.ops.render.render(write_still=True)
                break
            except Exception as e:
                print(e)

        with open(output_scene, 'w') as f:
            json.dump(scene_struct, f, indent=2)

        if output_blendfile is not None:
            bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)

    else:
        for added in [0.5, 1, 2]:
            loc_dict = {}
            # NOTE(review): .copy() is shallow, so rewriting ob["location"]
            # below mutates the caller's object dicts as well — confirm this
            # is intended before reusing `objects` after this call.
            objects_args = objects.copy()
            # Collect the distinct x coordinates currently in use.
            for ob in objects_args:
                if ob["location"][0] not in loc_dict:
                    loc_dict[ob["location"][0]] = ob["location"][0]
            # Map each old x coordinate (in sorted order) to a new one
            # produced for this spacing factor.
            new_loc = new_stack_x(args, added)
            for idx, k in enumerate(
                    sorted(loc_dict.keys(), key=lambda z: loc_dict[z])):
                loc_dict[k] = new_loc[idx]
            for ob in objects_args:
                ob["location"] = (loc_dict[ob["location"][0]],
                                  ob["location"][1], ob["location"][2])
            new_im_path = "".join(
                [output_image.split(".png")[0], "_" + str(added), ".png"])
            new_scene_path = "".join(
                [output_scene.split(".json")[0], "_" + str(added), ".json"])
            if os.path.isfile(new_im_path):
                continue
            # FIX: use keyword arguments; the original positional call put
            # objects_args into change_cam_loc and False into objects.
            render_scene(args,
                         output_index=output_index,
                         output_split=output_split,
                         output_image=new_im_path,
                         output_scene=new_scene_path,
                         output_blendfile=output_blendfile,
                         objects=objects_args,
                         add_noise=False)
# --- コード例 #14 (Code example #14) — score: 0 ---
def render_scene(
    args,
    num_objects=5,
    output_index=0,
    output_split="none",
    output_image="render.png",
    output_mask="mask.png",
    output_scene="render_json",
    output_blendfile=None,
):
    """Render one randomly generated scene together with an object mask.

    Opens the base blendfile, configures the CYCLES renderer, jitters the
    camera and lamp positions, places ``num_objects`` random objects, then
    renders ``output_image`` and writes the ground-truth layout (objects,
    cardinal directions, pairwise relationships) to ``output_scene`` as JSON.
    Optionally saves the whole .blend file to ``output_blendfile``.
    """
    # Start from the shared base scene and load the material library.
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)
    utils.load_materials(args.material_dir)

    # Recovering pixel coordinates later relies on CYCLES-only features,
    # so BLENDER_RENDER is not an option here.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # The API for enabling CUDA differs across Blender versions.
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = "CUDA"
            bpy.context.user_preferences.system.compute_device = "CUDA_0"
        else:
            prefs = bpy.context.user_preferences.addons["cycles"].preferences
            prefs.compute_device_type = "CUDA"

    # CYCLES sampling / bounce configuration.
    bpy.data.worlds["World"].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = "GPU"

    # Ground-truth record for this scene; filled in as we go.
    scene_struct = {
        "split": output_split,
        "image_index": output_index,
        "image_filename": os.path.basename(output_image),
        "mask_filename": os.path.basename(output_mask),
        "objects": [],
        "directions": {},
    }

    # Temporary ground plane, used only to read off its surface normal.
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(amount):
        # Uniform sample in [-amount, amount).
        return 2.0 * amount * (random.random() - 0.5)

    # Perturb the camera position slightly.
    if args.camera_jitter > 0:
        for axis in range(3):
            bpy.data.objects["Camera"].location[axis] += rand(
                args.camera_jitter)

    # Project the camera's behind/left/up axes onto the ground plane to
    # obtain scene-space cardinal directions.
    camera = bpy.data.objects["Camera"]
    plane_normal = plane.data.vertices[0].normal
    cam_quat = camera.matrix_world.to_quaternion()
    cam_behind = cam_quat * Vector((0, 0, -1))
    cam_left = cam_quat * Vector((-1, 0, 0))
    cam_up = cam_quat * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # The helper plane is no longer needed; the base scene contains the
    # actual ground plane.
    utils.delete_object(plane)

    # Record all six axis-aligned directions.
    for direction, vec in (("behind", plane_behind), ("front", -plane_behind),
                           ("left", plane_left), ("right", -plane_left),
                           ("above", plane_up), ("below", -plane_up)):
        scene_struct["directions"][direction] = tuple(vec)

    # Perturb each lamp position slightly. Key, back, fill — in that order,
    # so the consumption of the random stream matches earlier behavior.
    for lamp_name, jitter in (("Lamp_Key", args.key_light_jitter),
                              ("Lamp_Back", args.back_light_jitter),
                              ("Lamp_Fill", args.fill_light_jitter)):
        if jitter > 0:
            for axis in range(3):
                bpy.data.objects[lamp_name].location[axis] += rand(jitter)

    # Place the random objects; output_mask is forwarded to
    # add_random_objects (presumably where the mask image is produced —
    # confirm against that helper).
    objects, blender_objects = add_random_objects(scene_struct,
                                                  num_objects,
                                                  args,
                                                  camera,
                                                  output_mask=output_mask)

    # Render (retrying on transient failures) and dump the scene struct.
    scene_struct["objects"] = objects
    scene_struct["relationships"] = compute_all_relationships(scene_struct)
    while True:
        try:
            bpy.ops.render.render(write_still=True)
        except Exception as e:
            print(e)
        else:
            break

    with open(output_scene, "w") as f:
        json.dump(scene_struct, f, indent=2)

    if output_blendfile is not None:
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
# --- コード例 #15 (Code example #15) — score: 0 ---
def render_scene(args,
                 num_objects=5,
                 output_index=0,
                 output_split='none',
                 output_image='render.png',
                 output_scene='render_json',
                 output_blendfile=None,
                 camera_info_path=None,
                 object_info_path=None):
    """Set up (and optionally render) an animated, possibly multi-camera scene.

    Unlike the single-image variants, this renders an animation: RGB frames
    as JPEG with the z-buffer enabled, plus per-frame depth maps written as
    OpenEXR through a compositor file-output node. With
    ``args.num_cameras > 1`` it enables Blender's multiview rendering and
    adds one camera per view.

    Args:
        args: options namespace (resolution, tile size, fps, num_frames,
            num_cameras, cpu flag, render flag, random_camera flag, ...).
        num_objects: forwarded to ``setup_scene``.
        output_index, output_split, output_image, output_scene: forwarded to
            ``setup_scene``; the image name's stem also names the per-clip
            output directory.
        output_blendfile: if given and the file exists, the scene is loaded
            from it instead of being set up; if given and missing, the
            freshly set-up scene is saved to it.
        camera_info_path: if given, camera metadata is written here.
        object_info_path: if given, object metadata is written here.
    """
    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Per-clip output directory, named after the image filename's stem.
    output_image_dir = os.path.join(
        args.output_image_dir,
        os.path.basename(output_image).split('.')[0])

    mkdir_p(output_image_dir)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    bpy.ops.screen.frame_jump(end=False)  # rewind to the first frame
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    # render_args.engine = "BLENDER_RENDER"
    render_args.filepath = os.path.abspath(
        os.path.join(output_image_dir, 'RGB'))
    # render_args.filepath = os.path.abspath(output_image)
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    # render_args.image_settings.file_format = 'AVI_JPEG'
    render_args.image_settings.file_format = 'JPEG'
    render_args.image_settings.use_zbuffer = True  # render depth
    # Video params
    bpy.context.scene.frame_start = 0
    bpy.context.scene.frame_end = args.num_frames  # same as kinetics
    render_args.fps = args.fps

    # render depth: route the depth pass through the compositor so it can
    # be written out per frame alongside the RGB render.
    bpy.context.scene.render.use_compositing = True
    bpy.data.scenes['Scene'].use_nodes = True
    tree = bpy.data.scenes['Scene'].node_tree
    links = tree.links

    # create input render layer node
    rl = tree.nodes['Render Layers']

    # map_value_node = tree.nodes.new("CompositorNodeMapValue") # map the depth to [0, 1] st we can save it as jpg
    # map_value_node.size = [0.05,]

    output_file_node = tree.nodes.new(
        "CompositorNodeOutputFile")  # this nodes saves image
    output_file_node.base_path = os.path.abspath(output_image_dir)
    output_file_node.format.file_format = 'OPEN_EXR'  # this is linear format w/o distortion
    output_file_node.file_slots[0].path = "Depth"

    # links.new(rl.outputs[2], map_value_node.inputs['Value'])
    # links.new(map_value_node.outputs['Value'], output_file_node.inputs['Image'])
    # NOTE(review): assumes output index 2 of the Render Layers node is the
    # depth (Z) pass — confirm against the render-layer configuration.
    links.new(rl.outputs[2], output_file_node.inputs['Image'])

    # if args.random_camera:
    #     assert args.num_cameras == 1 # we don't support both for now

    if args.num_cameras > 1:
        # Multiview setup: disable the default stereo views, then add one
        # RenderView + camera per requested viewpoint.
        camera_poses = sample_camera_poses(num_samples=args.num_cameras)
        active_camera_list = []
        bpy.data.scenes['Scene'].render.use_multiview = True
        bpy.data.scenes['Scene'].render.views_format = 'MULTIVIEW'
        bpy.data.scenes['Scene'].render.views['left'].use = False
        bpy.data.scenes['Scene'].render.views['right'].use = False
        bpy.ops.scene.render_view_add()  # update the scene
        bpy.data.scenes['Scene'].render.views[
            'RenderView'].use = False  # disable default cam
        # disable

        for i in range(1, args.num_cameras + 1):
            bpy.ops.scene.render_view_add()
            current_view_name = 'RenderView' + '.' + str(i).zfill(
                3)  # e.g. RenderView.001
            bpy.data.scenes['Scene'].render.views[
                current_view_name].camera_suffix = '_' + str(i)  # e.g._1
            add_new_camera('Camera_' + str(i), camera_poses[i - 1])
            active_camera_list.append('Camera_' + str(i))

        # add_new_camera('Camera_L', (7.48, -6.5, 5.34))
        # add_new_camera('Camera_R', (7.48, 6.5, 5.34))

        # bpy.ops.scene.render_view_add() # update the scene
        # bpy.data.scenes['Scene'].render.views['RenderView'].use = False # disable default cam
        # active_camera_list = ['Camera_L', 'Camera_R']
    else:
        active_camera_list = ['Camera']

    if args.cpu is False:
        # Blender changed the API for enabling CUDA at some point
        # bpy.context.user_preferences.system.compute_device_type = 'CUDA'
        # bpy.context.user_preferences.system.compute_device = 'CUDA_0'

        if bpy.app.version < (2, 78, 0) or os.name == 'nt':
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'

        else:
            # cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
            # cycles_prefs.compute_device_type = 'CUDA'

            bpy.context.user_preferences.addons[
                'cycles'].preferences.compute_device_type = 'CUDA'
            bpy.context.user_preferences.addons[
                'cycles'].preferences.compute_device = 'CUDA_0'

            # # In case more than 1 device passed in, use only the first one
            # Not effective, CUDA_VISIBLE_DEVICES before running singularity
            # works fastest.
            # if len(cycles_prefs.devices) > 2:
            #     for device in cycles_prefs.devices:
            #         device.use = False
            #     cycles_prefs.devices[1].use = True
            #     print('Too many GPUs ({}). Using {}. Set only 1 before '
            #           'running singularity.'.format(
            #               len(cycles_prefs.devices),
            #               cycles_prefs.devices[1]))

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.cpu is False:
        bpy.context.scene.cycles.device = 'GPU'

    # Reuse a previously saved scene when available; otherwise build a new
    # one from scratch.
    if output_blendfile is not None and os.path.exists(output_blendfile):
        logging.info(
            'Loading pre-defined BLEND file from {}'.format(output_blendfile))
        bpy.ops.wm.open_mainfile(filepath=output_blendfile)
    else:
        setup_scene(args, num_objects, output_index, output_split,
                    output_image, output_scene)

    if args.random_camera:
        add_random_camera_motion(args.num_frames, active_camera_list)

    # Optionally persist camera / object metadata for downstream consumers.
    if camera_info_path is not None:
        save_camera_info(camera_info_path, active_camera_list)
    if object_info_path is not None:
        save_object_info(object_info_path)

    if output_blendfile is not None and not os.path.exists(output_blendfile):
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
    # max_num_render_trials = 10
    if args.render:
        # bpy.context.scene.render.resolution_percentage = 100
        bpy.ops.render.render(animation=True)  # render the full animation
# --- コード例 #16 (Code example #16) — score: 0 ---
def render_scene(args,
                 output_index=0,
                 output_split='none',
                 output_image='render.png',
                 output_scene='render_json',
                 output_blendfile=None,
                 idx=-1):
    """Render one randomized scene with Blender/CYCLES and dump ground truth.

    Loads the base blendfile, jitters the camera and lamps, places random
    objects, renders the image together with per-object ID masks (via a
    compositor node graph), then writes a JSON scene description containing
    the objects, their pairwise spatial relationships, 2D bounding boxes and
    segmentation masks.

    Args:
        args: parsed argparse namespace with all rendering options.
        output_index: integer image index recorded in the scene struct.
        output_split: dataset split name recorded in the scene struct.
        output_image: path for the rendered PNG.
        output_scene: path for the output JSON scene description.
        output_blendfile: if not None, also save the .blend file here.
        idx: scene index used to namespace temporary mask files on disk.

    Raises:
        RuntimeError: if rendering keeps failing after a bounded number of
            retries.
        Exception: if the external mask-to-JSON post-processing script fails.
    """

    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Add random jitter to camera position
    if args.camera_jitter > 0:
        for i in range(3):
            bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    objects, blender_objects = add_random_objects(scene_struct, args, camera,
                                                  idx)

    # Record a 1-based index on each object record; this matches the
    # pass_index assigned below for the ID-mask render passes.
    for i in range(len(objects)):
        objects[i]['idx'] = i + 1

    # Ground-truth 2D bounding boxes for every placed object.
    box_dict = get_b_box.main(bpy.context, blender_objects)

    def build_rendermask_graph(num_obj):
        # Build a compositor node graph that writes one binary ID mask image
        # per object (pass_index 1..num_obj) into ./tmp_graph_output/.

        # switch on nodes
        bpy.context.scene.use_nodes = True
        tree = bpy.context.scene.node_tree

        # clear default nodes
        for n in tree.nodes:
            tree.nodes.remove(n)

        # create input render layer node
        rl = tree.nodes.new('CompositorNodeRLayers')
        rl.location = 185, 285

        scene = bpy.context.scene
        nodes = scene.node_tree.nodes

        render_layers = nodes['Render Layers']

        # One file-output node per object; paths are namespaced by idx so
        # concurrent renders do not clobber each other's masks.
        ofile_nodes = [
            nodes.new("CompositorNodeOutputFile") for _ in range(num_obj)
        ]
        for _i, of_node in enumerate(ofile_nodes):
            of_node.base_path = "./tmp_graph_output/indexob{}_{}".format(
                _i, idx)

        # One ID-mask node per object, keyed by the object's pass_index.
        idmask_list = [
            nodes.new("CompositorNodeIDMask") for _ in range(num_obj)
        ]
        for _i, o_node in enumerate(idmask_list):
            o_node.index = _i + 1

        # The IndexOB pass must be enabled for the ID-mask nodes to see it.
        bpy.data.scenes['Scene'].render.layers[
            'RenderLayer'].use_pass_object_index = True

        # Wire: render layer IndexOB -> ID mask -> file output.
        for _i in range(num_obj):
            scene.node_tree.links.new(render_layers.outputs['IndexOB'],
                                      idmask_list[_i].inputs[0])
            scene.node_tree.links.new(idmask_list[_i].outputs[0],
                                      ofile_nodes[_i].inputs['Image'])

    def get_diff_obj_points():
        # Assign each object a unique 1-based pass index (consumed by the
        # ID-mask compositor nodes) and return the number of objects.
        for index, obj in enumerate(blender_objects, start=1):
            obj.pass_index = index
        return len(blender_objects)

    index = get_diff_obj_points()
    build_rendermask_graph(index)

    # Render the scene and dump the scene data structure
    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)
    # CYCLES occasionally fails transiently; retry a bounded number of times
    # instead of looping forever on a persistent error.
    max_render_trials = 10
    for _trial in range(max_render_trials):
        try:
            bpy.ops.render.render(write_still=True)
            break
        except Exception as e:
            print(e)
    else:
        raise RuntimeError(
            'Rendering failed {} times; giving up.'.format(max_render_trials))

    # save_as_json: convert the rendered ID-mask images into a JSON mask file.
    cmd = ['python', './restore_img2json.py', str(index), str(idx)]
    res = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    # communicate() (rather than wait() + read()) drains the pipe while the
    # child runs, avoiding a deadlock when the child fills the stdout buffer.
    out, _ = res.communicate()
    print('res: ', res.returncode)
    if res.returncode != 0:
        print("  os.wait:exit status != 0\n")
        print("after read: {}".format(out))
        raise Exception('error in img2json')

    # Load the masks produced by the helper script, then delete the temp file.
    _path = '/tmp/obj_mask_{}.json'.format(idx)
    with open(_path) as f:
        obj_mask = json.load(f)
    os.remove(_path)

    scene_struct['obj_mask'] = obj_mask
    scene_struct['obj_bbox'] = box_dict

    with open(output_scene, 'w') as f:
        json.dump(scene_struct, f, indent=2)

    if output_blendfile is not None:
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
コード例 #17
0
def render_scene(
    args,
    num_objects=5,
    output_index=0,
    output_split='none',
    output_image='render.png',
    output_scene='render.json',
    output_blendfile='render.blend',
):
    """Render one randomized scene with Blender/CYCLES and dump ground truth.

    Loads the base blendfile, jitters the camera and lamps, places
    ``num_objects`` random objects, renders the image (unless blendfile
    saving is requested instead), and writes a JSON scene description with
    the objects and their pairwise spatial relationships. When
    ``args.save_blendfiles == 1`` the scene is saved and gzip-compressed to
    ``output_blendfile + '.gz'`` instead of being rendered.

    Args:
        args: parsed argparse namespace with all rendering options.
        num_objects: number of random objects to place in the scene.
        output_index: integer image index recorded in the scene struct.
        output_split: dataset split name recorded in the scene struct.
        output_image: path for the rendered PNG.
        output_scene: path for the output JSON scene description.
        output_blendfile: path for the saved .blend file (gzipped on disk).

    Raises:
        RuntimeError: if rendering keeps failing after a bounded number of
            retries.
    """

    # Load the main blendfile
    bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

    # Load materials
    utils.load_materials(args.material_dir)

    # Set render arguments so we can get pixel coordinates later.
    # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
    # cannot be used.
    render_args = bpy.context.scene.render
    render_args.engine = "CYCLES"
    render_args.filepath = output_image
    render_args.resolution_x = args.width
    render_args.resolution_y = args.height
    render_args.resolution_percentage = 100
    render_args.tile_x = args.render_tile_size
    render_args.tile_y = args.render_tile_size
    if args.use_gpu == 1:
        # Blender changed the API for enabling CUDA at some point
        if bpy.app.version < (2, 78, 0):
            bpy.context.user_preferences.system.compute_device_type = 'CUDA'
            bpy.context.user_preferences.system.compute_device = 'CUDA_0'
        else:
            cycles_prefs = bpy.context.user_preferences.addons[
                'cycles'].preferences
            cycles_prefs.compute_device_type = 'CUDA'

    # Some CYCLES-specific stuff
    bpy.data.worlds['World'].cycles.sample_as_light = True
    bpy.context.scene.cycles.blur_glossy = 2.0
    bpy.context.scene.cycles.samples = args.render_num_samples
    bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
    bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
    if args.use_gpu == 1:
        bpy.context.scene.cycles.device = 'GPU'

    # This will give ground-truth information about the scene and its objects
    scene_struct = {
        'split': output_split,
        'image_index': output_index,
        'image_filename': os.path.basename(output_image),
        'objects': [],
        'directions': {},
    }

    # Put a plane on the ground so we can compute cardinal directions
    bpy.ops.mesh.primitive_plane_add(radius=5)
    plane = bpy.context.object

    def rand(L):
        # Uniform sample in [-L, L].
        return 2.0 * L * (random.random() - 0.5)

    # Add random jitter to camera position
    if args.camera_jitter > 0:
        for i in range(3):
            bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

    # Figure out the left, up, and behind directions along the plane and record
    # them in the scene structure
    camera = bpy.data.objects['Camera']
    plane_normal = plane.data.vertices[0].normal
    cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
    cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
    cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
    plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
    plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
    plane_up = cam_up.project(plane_normal).normalized()

    # Delete the plane; we only used it for normals anyway. The base scene file
    # contains the actual ground plane.
    utils.delete_object(plane)

    # Save all six axis-aligned directions in the scene struct
    scene_struct['directions']['behind'] = tuple(plane_behind)
    scene_struct['directions']['front'] = tuple(-plane_behind)
    scene_struct['directions']['left'] = tuple(plane_left)
    scene_struct['directions']['right'] = tuple(-plane_left)
    scene_struct['directions']['above'] = tuple(plane_up)
    scene_struct['directions']['below'] = tuple(-plane_up)

    # Add random jitter to lamp positions
    if args.key_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Key'].location[i] += rand(
                args.key_light_jitter)
    if args.back_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Back'].location[i] += rand(
                args.back_light_jitter)
    if args.fill_light_jitter > 0:
        for i in range(3):
            bpy.data.objects['Lamp_Fill'].location[i] += rand(
                args.fill_light_jitter)

    # Now make some random objects
    objects, blender_objects = add_random_objects(scene_struct, num_objects,
                                                  args, camera)

    # Render the scene and dump the scene data structure
    scene_struct['objects'] = objects
    scene_struct['relationships'] = compute_all_relationships(scene_struct)
    # CYCLES occasionally fails transiently; retry a bounded number of times
    # instead of looping forever on a persistent error. Rendering is skipped
    # entirely when the caller only wants the blendfile.
    max_render_trials = 10
    for _trial in range(max_render_trials):
        try:
            if args.save_blendfiles != 1:
                bpy.ops.render.render(write_still=True)
            break
        except Exception as e:
            print(e)
    else:
        raise RuntimeError(
            'Rendering failed {} times; giving up.'.format(max_render_trials))

    with open(output_scene, 'w') as f:
        json.dump(scene_struct, f, indent=2)

    if args.save_blendfiles == 1:
        # Save the .blend, gzip it (low compression level for speed), and
        # remove the uncompressed original.
        bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)
        with open(output_blendfile,
                  'rb') as f_in, gzip.open(output_blendfile + '.gz',
                                           'wb',
                                           compresslevel=1) as f_out:
            shutil.copyfileobj(f_in, f_out)
        os.remove(output_blendfile)
    return
コード例 #18
0
def render_scene(args, num_objects=5, output_index=0, output_split='none', output_image='render.png', output_scene='render_json', output_blendfile=None):
  """Render one randomized scene and write its ground-truth JSON.

  Variant supporting perceptual-grouping modes (position, similarity,
  continuity, good figure, closure). When any grouping mode is active the
  camera and lamps are first moved to fixed positions so the groups are
  framed consistently before random jitter is applied.

  Args:
    args: parsed argparse namespace with all rendering options.
    num_objects: number of objects to place (used by the non-grouped and
      position-grouped modes).
    output_index: integer image index recorded in the scene struct.
    output_split: dataset split name recorded in the scene struct.
    output_image: path for the rendered PNG.
    output_scene: path for the output JSON scene description.
    output_blendfile: if not None, also save the .blend file here.
  """
  #===========================================
  # Load the main blendfile
  bpy.ops.wm.open_mainfile(filepath=args.base_scene_blendfile)

  # Load materials
  utils.load_materials(args.material_dir)

  # Set render arguments so we can get pixel coordinates later.
  # We use functionality specific to the CYCLES renderer so BLENDER_RENDER
  # cannot be used.
  render_args = bpy.context.scene.render
  render_args.engine = "CYCLES"
  render_args.filepath = output_image
  render_args.resolution_x = args.width
  render_args.resolution_y = args.height
  render_args.resolution_percentage = 100
  render_args.tile_x = args.render_tile_size
  render_args.tile_y = args.render_tile_size
  if args.use_gpu == 1:
    # Blender changed the API for enabling CUDA at some point
    if bpy.app.version < (2, 78, 0):
      bpy.context.user_preferences.system.compute_device_type = 'CUDA'
      bpy.context.user_preferences.system.compute_device = 'CUDA_0'
    else:
      cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
      cycles_prefs.compute_device_type = 'CUDA'

  # Some CYCLES-specific stuff
  bpy.data.worlds['World'].cycles.sample_as_light = True
  bpy.context.scene.cycles.blur_glossy = 2.0
  bpy.context.scene.cycles.samples = args.render_num_samples
  bpy.context.scene.cycles.transparent_min_bounces = args.render_min_bounces
  bpy.context.scene.cycles.transparent_max_bounces = args.render_max_bounces
  if args.use_gpu == 1:
    bpy.context.scene.cycles.device = 'GPU'
  #==================================
  # This will give ground-truth information about the scene and its objects
  scene_struct = {
      'split': output_split,
      'image_index': output_index,
      'image_filename': os.path.basename(output_image),
      'objects': [],
      'directions': {},
  }

  # Put a plane on the ground so we can compute cardinal directions
  bpy.ops.mesh.primitive_plane_add(radius=5)
  plane = bpy.context.object


  def rand(L):
    # Uniform sample in [-L, L].
    return 2.0 * L * (random.random() - 0.5)

  # When any grouping mode is enabled, pin the camera to a fixed viewpoint
  # (before jitter) so the grouped layouts are consistently framed.
  if args.num_groups != 1 or args.group_by_continuity == 1 or args.group_by_good_figure==1 or args.group_by_closure==1 or args.group_by_similarity==1:
    bpy.data.objects['Camera'].location[0] = 14.43113
    bpy.data.objects['Camera'].location[1] = -7.18764
    bpy.data.objects['Camera'].location[2] = 20.93366


  # Add random jitter to camera position
  if args.camera_jitter > 0:
    for i in range(3):
      bpy.data.objects['Camera'].location[i] += rand(args.camera_jitter)

  print("Camera loc")
  print(bpy.data.objects['Camera'].location)


  # Figure out the left, up, and behind directions along the plane and record
  # them in the scene structure
  camera = bpy.data.objects['Camera']
  plane_normal = plane.data.vertices[0].normal
  print("Plan_normal")
  print(plane_normal)

  cam_behind = camera.matrix_world.to_quaternion() * Vector((0, 0, -1))
  print("Camera behind")
  print(cam_behind)
  cam_left = camera.matrix_world.to_quaternion() * Vector((-1, 0, 0))
  cam_up = camera.matrix_world.to_quaternion() * Vector((0, 1, 0))
  plane_behind = (cam_behind - cam_behind.project(plane_normal)).normalized()
  plane_left = (cam_left - cam_left.project(plane_normal)).normalized()
  plane_up = cam_up.project(plane_normal).normalized()

  # Delete the plane; we only used it for normals anyway. The base scene file
  # contains the actual ground plane.
  utils.delete_object(plane)

  # Save all six axis-aligned directions in the scene struct
  scene_struct['directions']['behind'] = tuple(plane_behind)
  scene_struct['directions']['front'] = tuple(-plane_behind)
  scene_struct['directions']['left'] = tuple(plane_left)
  scene_struct['directions']['right'] = tuple(-plane_left)
  scene_struct['directions']['above'] = tuple(plane_up)
  scene_struct['directions']['below'] = tuple(-plane_up)

  # Matching fixed lamp positions for the grouping modes (before jitter).
  if args.num_groups != 1 or args.group_by_continuity == 1 or args.group_by_good_figure==1 or args.group_by_closure==1 or args.group_by_similarity==1:
    bpy.data.objects['Lamp_Key'].location[0]=10.15671
    bpy.data.objects['Lamp_Back'].location[2]=10.10574
    bpy.data.objects['Lamp_Fill'].location[1]=-12.0736

  # Add random jitter to lamp positions
  if args.key_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Key'].location[i] += rand(args.key_light_jitter)
  if args.back_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Back'].location[i] += rand(args.back_light_jitter)
  if args.fill_light_jitter > 0:
    for i in range(3):
      bpy.data.objects['Lamp_Fill'].location[i] += rand(args.fill_light_jitter)

  #My Code--------------------------------

  #----------group
  # num_groups == 0 means "pick a random group count in [2, 5]".
  if args.num_groups==0:
    num_groups=random.randint(2,5)
  else:
    num_groups=args.num_groups
  #-----------end group

  # Now make some random objects: dispatch to the object-placement routine
  # matching the selected grouping mode (exactly one branch runs).
  #objects, blender_objects = add_random_objects(scene_struct, num_objects, args, camera)     #// Old code
  print("Generate Scene")
  if args.group_by_postition == 1:
    objects, blender_objects = add_random_objects_groups(scene_struct, num_groups, args, camera, num_objects)
  elif args.group_by_similarity == 1:
    objects, blender_objects = add_random_objects_group_by_similarity(scene_struct, args, camera)
  elif args.group_by_continuity ==1:
    objects, blender_objects = add_random_objects_continuity(scene_struct,args,camera)
  elif args.group_by_good_figure == 1:
    objects, blender_objects = add_random_objects_good_figure(scene_struct,args,camera)
  elif args.group_by_closure == 1:
    objects, blender_objects = add_random_objects_closure(scene_struct,args,camera)
  else:
    objects, blender_objects = add_random_objects(scene_struct,num_objects,args,camera)

  print("Generate Scene finished")

  # Render the scene and dump the scene data structure
  scene_struct['objects'] = objects
  scene_struct['relationships'] = compute_all_relationships(scene_struct)
  # NOTE(review): this retries forever on a persistent render failure;
  # consider bounding the number of attempts.
  while True:
    try:
      print("Render Scene")
      bpy.ops.render.render(write_still=True)
      break
    except Exception as e:
      print(e)

  with open(output_scene, 'w') as f:
    json.dump(scene_struct, f, indent=2)

  if output_blendfile is not None:
    bpy.ops.wm.save_as_mainfile(filepath=output_blendfile)