Example #1
    def Render(self, sampleCount):
        # Copy the base scene and attach the lights, sensor and sampler for this render.
        currScene = Scene(self.scene)
        for light in self.light:
            currScene.addChild(light)
        currScene.configure()
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.__createSampler(sampleCount)
        currScene.setSampler(self.sampler)

        currScene.setDestinationFile('')
        # Create a render job and insert it into the queue
        job = RenderJob('myRenderJob', currScene, self.queue)
        job.start()
        self.queue.waitLeft(0)
        self.queue.join()

        # End of render - read the result back into a numpy array
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
        result_image = np.array(bitmap.getNativeBuffer())
        currSceneInfo = currScene.getAABB()  # axis-aligned bounding box of the scene
        return result_image, currSceneInfo
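Both Render() variants on this page assume that a scheduler with local workers and a RenderQueue were created beforehand and stored on the object (self.queue, self.scheduler, self.cam, self.sampler). A minimal sketch of that setup, loosely following the Mitsuba 0.x Python documentation (the worker count and names are assumptions, and the LocalWorker constructor signature varies between versions):

import multiprocessing
from mitsuba.core import Scheduler, LocalWorker
from mitsuba.render import RenderQueue

# Start one local worker per core and a queue that tracks render jobs.
scheduler = Scheduler.getInstance()
for i in range(multiprocessing.cpu_count()):
    scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
scheduler.start()
queue = RenderQueue()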
Example #2
    def Render(self, sampleCount, i):
        ## Create a copy of the base scene and apply this variant's camera properties (sensor position, sampler).
        currScene = Scene(self.scene)
        currScene.configure()
        pmgr = PluginManager.getInstance()
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.createSampler(sampleCount)
        currScene.setSampler(self.sampler)
        currScene.setDestinationFile('')

        ## Create a render job and insert it into the queue. Registering the
        ## scene as a scheduler resource lets every worker receive a shallow
        ## copy instead of a full serialized copy of the scene.
        curSceneResID = self.scheduler.registerResource(currScene)
        job = RenderJob('myRenderJob' + str(i), currScene, self.queue,
                        curSceneResID)
        job.start()

        self.queue.waitLeft(0)
        self.queue.join()

        ## Acquire the rendered image in Bitmap format:
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)

        ## End of render - read the result back into a numpy array
        result_image = np.array(
            bitmap.buffer()) if sys.platform == 'linux2' else np.array(
                bitmap.getNativeBuffer())
        # TODO: update the Windows Mitsuba build to the newer API;
        # bitmap.getNativeBuffer() no longer exists there.
        currSceneInfo = currScene.getAABB()  # axis-aligned bounding box of the scene
        return result_image, currSceneInfo
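Because the film is developed into an ERGBA / EFloat16 bitmap, result_image comes back as a float16 array with an alpha channel. A small sketch of a follow-up conversion to an 8-bit RGB image (the helper name is mine, and it assumes values are already tonemapped into [0, 1]):

import numpy as np

def to_rgb8(result_image):
    # Hypothetical helper: drop alpha, clamp to [0, 1], quantize to 8 bit.
    rgb = np.clip(result_image[:, :, :3].astype(np.float32), 0.0, 1.0)
    return (rgb * 255.0 + 0.5).astype(np.uint8)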
Example #3
def render_scene(passes, scene_config, render_config):

    camera_params = render_config["CameraParams"]
    start = render_config["iteration_start"]
    end = render_config["iteration_end"]
    output_path = render_config["OutputPath"]
    pmgr = PluginManager.getInstance()

    for index in range(start, end):
        camera = camera_params[index]
        new_transform = camera["new_transform"]
        for p in passes:

            destination = output_path + '%03i' % (index)
            if p.type == RenderTargetType.DEPTH:
                destination += "_d"
            elif p.type == RenderTargetType.NORMAL:
                destination += "_n"

            scene = Scene()
            pass_config = scene_config  # alias: writing pass_config['integrator'] also updates scene_config
            # Set the pass integrator
            pass_config['integrator'] = p.config
            sceneResID = register_scene_config(scene, pmgr, scene_config)

            newScene = mitsuba.render.Scene(scene)
            pmgr = PluginManager.getInstance()
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            newSensor.setWorldTransform(new_transform)

            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(destination)

            # Create a render job and insert it into the queue. Note how the resource
            # ID of the original scene is provided to avoid sending the full scene
            # contents over the network multiple times.
            j = RenderJob('myRenderJob' + str(index), newScene, queue, sceneResID)
            j.start()

        queue.waitLeft(0)
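render_scene() depends on a register_scene_config() helper and a module-level queue that are not shown in this excerpt. Judging by how its return value is passed to RenderJob, the helper presumably instantiates the config dict, finalizes the scene, and registers it with the scheduler; a hypothetical reconstruction:

def register_scene_config(scene, pmgr, config):
    # Hypothetical sketch: build the scene graph from the config dict, then
    # register the finished scene with the global scheduler so render jobs
    # can share it by resource ID instead of re-serializing it per job.
    scene.addChild(pmgr.create(config))
    scene.configure()
    scene.initialize()
    return scheduler.registerResource(scene)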
Example #4
def main(input_file, output_dir, output_name, img_width, img_height, num_samples):

    pmgr = PluginManager.getInstance()

    # Bed pos
    camera_pos = Vector(0, 1.0, 2.0)
    model_pos = Vector(0.0, 0, 0.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=input_file)

    # integrator = create_integrator(RenderTargetType.AO, hide_emitters=False)
    integrator = create_integrator(RenderTargetType.DIRECT, hide_emitters=False)
    depth_integrator = create_integrator(RenderTargetType.DEPTH, hide_emitters=False)
    position_integrator = create_integrator(RenderTargetType.POSITION, hide_emitters=False)
    normal_integrator = create_integrator(RenderTargetType.NORMAL, hide_emitters=False)
    #uv_integrator = create_integrator(RenderTargetType.UV, hide_emitters=False)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, img_width, img_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type': 'scene',
        'a': a.config,
        # 'b':b.config,
        'envmap': {
            'type': 'sunsky',
            'hour': 12.0,
            'albedo': Spectrum(1.0),
            'samplingWeight': 1.0,
        },
        # 'envmap' : {
        #      'type' : 'sunsky',
        #      #'hour' : 10.0,
        #      'radiance': Spectrum(1.0),
        #      'samplingWeight': 1.0
        # },
        'sensor': sensor.config
    }

    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())
    num_views = 6
    xangles = [x for x in frange(0, 360, np.floor(360 / num_views))]
    # xangles = [x for x in frange(0,12, 1.0)]
    yangles = [0.0]
    print(yangles)
    # num_views = len(xangles) * len(yangles)
    # step_size = 360/(num_views)
    step_size = 1

    # List containing the integrators to use in Multi-Pass rendering
    passes = [integrator, depth_integrator, normal_integrator, position_integrator]

    start = time.time()
    render_count = 0
    num_scale = 1
    offset = num_scale / 2
    print("Size:", (num_views * num_scale))

    # translations = [xt for xt in frange(-0.5, 1.0, 0.5)]
    translations = []  # empty: the translation sweep is disabled, so num_images below prints 0

    num_images = len(yangles) * len(xangles) * len(translations) * len(translations) * num_scale
    print("Number of images: ", str(num_images))

    j = None

    filename = get_filename(input_file)

    # for xt in translations:
    original_x = camera_pos[0]
    # for yt in translations:
    original_y = camera_pos[1]
    for x in xangles:
        for y in yangles:

            original_Z = camera_pos[2]
            new_camera = camera_pos  # note: aliases camera_pos; components are restored from the saved originals
            new_camera[0] = original_x
            new_camera[2] = original_Z

            for p in passes:

                i = render_count
                scene = Scene()
                pass_config = scene_config

                destination = output_dir

                if p.type == RenderTargetType.DIRECT or p.type == RenderTargetType.PATH:
                    destination = os.path.join(destination, 'rgb')
                elif p.type == RenderTargetType.DEPTH:
                    destination = os.path.join(destination, 'depth')
                elif p.type == RenderTargetType.NORMAL:
                    destination = os.path.join(destination, 'normal')
                elif p.type == RenderTargetType.SH_NORMAL:
                    destination = os.path.join(destination, 'sh_normal')
                elif p.type == RenderTargetType.POSITION:
                    destination = os.path.join(destination, 'pos')
                elif p.type == RenderTargetType.UV:
                    destination = os.path.join(destination, 'uv')

                check_mkdir(destination)
                destination = os.path.join(destination, output_name + '_%03i' % (i))

                # Set the pass integrator
                pass_config['integrator'] = p.config
                sceneResID = register_scene_config(scene, pmgr, scene_config)

                # Create a shallow copy of the scene so that the queue can tell apart the two
                # rendering processes. This takes almost no extra memory
                newScene = mitsuba.render.Scene(scene)
                pmgr = PluginManager.getInstance()
                newSensor = pmgr.createObject(scene.getSensor().getProperties())

                # Calculate the rotations
                yrotation = Transform.rotate(Vector(1, 0, 0), y)
                xrotation = Transform.rotate(Vector(0, 1, 0), x)
                rotationCur = xrotation * yrotation

                # Set the new camera position, applying the rotations
                new_pos = rotationCur * new_camera
                print(new_pos)
                new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
                newSensor.setWorldTransform(new_transform)

                newFilm = pmgr.createObject(scene.getFilm().getProperties())
                newFilm.configure()
                newSensor.addChild(newFilm)
                newSensor.configure()
                newScene.addSensor(newSensor)
                newScene.setSensor(newSensor)
                newScene.setSampler(scene.getSampler())
                newScene.setDestinationFile(destination)

                # Create a render job and insert it into the queue. Note how the resource
                # ID of the original scene is provided to avoid sending the full scene
                # contents over the network multiple times.
                j = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
                j.start()

            queue.waitLeft(0)
            render_count += 1

        print("Full Set")

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)

    finish = time.time()
    print("Run Time:", finish - start)
Example #5
def init_scene():

    pmgr = PluginManager.getInstance()
    scene = Scene()
    scene.setDestinationFile('renderedResult')

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=mesh_file)
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(model_pos))

    integrator = create_integrator(RenderTargetType.DEPTH, hide_emitters=False)
    #integrator = create_integrator(IntegratorType.DIRECT, hide_emitters=True)
    #integrator = create_integrator(RenderTargetType.NORMAL, hide_emitters=True)

    print(integrator.config)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.HDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'integrator' : {
            'type' : 'multichannel',
            # 'a': {
            #     'type' : 'path'
            # },
            # 'b': {
            #     'type' : 'field',
            #     'field' : 'distance',
            #     'undefined': 0.0
            # },
            'c': {
                'type' : 'field',
                'field' : 'distance',
                'undefined': 0.0
            }
        },
        'sphere' : {
            'type' : 'sphere',
            'bsdf' : {
                'type' : 'dielectric',
                'reflectance' : Spectrum(0.4)
            }
        },
        'envmap' : {
            'type' : 'sunsky',
            'albedo' : Spectrum(0.5)
        },
        'sensor' : {
            'type' : 'perspective',
            'toWorld' : Transform.translate(Vector(0, 0, 0)),
            'sampler' : {
                'type' : 'halton',
                'sampleCount' : 64
            },
            'film' : {
                'type' : 'ldrfilm',
                'width' : 500,
                'height' : 500,
                'pixelFormat': "rgb",
                'channelNames': "normal"
            }
        },
    }

    # # Add a shape
    # scene.addChild(pmgr.create({
    #     'type' : 'sphere',
    #     'center' : Point(0, 0, 0),
    #     'radius' : 1.0,
    #     'bsdf' : {
    #         'type' : 'diffuse',
    #         'reflectance' : Spectrum(0.4)
    #     }
    # }))

    scene.addChild(pmgr.create(scene_config))
    scene.configure()


    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())
    # scene_node = pmgr.create(scene_config)
    # scene.addChild(scene_node)
    # scene.configure()

    scene.initialize()
    sceneResID = scheduler.registerResource(scene)

    num_views = 1
    step_size = 360/(num_views)
    for i in range(num_views):
        destination = 'results/result_%03i' % i
        # Create a shallow copy of the scene so that the queue can tell apart the two
        # rendering processes. This takes almost no extra memory
        newScene = mitsuba.render.Scene(scene)
        pmgr = PluginManager.getInstance()
        newSensor = pmgr.createObject(scene.getSensor().getProperties())

        rotationCur = Transform.rotate(Vector(0, 1, 0), i*step_size)
        new_pos = rotationCur*camera_pos
        new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
        newSensor.setWorldTransform(new_transform)

        newFilm = pmgr.createObject(scene.getFilm().getProperties())
        newFilm.configure()
        newSensor.addChild(newFilm)
        newSensor.configure()
        newScene.addSensor(newSensor)
        newScene.setSensor(newSensor)
        newScene.setSampler(scene.getSampler())
        newScene.setDestinationFile(destination)
        # Create a render job and insert it into the queue. Note how the resource
        # ID of the original scene is provided to avoid sending the full scene
        # contents over the network multiple times.
        job = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
        job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
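Note that none of the init_scene() variants tear the render system down; if the process should exit cleanly after the final queue.waitLeft(0), something along these lines is typically needed (a sketch, assuming the scheduler/queue setup shown earlier):

# Drain any remaining jobs, then release the queue and stop the workers.
queue.waitLeft(0)
queue.join()
Scheduler.getInstance().stop()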
Example #6
def init_scene():

    pmgr = PluginManager.getInstance()
    scene = Scene()
    scene.setDestinationFile('renderedResult')

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename='/media/adrian/Data/Datasets/train/02691156/model_0000003.obj')
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(model_pos))

    integrator = Integrator(Integrator.DIRECT, hide_emitters=True)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'a': a.config,
        'envmap' : {
             'type' : 'sunsky',
             'hour' : 12.0,
             'albedo' : Spectrum(1.0),
             'samplingWeight' : 1.0,
        },
        # 'envmap' : {
        #      'type' : 'constant',
        #      #'hour' : 10.0,
        #      'radiance' : Spectrum(1.0),
        #      'samplingWeight' : 0.5
        # },
        # 'integrator' : {
        #     'type' : 'multichannel',
        #     'depth' : {
        #         'type' : 'field',
        #         'field' : 'distance'
        #     },
        # },
        'integrator' : integrator.config,
        'sensor' : sensor.config
    }

    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())
    scene_node = pmgr.create(scene_config)
    scene.addChild(scene_node)
    scene.configure()

    scene.initialize()
    sceneResID = scheduler.registerResource(scene)

    num_views = 6
    step_size = 360/(num_views)
    for i in range(num_views):
        destination = 'results/result_%03i' % i
        # Create a shallow copy of the scene so that the queue can tell apart the two
        # rendering processes. This takes almost no extra memory
        newScene = mitsuba.render.Scene(scene)
        pmgr = PluginManager.getInstance()
        newSensor = pmgr.createObject(scene.getSensor().getProperties())

        rotationCur = Transform.rotate(Vector(0, 1, 0), i*step_size)
        new_pos = rotationCur*camera_pos
        new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
        newSensor.setWorldTransform(new_transform)

        newFilm = pmgr.createObject(scene.getFilm().getProperties())
        newFilm.configure()
        newSensor.addChild(newFilm)
        newSensor.configure()
        newScene.addSensor(newSensor)
        newScene.setSensor(newSensor)
        newScene.setSampler(scene.getSampler())
        newScene.setDestinationFile(destination)
        # Create a render job and insert it into the queue. Note how the resource
        # ID of the original scene is provided to avoid sending the full scene
        # contents over the network multiple times.
        job = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
        job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
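The init_scene() variants also read module-level settings (mesh_file, num_samples, image_width, image_height) and the global queue/scheduler defined elsewhere in the module. Hypothetical placeholder values for running one of them standalone:

# Hypothetical module-level settings assumed by the init_scene() variants.
mesh_file = 'model.ply'   # placeholder path to the input mesh
num_samples = 32
image_width = 256
image_height = 256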
Example #7
def init_scene():

    pmgr = PluginManager.getInstance()

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    #camera_pos = Vector(4, 44.0, -7.0)
    #model_pos = Vector(0.0, 0.0, 0.0)

    #camera_matrix = Transform(Matrix4x4([[0.1,0.017,-1.0,0.0],[0.0,1.0,0.0,0.1],[1.0,0.0,0.1,0.0],[4.3,-6.0,-7.0,1.0]]))

    cube_pos = Vector(-0.5, 0.0, -1.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=mesh_file)
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(cube_pos))

    #integrator = create_integrator(RenderTargetType.AO, hide_emitters=False)
    integrator = create_integrator(RenderTargetType.DIRECT, hide_emitters=True)
    depth_integrator = create_integrator(RenderTargetType.INDEX, hide_emitters=True)  # INDEX target despite the variable name

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'a': a.config,
        # 'b':b.config,
        # 'envmap' : {
        #      'type' : 'sunsky',
        #      'hour' : 12.0,
        #      'albedo' : Spectrum(1.0),
        #      'samplingWeight' : 1.0,
        # },
        # 'envmap' : {
        #      'type' : 'sunsky',
        #      #'hour' : 10.0,
        #      'radiance': Spectrum(1.0),
        #      'samplingWeight': 1.0
        # },
        'sensor' : sensor.config
    }

    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())

    yangles = range(0,1)
    xangles = range(0,1)
    num_views = len(xangles) * len(yangles)
    #step_size = 360/(num_views)
    step_size = 1

    # List containing the integrators to use in multi-pass rendering
    passes = [integrator, depth_integrator]

    start = time.time()
    render_count = 0
    num_scale = 1
    offset = num_scale/2
    print ("Size:", (num_views*num_scale))

    for x in xangles:
        for y in yangles:
            original_Z = camera_pos[2]
            for s in range(num_scale):

                # Offset the camera depth for this scale step
                new_camera = camera_pos  # aliases camera_pos, as in main() above
                z = (s - offset)
                print("Z:", z)
                print(new_camera[2])
                new_camera[2] = original_Z + z
                print(new_camera[2])

                for p in passes:

                    i = render_count
                    scene = Scene()
                    pass_config = scene_config

                    destination = 'results/%03i' % i
                    if p.type == RenderTargetType.DEPTH:
                        destination += "_d"
                    elif p.type == RenderTargetType.NORMAL:
                        destination += "_n"

                    # Set the pass integrator
                    pass_config['integrator'] = p.config
                    sceneResID = register_scene_config(scene, pmgr, scene_config)

                    # Create a shallow copy of the scene so that the queue can tell apart the two
                    # rendering processes. This takes almost no extra memory
                    newScene = mitsuba.render.Scene(scene)
                    pmgr = PluginManager.getInstance()
                    newSensor = pmgr.createObject(scene.getSensor().getProperties())

                    # Calculate the rotations
                    yrotation = Transform.rotate(Vector(1, 0, 0), y)
                    xrotation = Transform.rotate(Vector(0, 1, 0), x)
                    rotationCur = xrotation * yrotation

                    # Set the new camera position, applying the rotations
                    new_pos = rotationCur * new_camera
                    new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
                    newSensor.setWorldTransform(new_transform)

                    newFilm = pmgr.createObject(scene.getFilm().getProperties())
                    newFilm.configure()
                    newSensor.addChild(newFilm)
                    newSensor.configure()
                    newScene.addSensor(newSensor)
                    newScene.setSensor(newSensor)
                    newScene.setSampler(scene.getSampler())
                    newScene.setDestinationFile(destination)

                    # Create a render job and insert it into the queue. Note how the resource
                    # ID of the original scene is provided to avoid sending the full scene
                    # contents over the network multiple times.
                    j = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
                    j.start()

                render_count += 1


    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)

    finish = time.time()
    print("Run Time:", finish-start)