Code example #1
    def Render(self, sampleCount):
        # Shallow-copy the base scene and attach the lights, sensor and sampler
        currScene = Scene(self.scene)
        for light in self.light:
            currScene.addChild(light)
        currScene.configure()
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.__createSampler(sampleCount)  # build a sampler for the requested sample count
        currScene.setSampler(self.sampler)
        currScene.setDestinationFile('')

        # Create a render job and insert it into the queue
        job = RenderJob('myRenderJob', currScene, self.queue)
        job.start()
        self.queue.waitLeft(0)
        self.queue.join()

        # End of render - develop the film into a bitmap and fetch the result
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
        result_image = np.array(bitmap.getNativeBuffer())
        currSceneInfo = currScene.getAABB()
        return result_image, currSceneInfo
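The Render method above assumes a wrapper object that already owns the scene, camera, lights, render queue, and a __createSampler helper, none of which appear in the example. The following is only a minimal sketch of what that setup could look like with the standard Mitsuba 0.5 Python bindings; the class name Renderer and its constructor arguments are hypothetical, and the worker registration mirrors code example #4 below.

# Hypothetical wrapper providing the attributes used by Render() above
import multiprocessing
from mitsuba.core import Scheduler, LocalWorker, PluginManager, Properties
from mitsuba.render import RenderQueue

class Renderer(object):
    def __init__(self, scene, cam, lights):
        self.scene = scene    # base Scene instance (e.g. from SceneHandler.loadScene)
        self.cam = cam        # a configured sensor
        self.light = lights   # emitters added to each scene copy
        self.queue = RenderQueue()
        self.sampler = None
        # Start one local worker per core, as in code example #4
        scheduler = Scheduler.getInstance()
        for i in range(multiprocessing.cpu_count()):
            scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
        scheduler.start()

    def __createSampler(self, sampleCount):
        # Instantiate an independent sampler with the requested sample count
        props = Properties('independent')
        props['sampleCount'] = sampleCount
        self.sampler = PluginManager.getInstance().createObject(props)
        self.sampler.configure()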
Code example #2
    def Render(self, sampleCount, i):
        ## Create a copy of the base scene and apply this variant's camera properties (sensor position, sampler)
        currScene = Scene(self.scene)
        currScene.configure()
        pmgr = PluginManager.getInstance()
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.createSampler(sampleCount)
        currScene.setSampler(self.sampler)
        currScene.setDestinationFile('')

        ## Create a render job and insert it into the queue
        #job = RenderJob('myRenderJob'+str(i), currScene, self.queue )
        curSceneResID = self.scheduler.registerResource(currScene)
        job = RenderJob('myRenderJob' + str(i), currScene, self.queue,
                        curSceneResID)
        #job = RenderJob('myRenderJob'+str(i), currScene, self.queue, self.sceneResID)  # passing self.sceneResID so that all workers share a shallow copy of the scene
        job.start()

        self.queue.waitLeft(0)
        self.queue.join()

        ## Acquire the rendered image as a bitmap:
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)

        ## End of render - get result
        result_image = np.array(
            bitmap.buffer()) if sys.platform == 'linux2' else np.array(
                bitmap.getNativeBuffer())
        # TODO: update the Mitsuba version on Windows to the newer API - bitmap.getNativeBuffer() doesn't exist anymore
        currSceneInfo = currScene.getAABB()
        return result_image, currSceneInfo
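The platform branch at the end exists because older builds of the bindings expose the pixel buffer under different names: buffer() on the Linux build used here, getNativeBuffer() elsewhere (see the TODO). If the check recurs, a small helper keeps it in one place; this is only a sketch under that same assumption.

import sys
import numpy as np

def bitmap_to_array(bitmap):
    # Mirror the platform branch in Render() above: the Linux build
    # (Python 2, hence 'linux2') exposes buffer(), other builds expose
    # getNativeBuffer().
    if sys.platform == 'linux2':
        return np.array(bitmap.buffer())
    return np.array(bitmap.getNativeBuffer())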
Code example #3
def modifyScene(scene, index, config, pmgr, destinationFolder):

    #for i in range(number_of_renderings):
    destination = destinationFolder + '-result_%03i' % index

    # Create a shallow copy of the scene so that the queue can tell apart the two
    # rendering processes. This takes almost no extra memory
    newScene = Scene(scene)

    # Create a sensor, film & sample generator
    newSensor = createSensor(pmgr, config, index)
    newSensor.configure()
    newScene.addSensor(newSensor)
    newScene.setSensor(newSensor)
    newScene.setDestinationFile(destination)

    # if 'envmap' in config:
    # 	addEnvmap(newScene, config, pmgr)

    newScene.configure()

    return newScene
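modifyScene depends on a createSensor helper that is not shown. Below is a minimal sketch of one possible implementation, assuming a perspective sensor whose pose is read from a per-index entry in config; the keys 'origins', 'target' and 'fov' are illustrative placeholders, not names from the original project.

from mitsuba.core import PluginManager, Properties, Transform, Point, Vector

def createSensor(pmgr, config, index):
    # Build a perspective sensor; the config keys below are hypothetical
    props = Properties('perspective')
    origin = Point(*config['origins'][index])   # one camera position per rendering
    target = Point(*config['target'])
    props['toWorld'] = Transform.lookAt(origin, target, Vector(0, 0, 1))
    props['fov'] = float(config.get('fov', 45.0))
    return pmgr.createObject(props)  # the caller runs configure(), as above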
Code example #4
File: Simulation.py  Project: fdbesanto2/lessrt
def do_simulation_multiangle_seq(seqname):
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()


    scene_path = session.get_scenefile_path()
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(str(scene_path))
    scene = SceneHandler.loadScene(fileResolver.resolve(
        str(os.path.join(session.get_scenefile_path(), main_scene_xml_file))))
    scene.configure()
    scene.initialize()
    queue = RenderQueue()
    sceneResID = scheduler.registerResource(scene)
    bsphere = scene.getKDTree().getAABB().getBSphere()
    radius = bsphere.radius
    targetx, targety, targetz = bsphere.center[0], bsphere.center[1], bsphere.center[2]
    with open(seqname + ".conf", 'r') as f:
        params = json.load(f)
    obs_azimuth = params['seq1']['obs_azimuth']
    obs_zenith = params['seq2']['obs_zenith']
    cfgfile = session.get_config_file()
    with open(cfgfile, 'r') as f:
        cfg = json.load(f)
    viewR = cfg["sensor"]["obs_R"]
    mode = cfg["sensor"]["film_type"]
    # list comprehensions (rather than map) so the angle lists can be iterated more than once
    azi_arr = [float(x) for x in obs_azimuth.strip().split(":")[1].split(",")]
    zeni_arr = [float(x) for x in obs_zenith.strip().split(":")[1].split(",")]
    seq_header = multi_file_prefix + "_" + seqname
    index = 0
    for azi in azi_arr:
        for zeni in zeni_arr:
            distFile = os.path.join(session.get_output_dir(),
                                    seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(".", "_"))
            newScene = Scene(scene)
            pmgr = PluginManager.getInstance()
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            theta = zeni / 180.0 * math.pi
            phi = (azi - 90) / 180.0 * math.pi
            scale_x = radius
            scale_z = radius
            toWorld = Transform.lookAt(
                Point(targetx - viewR * math.sin(theta) * math.cos(phi), targety + viewR * math.cos(theta),
                      targetz - viewR * math.sin(theta) * math.sin(phi)),  # camera origin
                Point(targetx, targety, targetz),  # target
                Vector(0, 0, 1)  # up
            ) * Transform.scale(
                Vector(scale_x, scale_z, 1)  # field-of-view size
            )
            newSensor.setWorldTransform(toWorld)
            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(str(distFile))
            job = RenderJob('Simulation Job' + "VA_"+str(azi)+"_VZ_"+str(zeni), newScene, queue, sceneResID)
            job.start()
        queue.waitLeft(0)
        queue.join()
    # handle npy
    if mode == "spectrum" and (output_format not in ("npy", "NPY")):
        for azi in azi_arr:
            for zeni in zeni_arr:
                distFile = os.path.join(session.get_output_dir(),
                                        seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(
                                            ".", "_"))
                data = np.load(distFile + ".npy")
                bandlist = cfg["sensor"]["bands"].split(",")
                RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
                os.remove(distFile + ".npy")
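For reference, the lookAt origin computed inside the loop places the camera on a sphere of radius viewR around the target, with +Y as the vertical axis. A standalone sketch of the same geometry (the function name is illustrative):

import math

def camera_origin(target, viewR, zenith_deg, azimuth_deg):
    # Same spherical convention as the loop above: theta is the view
    # zenith, phi is offset by -90 degrees from the view azimuth, and
    # the +Y term raises the camera above the target
    theta = zenith_deg / 180.0 * math.pi
    phi = (azimuth_deg - 90) / 180.0 * math.pi
    tx, ty, tz = target
    return (tx - viewR * math.sin(theta) * math.cos(phi),
            ty + viewR * math.cos(theta),
            tz - viewR * math.sin(theta) * math.sin(phi))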