コード例 #1
0
def render(scene, filename):
    """Render *scene* into *filename* using one local worker per CPU core,
    then print the renderer statistics."""
    sched = Scheduler.getInstance()
    for core in range(multiprocessing.cpu_count()):
        sched.registerWorker(LocalWorker(core, "wrk{}".format(core)))
    sched.start()

    render_queue = RenderQueue()
    scene.setDestinationFile(filename)
    RenderJob("myRenderJob", scene, render_queue).start()
    # Block until every queued job has finished, then release the queue.
    render_queue.waitLeft(0)
    render_queue.join()
    print(Statistics.getInstance().getStats())
コード例 #2
0
def render_trajectory_frames(mesh_path: Tuple[Path, Path],
                             trajectory: List[Transform],
                             queue: RenderQueue,
                             shutter_time: float,
                             width: int = 1920,
                             height: int = 1440,
                             fov: float = 60.,
                             height_unused_guard: None = None,
                             num_samples: int = 32) -> None:
    """
    Render a camera trajectory through the mesh loaded from mesh_path[0],
    one frame per Transform in *trajectory*.
    :param mesh_path: (input_filepath, output_dirpath) pair of the mesh to render
    :param trajectory: Mitsuba Transforms, one per camera pose
    :param queue: the shared Mitsuba render queue used for all frames
    :param shutter_time: camera shutter time; controls motion blur between
        consecutive frames
    :param width: forwarded to cameras.create_animated_sensors
    :param height: forwarded to cameras.create_animated_sensors
    :param fov: field of view, forwarded to cameras.create_animated_sensors
    :param num_samples: forwarded to cameras.create_animated_sensors
    :return: None; frames are written as PNGs into mesh_path[1]
    """
    input_path, output_path = mesh_path

    # Build the mesh object once; every frame shares it.
    blue_mesh = mesh.prepare_ply_mesh(
        input_path, util.get_predefined_spectrum('light_blue'))

    # One animated sensor per trajectory pose.
    sensors = cameras.create_animated_sensors(trajectory, shutter_time, width,
                                              height, fov, num_samples)

    scene = util.construct_simple_scene([blue_mesh], sensors[0])

    with tqdm(
            total=len(sensors),
            bar_format=
            'Total {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}] {desc}',
            dynamic_ncols=True) as progress:
        util.redirect_logger(tqdm.write, EError, progress)
        progress.write(input_path.stem)
        for idx, frame_sensor in enumerate(sensors):
            # Swap in this frame's sensor and output file, then render.
            scene.setSensor(frame_sensor)
            scene.setDestinationFile(
                str(output_path / f'{input_path.stem}-{idx}.png'))
            frame_job = RenderJob(f'Render-{input_path.stem}-{idx}', scene, queue)
            frame_job.start()
            # Render one frame at a time: drain the queue before the next.
            queue.waitLeft(0)
            queue.join()
            progress.update()
コード例 #3
0
    def initializeMitsuba(self):
        """Spin up the Mitsuba scheduler and route its log output into Qt."""
        # One local worker per CPU core.
        self.scheduler = Scheduler.getInstance()
        for core in range(0, multiprocessing.cpu_count()):
            self.scheduler.registerWorker(LocalWorker(core, 'wrk%i' % core))

        self.scheduler.start()
        # Queue used to track render jobs.
        self.queue = RenderQueue()
        # Plugin manager reference, kept for scene construction elsewhere.
        self.pmgr = PluginManager.getInstance()

        # Bridge Mitsuba log/progress messages into Python. `self2` is the
        # appender's own instance; `self` (the window) is captured by the
        # closure so progress can be forwarded to the GUI.
        class CustomAppender(Appender):
            def append(self2, logLevel, message):
                print(message)

            def logProgress(self2, progress, name, formatted, eta):
                # Hand progress to the main thread asynchronously.
                self.renderProgress.emit(progress)

        mts_logger = Thread.getThread().getLogger()
        mts_logger.setLogLevel(EWarn)  # warnings and errors only
        mts_logger.clearAppenders()
        mts_logger.addAppender(CustomAppender())

    def closeEvent(self, e):
        """Qt close hook: cancel the running job and stop the scheduler."""
        self.job.cancel()
        # Wait for the cancelled job to drain from the queue before
        # shutting the scheduler down.
        self.queue.join()
        self.scheduler.stop()
コード例 #4
0
    def __init__(self):
        """Locate the Mitsuba distribution, configure logging, set default
        render parameters, and register one local worker per CPU core.

        Raises:
            Exception: if the MITSUBA_DIR environment variable is not set.
        """
        # add path for the Python extension module
        if 'MITSUBA_DIR' not in os.environ:
            raise Exception(
                'Set MITSUBA_DIR env variable, or source Mitsuba setpath.sh')
        # NOTE(review): hard-coded '2.7' binding path — this targets the
        # legacy Mitsuba 0.x Python 2.7 bindings.
        MITSUBA_DIR = os.path.join(os.environ['MITSUBA_DIR'], 'dist')
        sys.path.append(MITSUBA_DIR + os.sep + 'python' + os.sep + '2.7')

        # Ensure python can find Mitsuba core libraries
        os.environ['PATH'] = MITSUBA_DIR + os.pathsep + os.environ['PATH']

        # set up logger
        FORMAT = '%(asctime)-15s [%(levelname)s] %(message)s'
        logging.basicConfig(format=FORMAT)
        self.log = logging.getLogger('mts-render')
        self.log.setLevel(logging.INFO)

        # default setup
        self.setup = {
            'integrator': 'path',  # choices: path, mlt, vpl, ao
            'emitter': 'constant',
            'sensor': 'perspective',
            'film': 'ldrfilm',
            'sampler': 'ldsampler',
            'pixelFormat': 'rgba',
            'exposure': 0.0,  #Exposure value e, scales radiance by 2^e
            'banner': False,
            'camera_up': Vector(0, 1, 0),
            'eye': Point(0, 0, 0),
            'target': Point(0, 0, 0),
            'sampleCount': 256,
            'fov': 90.0,
            'width': 640,
            'height': 480,
        }

        # Start up the scheduling system
        self.scheduler = Scheduler.getInstance()
        for i in range(0, multiprocessing.cpu_count()):
            self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
        # NOTE(review): unlike the other render paths in this project,
        # scheduler.start() is never called here — presumably a later
        # method starts it; confirm before relying on this constructor.

        self.queue = RenderQueue()
        self.pmgr = PluginManager.getInstance()
コード例 #5
0
        def __init__(self, base_path, scene_name, params):
                """Load <scene_name>.xml from the scene's mitsuba folder and
                start a local scheduler with one worker per CPU core."""
                self.params = params
                self.light = []
                # Thread file resolver, extended with the scene directory so
                # relative references inside the XML resolve correctly.
                self.fileResolver = Thread.getThread().getFileResolver()
                self.fileResolver.appendPath(base_path + '/' + scene_name + '/mitsuba')
                scene_params = StringMap()
                scene_params['myParameter'] = 'value'
                # Parse the XML scene description.
                self.scene = SceneHandler.loadScene(
                        self.fileResolver.resolve(scene_name + '.xml'), scene_params)

                # One local worker per core, then start scheduling.
                self.scheduler = Scheduler.getInstance()
                for core in range(multiprocessing.cpu_count()):
                        self.scheduler.registerWorker(LocalWorker(core, 'wrk%i' % core))
                self.scheduler.start()
                # Queue for tracking render jobs; register the scene once so
                # jobs can reference it by resource ID.
                self.queue = RenderQueue()
                self.sceneResID = self.scheduler.registerResource(self.scene)
コード例 #6
0
    def __init__(self, base_path, scene_name, params, screenParams):
        """Load the scene XML, add the scene lights, and start a local
        scheduler (worker count capped at 30 unless running on AWS)."""
        self.params = params
        self.screenParams = screenParams
        self.light = []

        # Resolve files relative to the scene's mitsuba directory.
        self.fileResolver = Thread.getThread().getFileResolver()
        self.fileResolver.appendPath(base_path + '/' + scene_name + '/mitsuba')

        xml_params = StringMap()
        xml_params['myParameter'] = 'value'
        self.scene = SceneHandler.loadScene(
            self.fileResolver.resolve(scene_name + '.xml'), xml_params)

        ## Emitters: a diffusive screen composed of multiple sub-screens.
        # TODO: fix overriding of self.SetWideScreen(params['screenWidth'],
        # params['screenHeight'], params['resXScreen'], params['resYScreen'],
        # params['screenZPos'], params['variantRadiance'])
        self.addSceneLights()

        ## Rendering multiple versions of the scene simultaneously.
        self.scene.initialize()
        self.scheduler = Scheduler.getInstance()
        # One worker per core; capped at 30 on non-AWS machines.
        if 'SYS_NAME' in os.environ and os.environ['SYS_NAME'] == 'AWS':
            worker_count = multiprocessing.cpu_count()
        else:
            worker_count = min(multiprocessing.cpu_count(), 30)
        for core in range(worker_count):
            self.scheduler.registerWorker(LocalWorker(core, 'wrk%i' % core))
        self.scheduler.start()

        ## Track render jobs; register the scene once for reuse by job ID.
        self.queue = RenderQueue()
        self.sceneResID = self.scheduler.registerResource(self.scene)
コード例 #7
0
def main():
    """Build the demo scene and render it to 'renderedResult'."""
    scene = makeScene()

    # One local worker per CPU core.
    sched = Scheduler.getInstance()
    for core in range(0, multiprocessing.cpu_count()):
        sched.registerWorker(LocalWorker(core, "wrk{}".format(core)))
    sched.start()

    job_queue = RenderQueue()

    scene.setDestinationFile("renderedResult")

    RenderJob("MyRenderJob", scene, job_queue).start()

    # Block until rendering finishes, then release the queue.
    job_queue.waitLeft(0)
    job_queue.join()

    print(Statistics.getInstance().getStats())
コード例 #8
0
def render(scene, filename):
    """Render *scene* to *filename* and print renderer statistics."""
    scheduler = Scheduler.getInstance()
    for worker_id in range(multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(worker_id, "wrk{}".format(worker_id)))
    scheduler.start()

    q = RenderQueue()
    scene.setDestinationFile(filename)
    render_job = RenderJob("myRenderJob", scene, q)
    render_job.start()
    # Synchronous render: wait for completion, then tear the queue down.
    q.waitLeft(0)
    q.join()
    print(Statistics.getInstance().getStats())
コード例 #9
0
def renderScene(scene, index, sceneResID):
    """Render one scene variant synchronously.

    *sceneResID* is the scheduler resource ID of the scene, passed to the
    job so the full scene contents are not re-sent over the network for
    every render.
    """
    job_queue = RenderQueue()

    render_job = RenderJob('myRenderJob' + str(index), scene, job_queue, sceneResID)
    render_job.start()

    # Wait for all jobs to finish and release resources.
    job_queue.waitLeft(0)
    job_queue.join()
コード例 #10
0
def prepare_queue(num_local_workers, remote_stream=None) -> RenderQueue:
    """
    Prepare a Mitsuba render queue with the given amount of workers.
    :param num_local_workers: number of local CPU rendering workers (one
        per fully-used core during rendering)
    :param remote_stream: optional iterable of streams, one RemoteWorker
        is registered per stream (TODO in original)
    :return: a Mitsuba RenderQueue object
    """
    scheduler = Scheduler.getInstance()
    queue = RenderQueue()
    # Local workers first...
    for worker_idx in range(num_local_workers):
        scheduler.registerWorker(
            LocalWorker(worker_idx, f'LocalWorker-{worker_idx}'))
    # ...then any remote workers, one per provided stream.
    if remote_stream is not None:
        for idx, stream in enumerate(remote_stream):
            scheduler.registerWorker(RemoteWorker(f'RemoteWorker-{idx}', stream))
    scheduler.start()

    return queue
コード例 #11
0
ファイル: Simulation.py プロジェクト: fdbesanto2/lessrt
def do_simulation_multi_spectral_py():
    """Run a multi-spectral simulation through the bundled Mitsuba bindings.

    Renders the configured scene to <output_dir>/<prefix>_VZ=..._VA=... and
    converts the resulting .npy image to the configured raster format.

    NOTE(review): targets the Python *2.7* Mitsuba bindings (see the
    sys.path line below); the bytes+str concatenations (distFile + ".npy")
    only work on Python 2 — confirm before porting to Python 3.
    Relies on module-level globals: current_rt_program, session,
    spectral_img_prefix, main_scene_xml_file, output_format, np,
    RasterHelper, log.
    """
    # Make the bundled Mitsuba bindings and native libraries discoverable.
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    # One local worker per CPU core.
    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    # Output file name encodes viewing zenith/azimuth from the session config.
    cfgfile = session.get_config_file()
    f = open(cfgfile, 'r')
    cfg = json.load(f)
    distFileName = spectral_img_prefix + "_VZ=" + str(cfg["observation"]["obs_zenith"]) + \
                   "_VA=" + str(cfg["observation"]["obs_azimuth"])
    distFile = os.path.join(session.get_output_dir(), distFileName).encode("utf-8")
    scene_file_path = os.path.join(session.get_scenefile_path(), main_scene_xml_file).encode("utf-8")

    # Resolve and load the scene, then render synchronously.
    scene_path = session.get_scenefile_path().encode("utf-8")
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(scene_path)
    scene = SceneHandler.loadScene(fileResolver.resolve(scene_file_path))
    scene.setDestinationFile(distFile)
    scene.configure()
    scene.initialize()
    queue = RenderQueue()
    # Register the scene once so the job references it by resource ID.
    sceneResID = scheduler.registerResource(scene)
    job = RenderJob(('Simulation Job '+distFileName).encode("utf-8"), scene, queue, sceneResID)
    job.start()
    queue.waitLeft(0)
    queue.join()

    # Convert the raw .npy result to the requested raster format, if any.
    if output_format not in ("npy", "NPY") and os.path.exists(distFile + ".npy"):
        data = np.load(distFile + ".npy")
        bandlist = cfg["sensor"]["bands"].split(",")
        RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
        os.remove(distFile + ".npy")
    log("INFO: Finished")
コード例 #12
0
    def __run_mitsuba(self):
        """Configure the scene, render it synchronously with one worker per
        CPU core, report statistics, and stop the scheduler."""
        self.mitsuba_scene.configure()

        sched = Scheduler.getInstance()
        for core in range(multiprocessing.cpu_count()):
            sched.registerWorker(LocalWorker(core, "worker_{}".format(core)))
        sched.start()

        render_queue = RenderQueue()
        self.mitsuba_scene.setDestinationFile(self.image_name)

        render_job = RenderJob("render job: {}".format(self.image_name),
                               self.mitsuba_scene, render_queue)
        render_job.start()

        # Block until rendering completes, then release the queue.
        render_queue.waitLeft(0)
        render_queue.join()

        print(Statistics.getInstance().getStats())
        sched.stop()
コード例 #13
0
ファイル: MaSsMitsuba.py プロジェクト: Angel07/MaSs
class MitsubaDemo(QDialog):
	"""Qt dialog driving an interactive Mitsuba render: shows the image as
	it renders, with Stop / Refresh / Save buttons and a progress bar.

	NOTE(review): window centering uses Win32 GetSystemMetrics and icon
	paths use backslashes — Windows-only as written.
	"""

	# Emitted asynchronously with the render progress value.
	renderProgress = Signal(int)

	def __init__(self, parent, width, height, barHeight, progressHeight, folderPath, sceneName, mitsubaPath, frameNumber, scenePath, renderWindowPosX, renderWindowPosY, pluginPath):
		"""Build the render window UI, load the scene, and start the job."""
		QDialog.__init__( self, parent )

		#global isWindowExist
		#isWindowExist = True
		#global window
		#window = self

		# True while a render is in flight; cleared when rendering finishes.
		self.startRender = True

		self.folderPath = folderPath
		self.mitsubaPath = mitsubaPath
		self.frameNumber = frameNumber
		self.scenePath = scenePath

		# Initialize Mitsuba
		# (-1, -1) means "center the window on the primary screen".
		if renderWindowPosX == -1 and renderWindowPosY == -1:
			self.setGeometry((GetSystemMetrics(0) - width - 20) / 2,(GetSystemMetrics(1) - height - barHeight - progressHeight - 40)/2, width + 20, height + barHeight + progressHeight + 40)
		else:
			self.setGeometry(renderWindowPosX, renderWindowPosY, width + 20, height + barHeight + progressHeight + 40)

		self.Colorize()

		# Control buttons: stop, refresh (re-render) and save-to-image.
		btnStop = QtGui.QPushButton("Stop", self)
		btnStop.setMaximumSize(70, barHeight)
		btnRefresh = QtGui.QPushButton("Refresh", self)
		btnRefresh.setMaximumSize(80, barHeight)
		btnSave = QtGui.QPushButton("Save", self)
		btnSave.setMaximumSize(70, barHeight)

		self.connect(btnStop, SIGNAL('clicked()'), self.stopMethod)
		self.connect(btnRefresh, SIGNAL('clicked()'), self.refreshMethod)
		self.connect(btnSave, SIGNAL('clicked()'), self.saveMethod)
		doneLabel = QLabel()
		font = QtGui.QFont('Tahoma', 14)
		font.setBold(True)
		doneLabel.setFont(font)
		doneLabel.setText("Rendering...")
		doneLabel.setStyleSheet("QLabel { color: #d37700; }");

		sIcon = QIcon(pluginPath + "\\icons\\" +  "save_3118.png")
		btnSave.setIcon(sIcon)

		stopIcon = QIcon(pluginPath + "\\icons\\" +  "stop_32_1340.png")
		btnStop.setIcon(stopIcon)

		rIcon = QIcon(pluginPath + "\\icons\\" +  "view-refresh_2730.png")
		btnRefresh.setIcon(rIcon)

		# Label that displays the (partially) rendered image.
		imgLabel = QLabel()

		qp = QtGui.QPainter()

		layout = QGridLayout(self)
		layout.setSpacing(10)
		layout.addWidget(btnSave, 1, 0)
		layout.addWidget(btnRefresh, 1, 1)
		layout.addWidget(btnStop, 1, 2)
		layout.addWidget(imgLabel, 2, 0, 1, 4)

		# Start the scheduler/queue, then load and render the scene.
		self.initializeMitsuba()
		self.scene = SceneHandler.loadScene(folderPath + "\\" + sceneName)

		self.show()

		self.job = RenderJob('rjob', self.scene, self.queue)

		self.job.setInteractive(True)
		# Initialize the user interface
		progress = QtGui.QProgressBar(self)
		layout.addWidget(doneLabel, 3, 0, 1, 3)
		layout.addWidget(progress, 3, 3)

		self.rwidget = RenderWidget(self, self.queue, self.scene.getFilm().getSize())
		self.setWindowTitle('Mitsuba render window')
		# Update the image/progress on each render event; flip the label
		# once the rendering is done.
		def renderingUpdated(event):
			progress.setValue(100 * self.rwidget.extProgress / (self.rwidget.extIMG.width() * self.rwidget.extIMG.height()))
			imgLabel.setPixmap(QPixmap.fromImage(self.rwidget.extIMG));
			if event == MitsubaRenderBuffer.RENDERING_FINISHED:
				lm("FINISH!!!")
				self.startRender = False
				#progress.hide()
				doneLabel.setText("Done!")
				doneLabel.setStyleSheet("QLabel { color: #00d328; }");

		self.rwidget.renderingUpdated.connect(renderingUpdated, Qt.QueuedConnection)
		# Start the rendering process
		self.job.start()


	def initializeMitsuba(self):
		"""Restart the scheduler with one worker per core and hook logging."""
		# Start up the scheduling system with one worker per local core
		self.scheduler = Scheduler.getInstance()
		if self.scheduler.isRunning() == True:
			self.scheduler.stop()
		for i in range(0, multiprocessing.cpu_count()):
			self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
		self.scheduler.start()

		# Create a queue for tracking render jobs
		self.queue = RenderQueue()
		# Get a reference to the plugin manager
		self.pmgr = PluginManager.getInstance()

		# Process Mitsuba log and progress messages within Python.
		# `self2` is the appender instance; `self` (the dialog) is captured
		# by the closure so progress reaches the GUI thread.
		class CustomAppender(Appender):
			def append(self2, logLevel, message):
				print(message)
			def logProgress(self2, progress, name, formatted, eta):
				# Asynchronously notify the main thread
				self.renderProgress.emit(progress)

		logger = Thread.getThread().getLogger()
		logger.setLogLevel(EWarn) # Display warning & error messages
		logger.clearAppenders()
		logger.addAppender(CustomAppender())

	def stopMethod(self):
		"""Unregister all workers, drain the queue, and stop the scheduler."""
		lm("stop workers=" + str(self.scheduler.getWorkerCount()))
		# Walk the worker list from the back down to index 0.
		i = self.scheduler.getWorkerCount() - 1
		while (i > 0 or i == 0):
			worker = self.scheduler.getWorker(i)
			self.scheduler.unregisterWorker(worker)
			i = i - 1
		self.queue.join()
		self.scheduler.stop()


	def closeEvent(self, e):
		# Qt close hook: shut everything down when the window closes.
		self.stopMethod()


	def keyPressEvent(self, e):
		# Escape closes the render window.
		if e.key() == Qt.Key_Escape:
			self.close()

	def refreshMethod(self):
		"""Re-render the current frame, keeping the window position."""
		Refresh(self.frameNumber, self, self.geometry().x(), self.geometry().y())

	def saveMethod(self):
		"""Save the rendered image to a user-chosen file (only when done)."""
		if self.startRender == False:
			filename = QtGui.QFileDialog.getSaveFileName(self, 'Open file', self.folderPath, "Images (*.png *.jpg)")
			self.rwidget.extIMG.save(filename)

	def Colorize(self):
		"""Apply the dark Qt stylesheet to the dialog."""
		self.strStyleSheet = """
                                QDialog { background-color: #222; color: #FFF; }


                                QLabel { background-color: #222; }

                                

                                QPushButton { 
                                                background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,
                                                    stop: 0 #555,
                                                    stop: 1 #444 );
                                                border: 3;
                                                border-radius: 3px;
                                                color: #EEE;
                                                padding: 20px 10px;
                                                }
                                QPushButton:hover { 
                                                    background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,
                                                    stop: 0 #666,
                                                    stop: 1 #555 );
                                                    border: 0.5px outset #444; }
                                QPushButton:pressed { 
                                                        background: QLinearGradient( x1: 0, y1: 0, x2: 0, y2: 1,
                                                        stop: 0 #666,
                                                        stop: 1 #444 );
                                                        border: 0.5px outset #444; }
                                QProgressBar{
													    border: 2px solid grey;
													    border-radius: 5px;
													    text-align: center
													}

                            """
		self.setStyleSheet(self.strStyleSheet)
コード例 #14
0
ファイル: bunny.py プロジェクト: theodorekim/QUIJIBO
paramMap = StringMap()
# Load the bunny scene from its absolute XML path.
scene = SceneHandler.loadScene(
    fileResolver.resolve(
        "/Users/tkim/CONF/QUIJIBO/renders/python/bunny_zoom_0_1.xml"),
    paramMap)

from mitsuba.core import *
from mitsuba.render import RenderQueue, RenderJob
import multiprocessing

scheduler = Scheduler.getInstance()
# Start up the scheduling system with one worker per local core
# (worker count is hard-coded to 4 here, not cpu_count()).
for i in range(0, 4):
    scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
scheduler.start()
queue = RenderQueue()

import numpy as np
from scipy import *

# Two key camera poses (target / origin / up vectors). Presumably these
# are interpolated across the 60 frames below — TODO confirm against the
# remainder of the script (not visible here).
target0 = Vector(0.681017, 0.344339, 0.841651)
origin0 = Vector(1.06275, -0.147389, 1.62427)
up0 = Vector(-0.180728, -0.870101, -0.458544)

target1 = Vector(0.0329559, 0.23162, 0.731513)
origin1 = Vector(0.975366, 0.0865425, 0.430146)
up1 = Vector(-0.0605996, -0.960187, 0.272722)

sensor = scene.getSensor()

# Number of frames in the camera animation.
frames = 60
コード例 #15
0
def renderVPLS(vpls, cam, scene, target):
    '''Render the VPLs (virtual point lights) lying on the chosen plane as
    emissive spheres, seen from *cam*. The result will be an image used to
    define the 3D space where the camera and object can be placed in the
    environment. *target* can be either 'roof' or 'floor'.

    NOTE(review): uses xrange, so this is Python 2 code. The *scene*
    parameter is only read for HeightRoof/HeightFloor and is then rebound
    to a fresh Scene() below — the caller's scene itself is not rendered.
    '''

    pmgr = PluginManager.getInstance()
    scheduler = Scheduler.getInstance()

    # Pick the plane (roof or floor) whose VPLs should be rendered.
    if (target == 'roof'):
        target_height = scene.HeightRoof
    else:
        target_height = scene.HeightFloor

    # Start up the scheduling system with one worker per local core
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    # Create a queue for tracking render jobs
    queue = RenderQueue()

    # VPL records appear to be stored flat, 4 rows per light; vpls[0][1]
    # holds the light count — TODO confirm against the producer of vpls.
    nVPLS = int(vpls[0][1]) * 4

    # Fresh scene containing only the selected VPLs as emissive spheres.
    scene = Scene()

    for i in xrange(1, nVPLS, 4):
        if (float(vpls[i][2]) == target_height):
            scene.addChild(
                pmgr.create({
                    'type':
                    'sphere',
                    'center':
                    Point(float(vpls[i][1]), float(vpls[i][2]),
                          float(vpls[i][3])),
                    'radius':
                    1.0,
                    'emitter':
                    pmgr.create({
                        'type': 'area',
                        'radiance': Spectrum(10.),
                    })
                }))

    # Perspective sensor matching the caller's camera description.
    scene.addChild(
        pmgr.create({
            'type':
            'perspective',
            'toWorld':
            Transform.lookAt(
                Point(cam.origin[0], cam.origin[1], cam.origin[2]),
                Point(cam.target[0], cam.target[1], cam.target[2]),
                Vector(cam.up[0], cam.up[1], cam.up[2])),
            'fov':
            cam.fov,
            'film': {
                'type': 'ldrfilm',
                'width': cam.width,
                'height': cam.height,
                'banner': False,
            },
            'sampler': {
                'type': 'halton',
                'sampleCount': 1
            },
        }))

    # Direct illumination integrator is enough for pure emitters.
    scene.addChild(pmgr.create({'type': 'direct'}))
    scene.configure()

    if (target == 'roof'):
        filename = 'renderVPLSRoof'
    else:
        filename = 'renderVPLSFloor'

    scene.setDestinationFile(filename)

    # Create a render job and insert it into the queue
    job = RenderJob('myRenderJob', scene, queue)
    job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
    queue.join()

    scheduler.stop()
コード例 #16
0
class MitsubaDemo(QMainWindow):
    """PyQt demo window that renders a Mitsuba scene interactively.

    A scheduler with one LocalWorker per CPU core renders a small
    procedurally built scene (sphere + sky envmap + perspective sensor)
    into a RenderWidget; Mitsuba's progress callbacks are forwarded to a
    status-bar QProgressBar through the renderProgress signal.
    """

    # Emitted from Mitsuba's logger callback with the render progress value.
    renderProgress = Signal(int)

    def __init__(self):
        super(MitsubaDemo, self).__init__()
        # Initialize Mitsuba
        self.initializeMitsuba()
        self.job = self.createRenderJob()
        self.job.setInteractive(True)
        # Initialize the user interface
        status = self.statusBar()
        self.rwidget = RenderWidget(self, self.queue, self.scene.getFilm().getSize())
        progress = QProgressBar(status)
        status.setContentsMargins(0, 0, 5, 0)
        status.addPermanentWidget(progress)
        status.setSizeGripEnabled(False)
        self.setWindowTitle('Mitsuba/PyQt demo')
        self.setCentralWidget(self.rwidget)

        # Hide the progress bar once the rendering is done
        def renderingUpdated(event):
            if event == MitsubaRenderBuffer.RENDERING_FINISHED:
                status.showMessage("Done.")
                progress.hide()

        self.renderProgress.connect(progress.setValue, Qt.QueuedConnection)
        self.rwidget.renderingUpdated.connect(renderingUpdated, Qt.QueuedConnection)

        # Start the rendering process
        status.showMessage("Rendering ..")
        self.job.start()

    def initializeMitsuba(self):
        """Start the scheduler (one worker per core) and hook up logging."""
        self.scheduler = Scheduler.getInstance()
        for i in range(0, multiprocessing.cpu_count()):
            self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))

        self.scheduler.start()
        # Create a queue for tracking render jobs
        self.queue = RenderQueue()
        # Get a reference to the plugin manager
        self.pmgr = PluginManager.getInstance()

        # Process Mitsuba log and progress messages within Python. Note the
        # `self2` parameter: the appender's own instance, distinct from the
        # enclosing window captured as `self` by the closure.
        class CustomAppender(Appender):
            def append(self2, logLevel, message):
                print(message)

            def logProgress(self2, progress, name, formatted, eta):
                # Asynchronously notify the main thread
                self.renderProgress.emit(progress)

        logger = Thread.getThread().getLogger()
        logger.setLogLevel(EWarn)  # Display warning & error messages
        logger.clearAppenders()
        logger.addAppender(CustomAppender())

    def closeEvent(self, e):
        """Qt close hook: cancel the job and shut the scheduler down.

        Bug fix: this was previously defined as a *local* function inside
        initializeMitsuba(), so Qt never dispatched it and the scheduler
        kept running after the window closed. It must be a class-level
        method for QMainWindow to call it on close.
        """
        self.job.cancel()
        self.queue.join()
        self.scheduler.stop()

    def createRenderJob(self):
        """Build the demo scene via the plugin manager and wrap it in a job."""
        self.scene = self.pmgr.create({
            'type': 'scene',
            'sphere': {
                'type': 'sphere',
            },
            'envmap': {
                'type': 'sky'
            },
            'sensor': {
                'type': 'perspective',
                'toWorld': Transform.translate(Vector(0, 0, -5)),
                'sampler': {
                    'type': 'halton',
                    'sampleCount': 64
                }
            }
        })

        return RenderJob('rjob', self.scene, self.queue)

    def keyPressEvent(self, e):
        # Escape closes the demo window.
        if e.key() == Qt.Key_Escape:
            self.close()
コード例 #17
0
ファイル: pure_api.py プロジェクト: vidarn/cloth-shader
        class InternalRenderContext:
            '''
            Mitsuba Internal Python API Render

            Drives a render through the in-process Mitsuba bindings rather
            than an external mitsuba executable. Relies on names from the
            enclosing scope: main_fresolver, main_logger, MtsManager,
            efutil, MtsLog, plus the Mitsuba binding classes.
            '''

            RENDER_API_TYPE = 'INT'

            # Render state; populated in __init__ and render_start.
            cancelled = False
            thread = None
            scheduler = None
            fresolver = None
            # Verbosity name -> Mitsuba log level.
            log_level = {
                'default': EWarn,
                'verbose': EInfo,
                'quiet': EError,
            }

            def __init__(self):
                # Clone the file resolver and register this thread with
                # Mitsuba so the bindings can be used from here.
                self.fresolver = main_fresolver.clone()
                self.thread = Thread.registerUnmanagedThread('renderer')
                self.thread.setFileResolver(self.fresolver)
                self.thread.setLogger(main_logger)

                self.render_engine = MtsManager.RenderEngine
                self.render_scene = MtsManager.CurrentScene

                # Previews render quietly; otherwise honour the scene setting.
                if self.render_engine.is_preview:
                    verbosity = 'quiet'

                else:
                    verbosity = self.render_scene.mitsuba_engine.log_verbosity

                main_logger.setLogLevel(self.log_level[verbosity])

            def set_scene(self, export_context):
                """Adopt the scene: parse the exported XML file, or take the
                already-built scene object from an API exporter."""
                if export_context.EXPORT_API_TYPE == 'FILE':
                    scene_path, scene_file = os.path.split(efutil.filesystem_path(export_context.file_names[0]))
                    self.fresolver.appendPath(scene_path)
                    self.scene = SceneHandler.loadScene(self.fresolver.resolve(scene_file))

                elif export_context.EXPORT_API_TYPE == 'API':
                    self.scene = export_context.scene

                else:
                    raise Exception('Unknown exporter type')

            def render_start(self, dest_file):
                # NOTE(review): dest_file is unused here — output goes through
                # InternalBufferDisplay; see the commented-out FileStream code.
                self.cancelled = False
                self.queue = RenderQueue()
                self.buffer = InternalBufferDisplay(self)
                self.job = RenderJob('mtsblend_render', self.scene, self.queue)
                self.job.start()

                #out_file = FileStream(dest_file, FileStream.ETruncReadWrite)
                #self.bitmap.write(Bitmap.EPNG, out_file)
                #out_file.close()

            def render_stop(self):
                # Cancel, then block until the job actually leaves the queue.
                self.job.cancel()
                # Wait for the render job to finish
                self.queue.waitLeft(0)

            def render_cancel(self):
                # Flag only; test_break() reports it back to the engine.
                self.cancelled = True
                MtsLog('Cancelling render.')

            def test_break(self):
                # Stop when the host engine asks for a break or we cancelled.
                return self.render_engine.test_break() or self.cancelled

            def is_running(self):
                return self.job.isRunning()

            def returncode(self):
                # External-process API compatibility: always report success.
                return 0

            def wait_timer(self):
                # No-op: the internal API needs no polling delay.
                pass
コード例 #18
0
ファイル: pure_api.py プロジェクト: vidarn/cloth-shader
 def render_start(self, dest_file):
     """Start an asynchronous render of self.scene on a fresh queue.

     NOTE(review): *dest_file* is unused in this body — output appears to
     be handled by InternalBufferDisplay; confirm against the caller.
     """
     self.cancelled = False
     self.queue = RenderQueue()
     self.buffer = InternalBufferDisplay(self)
     self.job = RenderJob('mtsblend_render', self.scene, self.queue)
     self.job.start()
コード例 #19
0
def main(argv):
    """Batch-render one view per camera pose listed in the camera file.

    Command line: -s scene xml, -c camera file (one row of 9 numbers per
    pose: origin, view offset, up vector via np.loadtxt), -g good-camera
    indicator file (rows < 0.5 are skipped), -o output directory.

    NOTE(review): contains a Python 2 print statement below — this script
    only runs under Python 2.
    """
    scene_xml_name = ''
    camera_txt_name = ''
    output_folder = ''
    camera_indicate_name = ''

    # Read command line args
    opts, args = getopt.getopt(argv, "s:c:o:h:g:")
    for opt, arg in opts:
        if opt == '-h':
            print 'mitsuba_render.py -s <scene_xml> -c <camera_file> -g <good_camera_indicator_file> -o <output_directory>'
            sys.exit()
        elif opt in ("-s", "--scenefile"):
            scene_xml_name = arg
        elif opt in ("-c", "--camerafile"):
            camera_txt_name = arg
        elif opt in ("-o", "--outputdir"):
            output_folder = arg
        elif opt in ("-g", "--goodcamerafile"):
            camera_indicate_name = arg

    print('Render job for %s starts...\n' % scene_xml_name)

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    camera = np.loadtxt(camera_txt_name)
    goodcam = np.loadtxt(camera_indicate_name)

    # If the last frame already exists, a previous run completed everything.
    last_dest = output_folder + '%06i_mlt.png' % (len(camera) - 1)
    if os.path.isfile(last_dest):
        print('Job is done in the first round.\n')
        return

    # Work from the scene's own directory so relative assets resolve.
    tmp = scene_xml_name.split('/')
    work_path = '/'.join(tmp[0:-1])

    os.chdir(work_path)

    # Parameters substituted into the scene XML by the scene handler.
    paramMap = StringMap()
    paramMap['emitter_scale'] = '75'
    paramMap['fov'] = '63.414969'
    paramMap['origin'] = '0 0 0'
    paramMap['target'] = '1 1 1'
    paramMap['up'] = '1 0 0'
    paramMap['sampler'] = '1024'
    paramMap['width'] = '640'
    paramMap['height'] = '480'
    paramMap['envmap_path'] = './util_data/HDR_111_Parking_Lot_2_Ref.hdr'
    paramMap['default_texture_path'] = './util_data/wallp_0.jpg'

    fileResolver = Thread.getThread().getFileResolver()
    scene = SceneHandler.loadScene(fileResolver.resolve(scene_xml_name),
                                   paramMap)

    scheduler = Scheduler.getInstance()
    # Start up the scheduling system with one worker per local core
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()
    # Create a queue for tracking render jobs
    queue = RenderQueue()

    from mitsuba.render import Scene
    sensor = scene.getSensor()
    scene.initialize()

    # Render every "good" camera that has not been rendered yet.
    for i in range(0, len(camera)):
        if goodcam[i] < 0.5:
            continue

        destination = output_folder + '%06i_mlt' % i
        if os.path.isfile(destination + '.rgbe'):
            continue

        # Row layout: origin (0-2), view offset (3-5), up vector (6-8).
        c = camera[i]
        t = Transform.lookAt(Point(c[0], c[1], c[2]),
                             Point(c[0] + c[3], c[1] + c[4], c[2] + c[5]),
                             Vector(c[6], c[7], c[8]))
        sensor.setWorldTransform(t)
        scene.setDestinationFile(destination)

        # # Create a render job and insert it into the queue. Note how the resource
        # # ID of the original scene is provided to avoid sending the full scene
        # # contents over the network multiple times.
        job = RenderJob('myRenderJob' + str(i), scene, queue)
        job.start()
        queue.waitLeft(0)

    print('%d render jobs finished!\n' % len(camera))
コード例 #20
0
class Mitsuba(object):
    """Thin convenience wrapper around the Mitsuba 0.x Python rendering API.

    Loads a scene from ``<base_path>/<scene_name>/mitsuba/<scene_name>.xml``,
    starts one local render worker per CPU core, and exposes helpers to
    attach emitters and a camera before rendering frames into numpy arrays.
    """

    def __init__(self, base_path, scene_name, params):
        """Load the scene XML and start the Mitsuba scheduler.

        :param base_path:  root directory containing the scene folder
        :param scene_name: scene folder / XML file name (without ``.xml``)
        :param params:     dict of camera/sampler settings used by SetCamera
        """
        self.params = params
        self.light = []  # emitters accumulated by the Set* helpers below
        # Get a reference to the thread's file resolver
        self.fileResolver = Thread.getThread().getFileResolver()
        scenes_path = base_path + '/' + scene_name + '/mitsuba'
        self.fileResolver.appendPath(scenes_path)
        paramMap = StringMap()
        paramMap['myParameter'] = 'value'
        # Load the scene from an XML file
        self.scene = SceneHandler.loadScene(
            self.fileResolver.resolve(scene_name + '.xml'), paramMap)

        self.scheduler = Scheduler.getInstance()
        # Start up the scheduling system with one worker per local core
        for i in range(multiprocessing.cpu_count()):
            self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
        self.scheduler.start()
        # Create a queue for tracking render jobs
        self.queue = RenderQueue()
        self.sceneResID = self.scheduler.registerResource(self.scene)

    # ----------------------SET SUNSKY -----------------------
    def SetSunSky(self, dir_vec, radiance=1):
        """Append a 'sun' emitter with the given radiance.

        NOTE(review): ``dir_vec`` is currently unused -- the sun direction
        stays at the plugin default; confirm whether that is intended.
        """
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type': 'sun',
            'radiance': Spectrum(radiance)
        })
        self.light.append(obj)

    # ----------------------SET SPOTLIGHT -----------------------
    def SetSpotlight(self, dir_vec):
        """Append a spot emitter.

        :param dir_vec: 1x9 array holding origin (cols 0:3), target (3:6)
                        and up vector (6:9)
        """
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type': 'spot',
            'cutoffAngle': 40.0,
            'intensity': Spectrum(50),
            'toWorld': Transform.lookAt(
                Point(dir_vec[0, 0], dir_vec[0, 1], dir_vec[0, 2]),
                Point(dir_vec[0, 3], dir_vec[0, 4], dir_vec[0, 5]),
                Vector(dir_vec[0, 6], dir_vec[0, 7], dir_vec[0, 8])
            )
        })
        self.light.append(obj)

    # ----------------------SET SCREEN -----------------------
    def SetRectangleScreen(self, screenPos, radiance, dx, dy):
        """Append a dx-by-dy rectangle area emitter centred at ``screenPos``
        (the rectangle primitive spans [-1, 1], hence the half-size scale)."""
        pmgr = PluginManager.getInstance()
        resctScreen = pmgr.create({
            'type': 'rectangle',
            'bsdf': {
                'type': 'diffuse',
                'reflectance': Spectrum(0.78)
            },
            'toWorld': Transform.translate(
                Vector(screenPos[0], screenPos[1], screenPos[2]))
            * Transform.rotate(Vector(1, 0, 0), 180.0)
            * Transform.scale(Vector(dx / 2, dy / 2, 1)),
            'emitter': {
                'type': 'area',
                'radiance': Spectrum(radiance)
            }
        })
        self.light.append(resctScreen)

    # ----------------------SET WIDE SCREEN -----------------------
    def SetWideScreen(self, width=50.0, height=20.0, resX=1, resY=1,
                      distance=2, rand=False):
        """Tile a width-by-height light screen at z == ``distance`` out of
        resX x resY rectangle emitters.

        Per-tile radiance is drawn uniformly from [0, 1) when ``rand`` is
        True, otherwise every tile emits 1.0.
        """
        screenXCorners = width / 2 * np.array([-1, 1])
        screenYCorners = height / 2 * np.array([-1, 1])
        dx = width / resX
        dy = height / resY

        # Tile centres: half a tile in from each screen edge.
        screenX = np.linspace(screenXCorners[0] + dx / 2,
                              screenXCorners[1] - dx / 2, num=resX)
        screenY = np.linspace(screenYCorners[0] + dy / 2,
                              screenYCorners[1] - dy / 2, num=resY)
        for x in screenX:
            for y in screenY:
                curRadiance = np.random.uniform(0.0, 1.0) if rand else 1.0
                self.SetRectangleScreen(np.array([x, y, distance]),
                                        curRadiance, dx, dy)

    # ----------------------SET CAMERA -----------------------
    def SetCamera(self, dir_vec):
        """Create the perspective sensor (film, sampler and an attached
        homogeneous 'underwater' scattering medium).

        :param dir_vec: 1x9 array of origin / target / up, as in SetSpotlight
        """
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type': 'perspective',
            'toWorld': Transform.lookAt(
                Point(dir_vec[0, 0], dir_vec[0, 1], dir_vec[0, 2]),
                Point(dir_vec[0, 3], dir_vec[0, 4], dir_vec[0, 5]),
                Vector(dir_vec[0, 6], dir_vec[0, 7], dir_vec[0, 8])
            ),
            'fov': self.params['fov'],
            'fovAxis': self.params['fovAxis'],
            'film': {
                'type': 'hdrfilm',
                'width': self.params['camWidth'],
                'height': self.params['camHeight'],
            },
            'sampler': {
                'type': 'ldsampler',
                'sampleCount': self.params['sampleCount']
            },
            # Participating medium the camera looks through.
            'medium': {
                'type': 'homogeneous',
                'id': 'underwater',
                'scale': 0.5,
                'sigmaS': Spectrum([0.4, 0.3, 0.3]),
                'sigmaA': Spectrum([0.45, 0.06, 0.05]),
                'phase': {
                    'type': 'hg',
                    'g': 0.9
                }
            }
        })
        self.cam = obj

    def __createSampler(self, sampleCount):
        # Low-discrepancy sampler used to override the scene sampler per render.
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type': 'ldsampler',
            'sampleCount': sampleCount
        })
        self.sampler = obj

    # ----------------------RENDER-----------------------
    def Render(self, sampleCount):
        """Render the scene plus all accumulated lights with the camera set
        via SetCamera.

        :param sampleCount: samples per pixel for this render
        :returns: tuple ``(rgba_float16_image, scene_aabb)``
        """
        # Copy the base scene so it stays reusable across renders.
        currScene = Scene(self.scene)
        for light in self.light:
            currScene.addChild(light)
        currScene.configure()
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.__createSampler(sampleCount)
        currScene.setSampler(self.sampler)

        # Empty destination: the result is read back from the film, not disk.
        currScene.setDestinationFile('')
        # Create a render job and insert it into the queue
        job = RenderJob('myRenderJob', currScene, self.queue)
        job.start()
        self.queue.waitLeft(0)
        self.queue.join()

        # Develop the film into an RGBA half-float bitmap and copy it to numpy.
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
        # End of render - get result
        result_image = np.array(bitmap.getNativeBuffer())
        # Bug fix: call getAABB() -- previously the bound method object was
        # returned instead of the axis-aligned bounding box.
        currSceneInfo = currScene.getAABB()
        return result_image, currSceneInfo
コード例 #21
0
ファイル: Simulation.py プロジェクト: fdbesanto2/lessrt
def do_simulation_multiangle_seq(seqname):
    """Render one multi-angle observation sequence.

    Reads view azimuth/zenith lists from ``<seqname>.conf``, renders the
    scene once per (azimuth, zenith) pair from a sphere of radius ``obs_R``
    around the scene centre, and (for spectrum films) converts the resulting
    ``.npy`` files to the configured raster output format.

    :param seqname: basename of the sequence config file (JSON)
    """
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    # One local render worker per CPU core.
    scheduler = Scheduler.getInstance()
    for i in range(multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    scene_path = session.get_scenefile_path()
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(str(scene_path))
    scene = SceneHandler.loadScene(fileResolver.resolve(
        str(os.path.join(session.get_scenefile_path(), main_scene_xml_file))))
    scene.configure()
    scene.initialize()
    queue = RenderQueue()
    # Register the scene once so every job shares it instead of re-sending it.
    sceneResID = scheduler.registerResource(scene)
    bsphere = scene.getKDTree().getAABB().getBSphere()
    radius = bsphere.radius
    targetx, targety, targetz = bsphere.center[0], bsphere.center[1], bsphere.center[2]

    # Close the config files promptly instead of leaking the handles.
    with open(seqname + ".conf", 'r') as f:
        params = json.load(f)
    obs_azimuth = params['seq1']['obs_azimuth']
    obs_zenith = params['seq2']['obs_zenith']
    cfgfile = session.get_config_file()
    with open(cfgfile, 'r') as f:
        cfg = json.load(f)
    viewR = cfg["sensor"]["obs_R"]
    mode = cfg["sensor"]["film_type"]
    # Materialize as lists: under Python 3 `map` returns a one-shot iterator,
    # so the second pass over the angles (the .npy conversion below) would
    # silently see nothing.
    azi_arr = [float(x) for x in obs_azimuth.strip().split(":")[1].split(",")]
    zeni_arr = [float(x) for x in obs_zenith.strip().split(":")[1].split(",")]
    seq_header = multi_file_prefix + "_" + seqname
    for azi in azi_arr:
        for zeni in zeni_arr:
            distFile = os.path.join(session.get_output_dir(),
                                    seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(".", "_"))
            newScene = Scene(scene)
            pmgr = PluginManager.getInstance()
            # Clone the sensor so each view gets its own transform and film.
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            theta = zeni / 180.0 * math.pi
            phi = (azi - 90) / 180.0 * math.pi
            scale_x = radius
            scale_z = radius
            toWorld = Transform.lookAt(
                Point(targetx - viewR * math.sin(theta) * math.cos(phi), targety + viewR * math.cos(theta),
                      targetz - viewR * math.sin(theta) * math.sin(phi)),  # camera origin on the view sphere
                Point(targetx, targety, targetz),  # target
                Vector(0, 0, 1)  # up
            ) * Transform.scale(
                Vector(scale_x, scale_z, 1)  # footprint (field of view) size
            )
            newSensor.setWorldTransform(toWorld)
            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(str(distFile))
            job = RenderJob('Simulation Job' + "VA_" + str(azi) + "_VZ_" + str(zeni), newScene, queue, sceneResID)
            job.start()
        # Drain the queue after each azimuth row of jobs.
        queue.waitLeft(0)
        queue.join()
    # Convert the raw .npy spectrum output to the requested raster format.
    if mode == "spectrum" and (output_format not in ("npy", "NPY")):
        for azi in azi_arr:
            for zeni in zeni_arr:
                distFile = os.path.join(session.get_output_dir(),
                                        seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(
                                            ".", "_"))
                data = np.load(distFile + ".npy")
                bandlist = cfg["sensor"]["bands"].split(",")
                RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
                os.remove(distFile + ".npy")
コード例 #22
0
class Mitsuba(object):
    """Minimal Mitsuba 0.x render driver.

    Configure camera/film/sampler through ``self.setup`` (via the set_*
    helpers), load PLY/OBJ meshes with :meth:`load_mesh`, then call
    :meth:`render` to write the image to a file.
    """

    def __init__(self):
        # add path for the Python extension module
        if 'MITSUBA_DIR' not in os.environ:
            raise Exception(
                'Set MITSUBA_DIR env variable, or source Mitsuba setpath.sh')
        MITSUBA_DIR = os.path.join(os.environ['MITSUBA_DIR'], 'dist')
        sys.path.append(MITSUBA_DIR + os.sep + 'python' + os.sep + '2.7')

        # Ensure python can find Mitsuba core libraries
        os.environ['PATH'] = MITSUBA_DIR + os.pathsep + os.environ['PATH']

        # set up logger
        FORMAT = '%(asctime)-15s [%(levelname)s] %(message)s'
        logging.basicConfig(format=FORMAT)
        self.log = logging.getLogger('mts-render')
        self.log.setLevel(logging.INFO)

        # default render setup; override through the set_* helpers below
        self.setup = {
            'integrator': 'path',  # choices: path, mlt, vpl, ao
            'emitter': 'constant',
            'sensor': 'perspective',
            'film': 'ldrfilm',
            'sampler': 'ldsampler',
            'pixelFormat': 'rgba',
            'exposure': 0.0,  # Exposure value e, scales radiance by 2^e
            'banner': False,
            'camera_up': Vector(0, 1, 0),
            'eye': Point(0, 0, 0),
            'target': Point(0, 0, 0),
            'sampleCount': 256,
            'fov': 90.0,
            'width': 640,
            'height': 480,
        }

        # Start up the scheduling system (one worker per local core; the
        # scheduler itself is started lazily in render()).
        self.scheduler = Scheduler.getInstance()
        for i in range(multiprocessing.cpu_count()):
            self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))

        self.queue = RenderQueue()
        self.pmgr = PluginManager.getInstance()

    def set_fov(self, degree):
        """Set the camera field of view in degrees."""
        self.setup['fov'] = degree

    def set_sampleCount(self, sampleCount):
        """Set the samples-per-pixel count."""
        self.setup['sampleCount'] = sampleCount

    def set_camera_up(self, x, y, z):
        """Set the camera up vector."""
        self.setup['camera_up'] = Vector(x, y, z)

    def set_eye(self, x, y, z):
        """Set the camera position."""
        self.setup['eye'] = Point(x, y, z)

    def set_target(self, x, y, z):
        """Set the camera look-at target."""
        self.setup['target'] = Point(x, y, z)

    def set_width(self, width):
        """Set the output image width in pixels."""
        self.setup['width'] = width

    def set_height(self, height):
        """Set the output image height in pixels."""
        self.setup['height'] = height

    def load_mesh(self, mesh_filenames):
        """Load PLY (vertex-colored, two-sided diffuse) or OBJ meshes and
        record AABB / bounding-sphere metadata for each.

        :param mesh_filenames: iterable of ``.ply`` / ``.obj`` paths
        :raises RuntimeError: for any other file extension
        """
        self.mesh = []
        self.mesh_meta = []

        for mesh_filename in mesh_filenames:
            if mesh_filename.endswith('.ply'):
                self.mesh.append(
                    self.pmgr.create({
                        'type': 'ply',
                        'filename': mesh_filename,
                        'bsdf': {
                            'type': 'twosided',
                            'bsdf': {
                                'type': 'diffuse',
                                'reflectance': {
                                    'type': 'vertexcolors'
                                }
                            }
                        }
                    }))
            elif mesh_filename.endswith('.obj'):
                # Bug fix: use self.pmgr (bare `pmgr` was an undefined name).
                self.mesh.append(
                    self.pmgr.create({
                        'type': 'obj',
                        'filename': mesh_filename
                    }))
            else:
                # Bug fix: `meshfile` was an undefined name here.
                raise RuntimeError('Unsupported file type: ' + mesh_filename)

        # metadata: bounding sphere and axis-aligned bounding box per mesh
        for mesh in self.mesh:
            aabb = mesh.getAABB()
            bsphere = aabb.getBSphere()
            self.mesh_meta.append({
                'bsphere': {
                    'center': bsphere.center,
                    'radius': bsphere.radius
                },
                'aabb': {
                    'min': aabb.min,
                    'max': aabb.max
                }
            })

            self.log.info(aabb)
            self.log.info(bsphere)

    def render(self, filename):
        """Assemble a scene from ``self.setup`` and the loaded meshes, then
        render it synchronously into ``filename``."""
        self.scheduler.start()
        # create globals
        integrator = self.pmgr.create({'type': self.setup['integrator']})
        emitter = self.pmgr.create({'type': self.setup['emitter']})
        sensor = self.pmgr.create({
            'type': self.setup['sensor'],
            'film': {
                'type': self.setup['film'],
                'width': self.setup['width'],
                'height': self.setup['height'],
                'pixelFormat': self.setup['pixelFormat'],
                'exposure': self.setup['exposure'],
                'banner': self.setup['banner']
            },
            'sampler': {
                'type': self.setup['sampler'],
                'sampleCount': self.setup['sampleCount']
            },
            'fov': self.setup['fov'],
        })

        scene = Scene()
        scene.addChild(integrator)
        scene.addChild(emitter)
        scene.addChild(sensor)
        for mesh in self.mesh:
            scene.addChild(mesh)
        scene.configure()
        scene.initialize()  # needed to force build of kd-tree

        transformCurr = Transform.lookAt(self.setup['eye'],
                                         self.setup['target'],
                                         self.setup['camera_up'])
        sensor.setWorldTransform(transformCurr)
        scene.setDestinationFile(filename)

        job = RenderJob('job', scene, self.queue)
        job.start()

        # Block until the render completes, then release the workers.
        self.queue.waitLeft(0)
        self.queue.join()
        self.scheduler.stop()
コード例 #23
0
ファイル: pure_api.py プロジェクト: brendanaaa/Learnbgame
 def render_start(self, dest_file):
     """Kick off an asynchronous Mitsuba render of ``self.scene``.

     Creates a fresh render queue and an InternalBufferDisplay before the
     job so the display can observe render progress, then starts the job
     without blocking.

     NOTE(review): ``dest_file`` is not used here -- output appears to be
     consumed through the buffer display instead; confirm with callers.
     """
     self.cancelled = False
     self.queue = RenderQueue()
     self.buffer = InternalBufferDisplay(self)
     self.job = RenderJob('mtsblend_render', self.scene, self.queue)
     self.job.start()
コード例 #24
0
class Mitsuba(object):
    """Mitsuba 0.x wrapper for rendering an XML scene lit by a configurable
    tiled light screen, viewed through an underwater scattering medium.

    ``params`` drives the camera and sampler; ``screenParams`` drives the
    light screen created by :meth:`SetWideScreen`.
    """

    # Constructor
    def __init__(self, base_path, scene_name, params, screenParams):
        """Load ``<base_path>/<scene_name>/mitsuba/<scene_name>.xml``, add the
        pre-configured emitters, and start the render scheduler."""
        self.params = params
        self.screenParams = screenParams
        self.light = []  # emitters accumulated before addSceneLights()
        # Get a reference to the thread's file resolver
        self.fileResolver = Thread.getThread().getFileResolver()
        scenes_path = base_path + '/' + scene_name + '/mitsuba'
        self.fileResolver.appendPath(scenes_path)
        paramMap = StringMap()
        paramMap['myParameter'] = 'value'

        # Load the scene from an XML file
        self.scene = SceneHandler.loadScene(
            self.fileResolver.resolve(scene_name + '.xml'), paramMap)

        # Add the accumulated emitters (diffusive screen etc.) to the scene.
        # TODO: fix overriding of SetWideScreen(...) parameter variants.
        self.addSceneLights()

        # Simultaneously rendering multiple versions of a scene.
        self.scene.initialize()
        self.scheduler = Scheduler.getInstance()
        # Start up the scheduling system with one worker per local core
        # (capped at 30 workers unless running on AWS).
        if 'SYS_NAME' in os.environ and os.environ['SYS_NAME'] == 'AWS':
            maxThreads = multiprocessing.cpu_count()
        else:
            maxThreads = min(multiprocessing.cpu_count(), 30)
        for i in range(maxThreads):
            self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
        self.scheduler.start()

        # Create a queue for tracking render jobs
        self.queue = RenderQueue()
        self.sceneResID = self.scheduler.registerResource(self.scene)

    # ----------------------SET SUNSKY -----------------------
    def SetSunSky(self, dir_vec, radiance=1):
        """Append a 'sun' emitter with the given radiance.

        NOTE(review): ``dir_vec`` is unused -- the sun direction stays at
        the plugin default; confirm whether that is intended.
        """
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({'type': 'sun', 'radiance': Spectrum(radiance)})
        self.light.append(obj)

    # ----------------------SET SPOTLIGHT -----------------------
    def SetSpotlight(self, dir_vec):
        """Append a spot emitter; ``dir_vec`` is a 1x9 array of origin
        (cols 0:3), target (3:6) and up vector (6:9)."""
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type':
            'spot',
            'cutoffAngle':
            40.0,
            'intensity':
            Spectrum(50),
            'toWorld':
            Transform.lookAt(
                Point(dir_vec[0, 0], dir_vec[0, 1], dir_vec[0, 2]),
                Point(dir_vec[0, 3], dir_vec[0, 4], dir_vec[0, 5]),
                Vector(dir_vec[0, 6], dir_vec[0, 7], dir_vec[0, 8]))
        })
        self.light.append(obj)

    # ----------------------SET SCREEN -----------------------
    def SetRectangleScreen(self, screenPos, radiance, dx, dy):
        """Set a sub-screen (rectangle shape attached to an area emitter) at
        ``screenPos`` with dimensions dx-by-dy and radiance in
        Watt/(m^2*sr)."""
        pmgr = PluginManager.getInstance()
        resctScreen = pmgr.create({
            'type':
            'rectangle',
            'bsdf': {
                'type': 'diffuse',
                'illuminant': Spectrum(1.0)
                # 'reflectance' caused peak noise in the result image
                # (reflections), hence the illuminant instead.
            },
            'toWorld':
            Transform.translate(
                Vector(screenPos[0], screenPos[1], screenPos[2])) *
            Transform.rotate(Vector(1, 0, 0), 180.0) *
            Transform.scale(Vector(dx / 2, dy / 2, 1)),
            'emitter': {
                'type': 'constant',
                'radiance': Spectrum(radiance),
                'samplingWeight': 10.0
            }
        })
        self.light.append(resctScreen)

    # ----------------------SET WIDE SCREEN -----------------------
    def SetWideScreen(self):
        """Set a screen of light at z == screenZPos with dimensions
        [width x height], tiled into resX x resY sub-screens.

        The per-tile radiance (Watt/(m^2*sr)) is either constant
        (``maxRadiance``) or drawn uniformly from [0, maxRadiance) when
        ``variantRadiance`` is set.  All settings come from
        ``self.screenParams``.
        """
        # TODO: fix SetWideScreen() overriding (parameterized variant).
        width = self.screenParams['screenWidth']
        height = self.screenParams['screenHeight']
        resX = self.screenParams['resXScreen']
        resY = self.screenParams['resYScreen']
        screenZPos = self.screenParams['screenZPos']
        rand = self.screenParams['variantRadiance']
        maxRadiance = self.screenParams['maxRadiance']

        screenXCorners = width / 2 * np.array([-1, 1])
        screenYCorners = height / 2 * np.array([-1, 1])
        dx = width / resX
        dy = height / resY

        # Tile centres: half a tile in from each screen edge.
        screenX = np.linspace(screenXCorners[0] + dx / 2,
                              screenXCorners[1] - dx / 2,
                              num=resX)
        screenY = np.linspace(screenYCorners[0] + dy / 2,
                              screenYCorners[1] - dy / 2,
                              num=resY)
        for x in screenX:
            for y in screenY:
                curRadiance = np.random.uniform(
                    0.0, maxRadiance) if rand else maxRadiance
                self.SetRectangleScreen(np.array([x, y, screenZPos]),
                                        curRadiance, dx, dy)

    # ----------------------SET CAMERA -----------------------
    def SetCamera(self, dir_vec):
        """Create and pre-set a new sensor according to ``dir_vec`` (1x9
        array of origin / target / up) and ``self.params``."""
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type':
            'perspective',
            'toWorld':
            Transform.lookAt(
                Point(dir_vec[0, 0], dir_vec[0, 1], dir_vec[0, 2]),
                Point(dir_vec[0, 3], dir_vec[0, 4], dir_vec[0, 5]),
                Vector(dir_vec[0, 6], dir_vec[0, 7], dir_vec[0, 8])),
            # focalLength can be achieved via 'fov' & 'fovAxis'
            'fov':
            self.params['fov'],
            'fovAxis':
            self.params['fovAxis'],
            'film': {
                'type': 'hdrfilm',
                'width': self.params['nWidth'],
                'height': self.params['nHeight'],
            },
            'sampler': {
                'type': 'ldsampler',
                'sampleCount': self.params['sampleCount'],
                'dimension': self.params['samplerDimention']
            },
            # Participating medium the camera looks through.
            'medium': {
                'type': 'homogeneous',
                'id': 'underwater',
                'sigmaS': Spectrum([0.133, 0.1, 0.1]),
                'sigmaA': Spectrum([0.45, 0.06, 0.05]),
                'phase': {
                    'type': 'hg',
                    'g': 0.9
                }
            }
        })
        self.cam = obj

    def createSampler(self, sampleCount):
        """Create the per-render low-discrepancy sampler and store it on
        ``self.sampler``."""
        pmgr = PluginManager.getInstance()
        obj = pmgr.create({
            'type': 'ldsampler',
            'sampleCount': sampleCount,
            # TODO: add function for updating self.params instead of passing them
            'dimension': self.params['samplerDimention']
        })
        self.sampler = obj

    # ----------------------ADD LIGHTS-----------------------
    def addSceneLights(self):
        """Add all the pre-set lights to the scene."""
        currScene = Scene(self.scene)
        for light in self.light:
            currScene.addChild(light)
        self.scene = currScene

    # ----------------------RENDER-----------------------
    def Render(self, sampleCount, i):
        """Render one frame from a copy of the base scene with the sensor
        set via SetCamera and a fresh sampler.

        :param sampleCount: samples per pixel
        :param i: index used to name the render job
        :returns: tuple ``(rgba_float16_image, scene_aabb)``
        """
        # Copy the base scene so per-render sensor/sampler changes don't leak.
        currScene = Scene(self.scene)
        currScene.configure()
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.createSampler(sampleCount)
        currScene.setSampler(self.sampler)
        currScene.setDestinationFile('')

        # Register the scene copy so workers share it instead of receiving
        # the full scene contents per job.
        curSceneResID = self.scheduler.registerResource(currScene)
        job = RenderJob('myRenderJob' + str(i), currScene, self.queue,
                        curSceneResID)
        job.start()

        self.queue.waitLeft(0)
        self.queue.join()

        # Develop the film into an RGBA half-float bitmap.
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)

        # End of render - get result. Older Linux builds expose .buffer()
        # instead of .getNativeBuffer().
        result_image = np.array(
            bitmap.buffer()) if sys.platform == 'linux2' else np.array(
                bitmap.getNativeBuffer())
        # Bug fix: call getAABB() -- previously the bound method object was
        # returned instead of the axis-aligned bounding box.
        currSceneInfo = currScene.getAABB()
        return result_image, currSceneInfo

    def shutDownMitsuba(self):
        """Drain the render queue and stop the scheduler."""
        self.queue.join()
        self.scheduler.stop()
コード例 #25
0
paramMap = StringMap()

# NOTE(review): `fileResolver` is not created here -- it must come from
# earlier in the file (Thread.getThread().getFileResolver()); confirm.
scene = SceneHandler.loadScene(fileResolver.resolve("cbox.xml"), paramMap)

#print(scene)

# Benchmark loop: render the same scene for a fixed 60 s slice with an
# increasing number of worker cores, writing one output per core count.
# NOTE(review): range(1, cpu_count()) never exercises the full core count,
# and workers are re-registered against the restarted scheduler on every
# iteration -- confirm both are intended.
for nb_cores in range(1, multiprocessing.cpu_count()):

    scheduler = Scheduler.getInstance()
    for i in range(0, nb_cores):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))

    scheduler.start()

    queue = RenderQueue()
    scene.setDestinationFile('rendererResult_c'+str(nb_cores))

    job = RenderJob('testJob', scene, queue)
    job.start()

    # Let the job run for a fixed time slice, then cancel and drain the queue.
    time.sleep(60)

    job.cancel()

    queue.waitLeft(0)
    queue.join()

    scheduler.stop()

#print(Statistics.getInstance().getStats())
コード例 #26
0
ファイル: pure_api.py プロジェクト: brendanaaa/Learnbgame
        class InternalRenderContext:
            '''
            Mitsuba Internal Python API Render

            Drives a render through the in-process Mitsuba Python bindings
            (as opposed to spawning an external mitsuba process).
            '''

            RENDER_API_TYPE = 'INT'

            # Per-render state; reset in __init__ / render_start.
            cancelled = False
            thread = None
            scheduler = None
            fresolver = None
            # Maps UI verbosity names to Mitsuba log levels.
            log_level = {
                'default': EWarn,
                'verbose': EInfo,
                'quiet': EError,
            }

            def __init__(self):
                # Each render context runs on its own unmanaged Mitsuba thread
                # with a cloned file resolver, so path lookups are isolated.
                self.fresolver = main_fresolver.clone()
                self.thread = Thread.registerUnmanagedThread('renderer')
                self.thread.setFileResolver(self.fresolver)
                self.thread.setLogger(main_logger)

                self.render_engine = MtsManager.RenderEngine
                self.render_scene = MtsManager.CurrentScene

                # Previews are always quiet; otherwise honour the scene setting.
                if self.render_engine.is_preview:
                    verbosity = 'quiet'

                else:
                    verbosity = self.render_scene.mitsuba_engine.log_verbosity

                main_logger.setLogLevel(self.log_level[verbosity])

            def set_scene(self, export_context):
                """Obtain the Mitsuba scene, either by parsing the exported
                XML file ('FILE') or directly from an API export context
                ('API')."""
                if export_context.EXPORT_API_TYPE == 'FILE':
                    scene_path, scene_file = os.path.split(
                        efutil.filesystem_path(export_context.file_names[0]))
                    self.fresolver.appendPath(scene_path)
                    self.scene = SceneHandler.loadScene(
                        self.fresolver.resolve(scene_file))

                elif export_context.EXPORT_API_TYPE == 'API':
                    self.scene = export_context.scene

                else:
                    raise Exception('Unknown exporter type')

            def render_start(self, dest_file):
                """Start an asynchronous render job on a fresh queue.

                NOTE(review): ``dest_file`` is unused while the file-output
                path below stays commented out -- output appears to be
                consumed via InternalBufferDisplay; confirm.
                """
                self.cancelled = False
                self.queue = RenderQueue()
                self.buffer = InternalBufferDisplay(self)
                self.job = RenderJob('mtsblend_render', self.scene, self.queue)
                self.job.start()

                #out_file = FileStream(dest_file, FileStream.ETruncReadWrite)
                #self.bitmap.write(Bitmap.EPNG, out_file)
                #out_file.close()

            def render_stop(self):
                """Cancel the running job and block until the queue drains."""
                self.job.cancel()
                # Wait for the render job to finish
                self.queue.waitLeft(0)

            def render_cancel(self):
                # Cooperative cancel: the flag is observed by test_break().
                self.cancelled = True
                MtsLog('Cancelling render.')

            def test_break(self):
                # True when either Blender's engine or this context wants to stop.
                return self.render_engine.test_break() or self.cancelled

            def is_running(self):
                return self.job.isRunning()

            def returncode(self):
                # Mirrors the external-process API: always success in-process.
                return 0

            def wait_timer(self):
                # No-op: nothing to poll for the internal API.
                pass