def Render(self, sampleCount, i):
    ## Create a copy of the base scene and apply the variant camera's
    ## properties (sensor position, sampler)
    currScene = Scene(self.scene)
    currScene.configure()
    pmgr = PluginManager.getInstance()
    currScene.addSensor(self.cam)
    currScene.setSensor(self.cam)
    self.createSampler(sampleCount)
    currScene.setSampler(self.sampler)
    currScene.setDestinationFile('')

    ## Create a render job and insert it into the queue. Passing the scene
    ## resource ID shares a shallow copy of the scene with all workers
    ## instead of re-serializing it for every job.
    curSceneResID = self.scheduler.registerResource(currScene)
    job = RenderJob('myRenderJob' + str(i), currScene, self.queue, curSceneResID)
    job.start()
    self.queue.waitLeft(0)
    self.queue.join()

    ## Acquire a Bitmap of the rendered image
    film = currScene.getFilm()
    size = film.getSize()
    bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
    film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)

    ## End of render - fetch the result
    # TODO: update the Windows build of Mitsuba; in the updated API
    # bitmap.getNativeBuffer() doesn't exist anymore (bitmap.buffer() replaces it)
    result_image = np.array(bitmap.buffer()) if sys.platform == 'linux2' \
        else np.array(bitmap.getNativeBuffer())
    currSceneInfo = currScene.getAABB()
    return result_image, currSceneInfo
def Render(self, sampleCount):
    currScene = Scene(self.scene)
    for light in self.light:
        currScene.addChild(light)
    currScene.configure()
    currScene.addSensor(self.cam)
    currScene.setSensor(self.cam)
    self.__createSampler(sampleCount)  # set up a sampler with the given sample count
    currScene.setSampler(self.sampler)
    currScene.setDestinationFile('')

    # Create a render job and insert it into the queue
    job = RenderJob('myRenderJob', currScene, self.queue)
    job.start()
    self.queue.waitLeft(0)
    self.queue.join()

    film = currScene.getFilm()
    size = film.getSize()
    bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
    film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)

    # End of render - fetch the result
    result_image = np.array(bitmap.getNativeBuffer())
    currSceneInfo = currScene.getAABB()
    return result_image, currSceneInfo
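# Both Render variants above delegate to a createSampler helper whose body is
# not shown. The following is a minimal, hypothetical sketch of such a method,
# assuming the PluginManager dictionary API used elsewhere in these examples;
# the 'ldsampler' plugin choice is an illustrative assumption, not the
# original implementation.
from mitsuba.core import PluginManager

def createSampler(self, sampleCount):
    # Instantiate a low-discrepancy sampler with the requested
    # per-pixel sample count (assumed plugin and attribute names)
    pmgr = PluginManager.getInstance()
    self.sampler = pmgr.create({
        'type': 'ldsampler',          # low-discrepancy sampler plugin
        'sampleCount': sampleCount,   # samples per pixel
    })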
def render_multiple_perspectives(mesh_path: Tuple[Path, Path], sensors: list, num_workers=8) -> None:
    """
    Render one mesh from multiple camera perspectives

    :param mesh_path: Path tuple (input_filepath, output_dirpath) of the mesh to render
    :param sensors: The Mitsuba sensor definitions to render the mesh with
    :param num_workers: Number of CPU cores to use for rendering
    :return:
    """
    queue = util.prepare_queue(num_workers)
    input_path, output_path = mesh_path

    # Make Mesh
    mesh = prepare_ply_mesh(input_path, util.get_predefined_spectrum('light_blue'))

    with tqdm(total=len(sensors),
              bar_format='Total {percentage:3.0f}% {n_fmt}/{total_fmt} '
                         '[{elapsed}<{remaining}, {rate_fmt}{postfix}] {desc}',
              dynamic_ncols=True) as t:
        util.redirect_logger(tqdm.write, EWarn, t)
        for idx, sensor in enumerate(sensors):
            t.write(f"Rendering with sensor {idx}")

            # Make Scene
            scene = util.construct_simple_scene([mesh], sensor)
            scene.setDestinationFile(str(output_path / f'{input_path.stem}-{idx}.png'))

            # Make Result
            job = RenderJob(f'Render-{input_path.stem}-{idx}', scene, queue)
            job.start()
            queue.waitLeft(0)
            queue.join()
            t.update()
def renderScene(scene, index, sceneResID):
    # Create a queue for tracking render jobs
    queue = RenderQueue()

    # Create a render job and insert it into the queue. Note how the resource
    # ID of the original scene is provided to avoid sending the full scene
    # contents over the network multiple times.
    job = RenderJob('myRenderJob' + str(index), scene, queue, sceneResID)
    job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
    queue.join()
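# renderScene expects the caller to have already registered the scene with the
# global scheduler. A hedged usage sketch follows; the worker setup mirrors the
# pattern used throughout these examples, `scene` is assumed to be a loaded and
# configured Scene, and the render count of 4 is purely illustrative.
import multiprocessing
from mitsuba.core import Scheduler, LocalWorker

scheduler = Scheduler.getInstance()
for i in range(multiprocessing.cpu_count()):
    scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
scheduler.start()

# Register the scene once; the returned resource ID lets every job reuse the
# already-distributed scene data instead of resending it per job.
sceneResID = scheduler.registerResource(scene)
for index in range(4):  # hypothetical number of renders
    renderScene(scene, index, sceneResID)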
def render(scene, filename):
    scheduler = Scheduler.getInstance()
    for i in range(multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, "wrk{}".format(i)))
    scheduler.start()

    queue = RenderQueue()
    scene.setDestinationFile(filename)
    job = RenderJob("myRenderJob", scene, queue)
    job.start()
    queue.waitLeft(0)
    queue.join()
    print(Statistics.getInstance().getStats())
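# A possible caller for render() above, shown as a hedged sketch: the scene is
# loaded through the current thread's file resolver, as done in the loadScene
# examples later in this collection. The file names are placeholders.
from mitsuba.core import Thread
from mitsuba.render import SceneHandler

fileResolver = Thread.getThread().getFileResolver()
scene = SceneHandler.loadScene(fileResolver.resolve('scene.xml'))  # placeholder path
render(scene, 'output.exr')  # placeholder output file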
def render_trajectory_frames(mesh_path: Tuple[Path, Path], trajectory: List[Transform], queue: RenderQueue,
                             shutter_time: float, width: int = 1920, height: int = 1440,
                             fov: float = 60., num_samples: int = 32) -> None:
    """
    Render a camera trajectory through a mesh loaded from mesh_path[0] at the Transformations given in trajectory

    :param mesh_path: Path tuple (input_filepath, output_dirpath) of the mesh to render
    :param trajectory: List of Mitsuba Transformations, corresponding to cameras in the trajectory
    :param queue: The Mitsuba render queue to use for all the frames
    :param shutter_time: The camera shutter time. Controls the amount of motion blur between every pair of
        consecutive frames
    :param width: Parameter passed to cameras.create_sensor_from_transform
    :param height: Parameter passed to cameras.create_sensor_from_transform
    :param fov: The field of view
    :param num_samples: Parameter passed to cameras.create_sensor_from_transform
    :return:
    """
    input_path, output_path = mesh_path

    # Make Mesh
    mesh_obj = mesh.prepare_ply_mesh(input_path, util.get_predefined_spectrum('light_blue'))

    # Create sensors with animation transforms
    sensors = cameras.create_animated_sensors(trajectory, shutter_time, width, height, fov, num_samples)
    scene = util.construct_simple_scene([mesh_obj], sensors[0])

    with tqdm(total=len(sensors),
              bar_format='Total {percentage:3.0f}% {n_fmt}/{total_fmt} '
                         '[{elapsed}<{remaining}, {rate_fmt}{postfix}] {desc}',
              dynamic_ncols=True) as t:
        util.redirect_logger(tqdm.write, EError, t)
        t.write(input_path.stem)
        for idx, sensor in enumerate(sensors):
            # Make Scene
            scene.setSensor(sensor)
            scene.setDestinationFile(str(output_path / f'{input_path.stem}-{idx}.png'))

            # Make Result
            job = RenderJob(f'Render-{input_path.stem}-{idx}', scene, queue)
            job.start()
            queue.waitLeft(0)
            queue.join()
            t.update()
def do_simulation_multi_spectral_py():
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    # Start up the scheduling system with one worker per local core
    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    cfgfile = session.get_config_file()
    with open(cfgfile, 'r') as f:
        cfg = json.load(f)
    distFileName = spectral_img_prefix + "_VZ=" + str(cfg["observation"]["obs_zenith"]) + \
                   "_VA=" + str(cfg["observation"]["obs_azimuth"])
    distFile = os.path.join(session.get_output_dir(), distFileName).encode("utf-8")

    scene_file_path = os.path.join(session.get_scenefile_path(), main_scene_xml_file).encode("utf-8")
    scene_path = session.get_scenefile_path().encode("utf-8")
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(scene_path)
    scene = SceneHandler.loadScene(fileResolver.resolve(scene_file_path))
    scene.setDestinationFile(distFile)
    scene.configure()
    scene.initialize()

    queue = RenderQueue()
    sceneResID = scheduler.registerResource(scene)
    job = RenderJob(('Simulation Job ' + distFileName).encode("utf-8"), scene, queue, sceneResID)
    job.start()
    queue.waitLeft(0)
    queue.join()

    # Convert the .npy spectral output to the requested raster format
    if output_format not in ("npy", "NPY") and os.path.exists(distFile + ".npy"):
        data = np.load(distFile + ".npy")
        bandlist = cfg["sensor"]["bands"].split(",")
        RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
        os.remove(distFile + ".npy")
    log("INFO: Finished")
def render(self, filename):
    self.scheduler.start()

    # create globals
    integrator = self.pmgr.create({'type': self.setup['integrator']})
    emitter = self.pmgr.create({'type': self.setup['emitter']})
    sensor = self.pmgr.create({
        'type': self.setup['sensor'],
        'film': {
            'type': self.setup['film'],
            'width': self.setup['width'],
            'height': self.setup['height'],
            'pixelFormat': self.setup['pixelFormat'],
            'exposure': self.setup['exposure'],
            'banner': self.setup['banner']
        },
        'sampler': {
            'type': self.setup['sampler'],
            'sampleCount': self.setup['sampleCount']
        },
        'fov': self.setup['fov'],
    })

    scene = Scene()
    scene.addChild(integrator)
    scene.addChild(emitter)
    scene.addChild(sensor)
    for mesh in self.mesh:
        scene.addChild(mesh)
    scene.configure()
    scene.initialize()  # needed to force build of kd-tree

    transformCurr = Transform.lookAt(self.setup['eye'], self.setup['target'], self.setup['camera_up'])
    sensor.setWorldTransform(transformCurr)
    scene.setDestinationFile(filename)

    job = RenderJob('job', scene, self.queue)
    job.start()
    self.queue.waitLeft(0)
    self.queue.join()
    self.scheduler.stop()
def __run_mitsuba(self):
    self.mitsuba_scene.configure()

    scheduler = Scheduler.getInstance()
    for i in range(multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, "worker_{}".format(i)))
    scheduler.start()

    queue = RenderQueue()
    self.mitsuba_scene.setDestinationFile(self.image_name)
    job = RenderJob("render job: {}".format(self.image_name), self.mitsuba_scene, queue)
    job.start()
    queue.waitLeft(0)
    queue.join()
    print(Statistics.getInstance().getStats())
    scheduler.stop()
def render_pointclouds(pointcloud_paths: List[Tuple[Path, Path]], sensor, sphere_radius=1., num_workers=8) -> None:
    """
    Render pointclouds by placing little Mitsuba spheres at all point locations

    :param pointcloud_paths: Path tuples (input_mesh, output_path) for all pointcloud files
    :param sensor: The sensor containing the transform to render with
    :param sphere_radius: Radius of individual point spheres
    :param num_workers: Number of concurrent render workers
    :return:
    """
    queue = util.prepare_queue(num_workers)
    pointcloud_paths = list(pointcloud_paths)
    with tqdm(total=len(pointcloud_paths),
              bar_format='Total {percentage:3.0f}% {n_fmt}/{total_fmt} '
                         '[{elapsed}<{remaining}, {rate_fmt}{postfix}] {desc}',
              dynamic_ncols=True) as t:
        util.redirect_logger(tqdm.write, EWarn, t)
        for pointcloud_path in pointcloud_paths:
            input_path, output_path = pointcloud_path
            t.write(f'Rendering {input_path.stem}')

            # Make Pointcloud Spheres
            spheres = create_spheres(load_from_ply(input_path),
                                     util.get_predefined_spectrum('orange'),
                                     sphere_radius)

            # Make Scene
            scene = util.construct_simple_scene(spheres, sensor)
            scene.setDestinationFile(str(output_path / f'{input_path.stem}.png'))

            # Make Result
            job = RenderJob(f'Render-{input_path.stem}', scene, queue)
            job.start()
            queue.waitLeft(0)
            queue.join()
            t.update()
def render_multiple_meshes(mesh_paths: List[Tuple[Path, Path]], radius_multiplier=3.,
                           positioning_vector=Vector3(0, 1, 1),
                           tilt=Transform.rotate(util.axis_unit_vector('x'), 20.),
                           width=1920, height=1440, num_samples=256, num_workers=8) -> None:
    """
    Render multiple meshes with the camera always in the same relative position (based on the mesh).

    :param mesh_paths: Path tuples (input_filepath, output_dirpath) of the meshes to render
    :param radius_multiplier: Parameter passed to cameras.create_transform_on_bbsphere
    :param positioning_vector: Parameter passed to cameras.create_transform_on_bbsphere
    :param tilt: Parameter passed to cameras.create_transform_on_bbsphere
    :param width: Parameter passed to cameras.create_sensor_from_transform
    :param height: Parameter passed to cameras.create_sensor_from_transform
    :param num_samples: Parameter passed to cameras.create_sensor_from_transform
    :param num_workers: Number of CPU cores to use for rendering
    :return:
    """
    queue = util.prepare_queue(num_workers)
    mesh_paths = list(mesh_paths)
    with tqdm(total=len(mesh_paths),
              bar_format='Total {percentage:3.0f}% {n_fmt}/{total_fmt} '
                         '[{elapsed}<{remaining}, {rate_fmt}{postfix}] {desc}',
              dynamic_ncols=True) as t:
        util.redirect_logger(tqdm.write, EWarn, t)
        for mesh_path in mesh_paths:
            input_path, output_path = mesh_path
            t.write(f'Rendering {input_path.stem}')

            # Make Mesh
            mesh = prepare_ply_mesh(input_path, util.get_predefined_spectrum('light_blue'))

            # Make Sensor
            sensor_transform = cameras.create_transform_on_bbsphere(mesh.getAABB(), radius_multiplier,
                                                                    positioning_vector, tilt)
            sensor = cameras.create_sensor_from_transform(sensor_transform, width, height,
                                                          fov=45., num_samples=num_samples)

            # Make Scene
            scene = util.construct_simple_scene([mesh], sensor)
            scene.setDestinationFile(str(output_path / f'{input_path.stem}.png'))

            # Make Result
            job = RenderJob(f'Render-{input_path.stem}', scene, queue)
            job.start()
            queue.waitLeft(0)
            queue.join()
            t.update()
def main():
    scene = makeScene()
    #print(scene)

    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, "wrk{}".format(i)))
    scheduler.start()

    queue = RenderQueue()
    scene.setDestinationFile("renderedResult")
    job = RenderJob("MyRenderJob", scene, queue)
    job.start()
    queue.waitLeft(0)
    queue.join()
    print(Statistics.getInstance().getStats())
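# main() above depends on a makeScene() helper that is not shown. The sketch
# below is hypothetical: it builds a trivial scene in the PluginManager
# dictionary style used by the other examples in this collection; all plugin
# choices and numeric values are illustrative assumptions.
from mitsuba.core import PluginManager, Point, Vector, Transform, Spectrum
from mitsuba.render import Scene

def makeScene():
    pmgr = PluginManager.getInstance()
    scene = Scene()
    # Direct-illumination integrator (assumed choice)
    scene.addChild(pmgr.create({'type': 'direct'}))
    # A single emitting sphere as the only scene content
    scene.addChild(pmgr.create({
        'type': 'sphere',
        'center': Point(0, 0, 0),
        'radius': 1.0,
        'emitter': pmgr.create({'type': 'area', 'radiance': Spectrum(10.)})
    }))
    # Perspective camera looking at the sphere
    scene.addChild(pmgr.create({
        'type': 'perspective',
        'toWorld': Transform.lookAt(Point(0, 0, -5), Point(0, 0, 0), Vector(0, 1, 0)),
        'film': {'type': 'ldrfilm', 'width': 640, 'height': 480, 'banner': False},
        'sampler': {'type': 'halton', 'sampleCount': 8}
    }))
    scene.configure()
    return scene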
scene = SceneHandler.loadScene(fileResolver.resolve("scene1.xml"), paramMap) print(scene) scheduler = Scheduler.getInstance() for i in range(0, multiprocessing.cpu_count()): scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i)) scheduler.start() destinationFile = 'results/rendererResult' queue = RenderQueue() scene.setDestinationFile(destinationFile) job = RenderJob('testJob', scene, queue) job.start() startTime = time.time() renderInterval = 10 renderOutputDuration = 2; renderLength = 30*60 lastRender = startTime print("starting render for " + str(renderLength) + " seconds") while time.time() < startTime + renderLength: currentTime = math.floor(time.time()) if currentTime > lastRender + renderInterval: #time.sleep(1) lastRender = currentTime
def main(argv):
    scene_xml_name = ''
    camera_txt_name = ''
    output_folder = ''
    camera_indicate_name = ''

    # Read command line args
    opts, args = getopt.getopt(argv, "s:c:o:h:g:")
    for opt, arg in opts:
        if opt == '-h':
            print('mitsuba_render.py -s <scene_xml> -c <camera_file> '
                  '-g <good_camera_indicator_file> -o <output_directory>')
            sys.exit()
        elif opt in ("-s", "--scenefile"):
            scene_xml_name = arg
        elif opt in ("-c", "--camerafile"):
            camera_txt_name = arg
        elif opt in ("-o", "--outputdir"):
            output_folder = arg
        elif opt in ("-g", "--goodcamerafile"):
            camera_indicate_name = arg

    print('Render job for %s starts...\n' % scene_xml_name)
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    camera = np.loadtxt(camera_txt_name)
    goodcam = np.loadtxt(camera_indicate_name)
    last_dest = output_folder + '%06i_mlt.png' % (len(camera) - 1)
    if os.path.isfile(last_dest):
        print('Job is done in the first round.\n')
        return

    tmp = scene_xml_name.split('/')
    work_path = '/'.join(tmp[0:-1])
    os.chdir(work_path)

    paramMap = StringMap()
    paramMap['emitter_scale'] = '75'
    paramMap['fov'] = '63.414969'
    paramMap['origin'] = '0 0 0'
    paramMap['target'] = '1 1 1'
    paramMap['up'] = '1 0 0'
    paramMap['sampler'] = '1024'
    paramMap['width'] = '640'
    paramMap['height'] = '480'
    paramMap['envmap_path'] = './util_data/HDR_111_Parking_Lot_2_Ref.hdr'
    paramMap['default_texture_path'] = './util_data/wallp_0.jpg'

    fileResolver = Thread.getThread().getFileResolver()
    scene = SceneHandler.loadScene(fileResolver.resolve(scene_xml_name), paramMap)

    # Start up the scheduling system with one worker per local core
    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    # Create a queue for tracking render jobs
    queue = RenderQueue()

    from mitsuba.render import Scene
    sensor = scene.getSensor()
    scene.initialize()
    for i in range(0, len(camera)):
        if goodcam[i] < 0.5:
            continue
        destination = output_folder + '%06i_mlt' % i
        if os.path.isfile(destination + '.rgbe'):
            continue
        c = camera[i]
        t = Transform.lookAt(Point(c[0], c[1], c[2]),
                             Point(c[0] + c[3], c[1] + c[4], c[2] + c[5]),
                             Vector(c[6], c[7], c[8]))
        sensor.setWorldTransform(t)
        scene.setDestinationFile(destination)
        # Create a render job and insert it into the queue. The resource ID of
        # the original scene could additionally be provided here to avoid
        # sending the full scene contents over the network multiple times.
        job = RenderJob('myRenderJob' + str(i), scene, queue)
        job.start()

    queue.waitLeft(0)
    print('%d render jobs finished!\n' % len(camera))
def do_simulation_multiangle_seq(seqname):
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    scene_path = session.get_scenefile_path()
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(str(scene_path))
    scene = SceneHandler.loadScene(fileResolver.resolve(
        str(os.path.join(session.get_scenefile_path(), main_scene_xml_file))))
    scene.configure()
    scene.initialize()
    queue = RenderQueue()
    sceneResID = scheduler.registerResource(scene)

    bsphere = scene.getKDTree().getAABB().getBSphere()
    radius = bsphere.radius
    targetx, targety, targetz = bsphere.center[0], bsphere.center[1], bsphere.center[2]

    with open(seqname + ".conf", 'r') as f:
        params = json.load(f)
    obs_azimuth = params['seq1']['obs_azimuth']
    obs_zenith = params['seq2']['obs_zenith']
    cfgfile = session.get_config_file()
    with open(cfgfile, 'r') as f:
        cfg = json.load(f)
    viewR = cfg["sensor"]["obs_R"]
    mode = cfg["sensor"]["film_type"]
    azi_arr = [float(x) for x in obs_azimuth.strip().split(":")[1].split(",")]
    zeni_arr = [float(x) for x in obs_zenith.strip().split(":")[1].split(",")]
    seq_header = multi_file_prefix + "_" + seqname

    index = 0
    for azi in azi_arr:
        for zeni in zeni_arr:
            distFile = os.path.join(session.get_output_dir(),
                                    seq_header + ("_VA_%.2f" % azi).replace(".", "_") +
                                    ("_VZ_%.2f" % zeni).replace(".", "_"))
            newScene = Scene(scene)
            pmgr = PluginManager.getInstance()
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            theta = zeni / 180.0 * math.pi
            phi = (azi - 90) / 180.0 * math.pi
            scale_x = radius
            scale_z = radius
            toWorld = Transform.lookAt(
                Point(targetx - viewR * math.sin(theta) * math.cos(phi),
                      targety + viewR * math.cos(theta),
                      targetz - viewR * math.sin(theta) * math.sin(phi)),  # origin
                Point(targetx, targety, targetz),  # target
                Vector(0, 0, 1)  # up
            ) * Transform.scale(
                Vector(scale_x, scale_z, 1)  # size of the field of view
            )
            newSensor.setWorldTransform(toWorld)
            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(str(distFile))
            job = RenderJob('Simulation Job' + "VA_" + str(azi) + "_VZ_" + str(zeni),
                            newScene, queue, sceneResID)
            job.start()
    queue.waitLeft(0)
    queue.join()

    # Handle npy: convert the spectral output to the requested raster format
    if mode == "spectrum" and (output_format not in ("npy", "NPY")):
        for azi in azi_arr:
            for zeni in zeni_arr:
                distFile = os.path.join(session.get_output_dir(),
                                        seq_header + ("_VA_%.2f" % azi).replace(".", "_") +
                                        ("_VZ_%.2f" % zeni).replace(".", "_"))
                data = np.load(distFile + ".npy")
                bandlist = cfg["sensor"]["bands"].split(",")
                RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
                os.remove(distFile + ".npy")
scheduler = Scheduler.getInstance()
# Start up the scheduling system with one worker per local core
for i in range(0, multiprocessing.cpu_count()):
    scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
scheduler.start()

# Create a queue for tracking render jobs
queue = RenderQueue()
sensor = scene.getSensor()
shapes = scene.getShapes()

# Pre-render to force the kd-tree build
scene.setDestinationFile('Fake')
job = RenderJob('fakeJob', scene, queue)
job.start()
queue.waitLeft(0)
queue.join()

aabb = scene.getAABB()
leftmins = []
rightmaxs = []
print("Nature AABB ##################")
print(aabb.getCorner(0))
print(aabb.getCorner(7))
print("##################")
#for Invalid Aabb
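# A hedged alternative to the throwaway 'fake' render above: another example in
# this collection notes that scene.initialize() forces the kd-tree build, after
# which the scene bounds can be queried without rendering anything. Sketch only;
# `scene` is the same loaded scene as above.
scene.initialize()  # builds the kd-tree, making the AABB available
aabb = scene.getAABB()
print(aabb.getCorner(0))
print(aabb.getCorner(7))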
class MitsubaDemo(QDialog):
    renderProgress = Signal(int)

    def __init__(self, parent, width, height, barHeight, progressHeight, folderPath,
                 sceneName, mitsubaPath, frameNumber, scenePath, renderWindowPosX,
                 renderWindowPosY, pluginPath):
        QDialog.__init__(self, parent)
        self.startRender = True
        self.folderPath = folderPath
        self.mitsubaPath = mitsubaPath
        self.frameNumber = frameNumber
        self.scenePath = scenePath

        # Initialize window geometry (centered if no position was given)
        if renderWindowPosX == -1 and renderWindowPosY == -1:
            self.setGeometry((GetSystemMetrics(0) - width - 20) / 2,
                             (GetSystemMetrics(1) - height - barHeight - progressHeight - 40) / 2,
                             width + 20, height + barHeight + progressHeight + 40)
        else:
            self.setGeometry(renderWindowPosX, renderWindowPosY,
                             width + 20, height + barHeight + progressHeight + 40)
        self.Colorize()

        btnStop = QtGui.QPushButton("Stop", self)
        btnStop.setMaximumSize(70, barHeight)
        btnRefresh = QtGui.QPushButton("Refresh", self)
        btnRefresh.setMaximumSize(80, barHeight)
        btnSave = QtGui.QPushButton("Save", self)
        btnSave.setMaximumSize(70, barHeight)
        self.connect(btnStop, SIGNAL('clicked()'), self.stopMethod)
        self.connect(btnRefresh, SIGNAL('clicked()'), self.refreshMethod)
        self.connect(btnSave, SIGNAL('clicked()'), self.saveMethod)

        doneLabel = QLabel()
        font = QtGui.QFont('Tahoma', 14)
        font.setBold(True)
        doneLabel.setFont(font)
        doneLabel.setText("Rendering...")
        doneLabel.setStyleSheet("QLabel { color: #d37700; }")

        sIcon = QIcon(pluginPath + "\\icons\\" + "save_3118.png")
        btnSave.setIcon(sIcon)
        stopIcon = QIcon(pluginPath + "\\icons\\" + "stop_32_1340.png")
        btnStop.setIcon(stopIcon)
        rIcon = QIcon(pluginPath + "\\icons\\" + "view-refresh_2730.png")
        btnRefresh.setIcon(rIcon)

        imgLabel = QLabel()
        layout = QGridLayout(self)
        layout.setSpacing(10)
        layout.addWidget(btnSave, 1, 0)
        layout.addWidget(btnRefresh, 1, 1)
        layout.addWidget(btnStop, 1, 2)
        layout.addWidget(imgLabel, 2, 0, 1, 4)

        self.initializeMitsuba()
        self.scene = SceneHandler.loadScene(folderPath + "\\" + sceneName)
        self.show()
        self.job = RenderJob('rjob', self.scene, self.queue)
        self.job.setInteractive(True)

        # Initialize the user interface
        progress = QtGui.QProgressBar(self)
        layout.addWidget(doneLabel, 3, 0, 1, 3)
        layout.addWidget(progress, 3, 3)
        self.rwidget = RenderWidget(self, self.queue, self.scene.getFilm().getSize())
        self.setWindowTitle('Mitsuba render window')

        # Update the progress bar and preview image; switch the label once
        # rendering has finished
        def renderingUpdated(event):
            progress.setValue(100 * self.rwidget.extProgress /
                              (self.rwidget.extIMG.width() * self.rwidget.extIMG.height()))
            imgLabel.setPixmap(QPixmap.fromImage(self.rwidget.extIMG))
            if event == MitsubaRenderBuffer.RENDERING_FINISHED:
                lm("FINISH!!!")
                self.startRender = False
                #progress.hide()
                doneLabel.setText("Done!")
                doneLabel.setStyleSheet("QLabel { color: #00d328; }")

        self.rwidget.renderingUpdated.connect(renderingUpdated, Qt.QueuedConnection)

        # Start the rendering process
        self.job.start()

    def initializeMitsuba(self):
        # Start up the scheduling system with one worker per local core
        self.scheduler = Scheduler.getInstance()
        if self.scheduler.isRunning():
            self.scheduler.stop()
        for i in range(0, multiprocessing.cpu_count()):
            self.scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
        self.scheduler.start()

        # Create a queue for tracking render jobs
        self.queue = RenderQueue()

        # Get a reference to the plugin manager
        self.pmgr = PluginManager.getInstance()

        # Process Mitsuba log and progress messages within Python
        class CustomAppender(Appender):
            def append(self2, logLevel, message):
                print(message)

            def logProgress(self2, progress, name, formatted, eta):
                # Asynchronously notify the main thread
                self.renderProgress.emit(progress)

        logger = Thread.getThread().getLogger()
        logger.setLogLevel(EWarn)  # Display warning & error messages
        logger.clearAppenders()
        logger.addAppender(CustomAppender())

    def stopMethod(self):
        lm("stop workers=" + str(self.scheduler.getWorkerCount()))
        i = self.scheduler.getWorkerCount() - 1
        while i >= 0:
            worker = self.scheduler.getWorker(i)
            self.scheduler.unregisterWorker(worker)
            i = i - 1
        self.queue.join()
        self.scheduler.stop()

    def closeEvent(self, e):
        self.stopMethod()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Escape:
            self.close()

    def refreshMethod(self):
        Refresh(self.frameNumber, self, self.geometry().x(), self.geometry().y())

    def saveMethod(self):
        if not self.startRender:
            filename = QtGui.QFileDialog.getSaveFileName(self, 'Open file', self.folderPath,
                                                         "Images (*.png *.jpg)")
            self.rwidget.extIMG.save(filename)

    def Colorize(self):
        self.strStyleSheet = """
        QDialog { background-color: #222; color: #FFF; }
        QLabel { background-color: #222; }
        QPushButton {
            background: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #555, stop: 1 #444);
            border: 3;
            border-radius: 3px;
            color: #EEE;
            padding: 20px 10px;
        }
        QPushButton:hover {
            background: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #666, stop: 1 #555);
            border: 0.5px outset #444;
        }
        QPushButton:pressed {
            background: QLinearGradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #666, stop: 1 #444);
            border: 0.5px outset #444;
        }
        QProgressBar {
            border: 2px solid grey;
            border-radius: 5px;
            text-align: center
        }
        """
        self.setStyleSheet(self.strStyleSheet)
def render_start(self, dest_file):
    self.cancelled = False
    self.queue = RenderQueue()
    self.buffer = InternalBufferDisplay(self)
    self.job = RenderJob('mtsblend_render', self.scene, self.queue)
    self.job.start()
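# render_start leaves the result on the scene's film. A hedged sketch of saving
# it to dest_file once the job finishes: it combines the film.develop pattern
# from the Render methods earlier in this collection with the commented-out
# FileStream code in the fuller versions of this class below. The method name
# render_save and the 8-bit output format are assumptions, not the original API.
from mitsuba.core import Bitmap, Point2i, FileStream

def render_save(self, dest_file):
    # Block until the render job has finished
    self.queue.waitLeft(0)
    # Develop the film into an 8-bit RGBA bitmap suitable for PNG output
    film = self.scene.getFilm()
    size = film.getSize()
    bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EUInt8, size)
    film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
    # Write the developed bitmap to disk as a PNG
    out_file = FileStream(dest_file, FileStream.ETruncReadWrite)
    bitmap.write(Bitmap.EPNG, out_file)
    out_file.close()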
def renderVPLS(vpls, cam, scene, target):
    '''
    Render the VPLs with the camera looking at the desired target in the scene.
    The result is an image used to define the 3D space where the camera and
    object can be placed in the environment. Target can be either 'roof' or 'floor'.
    '''
    pmgr = PluginManager.getInstance()
    scheduler = Scheduler.getInstance()

    if target == 'roof':
        target_height = scene.HeightRoof
    else:
        target_height = scene.HeightFloor

    # Start up the scheduling system with one worker per local core
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    # Create a queue for tracking render jobs
    queue = RenderQueue()

    nVPLS = int(vpls[0][1]) * 4
    scene = Scene()  # rebinds 'scene': the Mitsuba scene replaces the input description
    for i in xrange(1, nVPLS, 4):
        if float(vpls[i][2]) == target_height:
            scene.addChild(pmgr.create({
                'type': 'sphere',
                'center': Point(float(vpls[i][1]), float(vpls[i][2]), float(vpls[i][3])),
                'radius': 1.0,
                'emitter': pmgr.create({
                    'type': 'area',
                    'radiance': Spectrum(10.),
                })
            }))

    scene.addChild(pmgr.create({
        'type': 'perspective',
        'toWorld': Transform.lookAt(
            Point(cam.origin[0], cam.origin[1], cam.origin[2]),
            Point(cam.target[0], cam.target[1], cam.target[2]),
            Vector(cam.up[0], cam.up[1], cam.up[2])),
        'fov': cam.fov,
        'film': {
            'type': 'ldrfilm',
            'width': cam.width,
            'height': cam.height,
            'banner': False,
        },
        'sampler': {
            'type': 'halton',
            'sampleCount': 1
        },
    }))
    scene.addChild(pmgr.create({'type': 'direct'}))
    scene.configure()

    if target == 'roof':
        filename = 'renderVPLSRoof'
    else:
        filename = 'renderVPLSFloor'
    scene.setDestinationFile(filename)

    # Create a render job and insert it into the queue
    job = RenderJob('myRenderJob', scene, queue)
    job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
    queue.join()
    scheduler.stop()
class InternalRenderContext:
    '''
    Mitsuba Internal Python API Render
    '''

    RENDER_API_TYPE = 'INT'

    cancelled = False
    thread = None
    scheduler = None
    fresolver = None

    log_level = {
        'default': EWarn,
        'verbose': EInfo,
        'quiet': EError,
    }

    def __init__(self):
        self.fresolver = main_fresolver.clone()
        self.thread = Thread.registerUnmanagedThread('renderer')
        self.thread.setFileResolver(self.fresolver)
        self.thread.setLogger(main_logger)
        self.render_engine = MtsManager.RenderEngine
        self.render_scene = MtsManager.CurrentScene

        if self.render_engine.is_preview:
            verbosity = 'quiet'
        else:
            verbosity = self.render_scene.mitsuba_engine.log_verbosity
        main_logger.setLogLevel(self.log_level[verbosity])

    def set_scene(self, export_context):
        if export_context.EXPORT_API_TYPE == 'FILE':
            scene_path, scene_file = os.path.split(efutil.filesystem_path(export_context.file_names[0]))
            self.fresolver.appendPath(scene_path)
            self.scene = SceneHandler.loadScene(self.fresolver.resolve(scene_file))
        elif export_context.EXPORT_API_TYPE == 'API':
            self.scene = export_context.scene
        else:
            raise Exception('Unknown exporter type')

    def render_start(self, dest_file):
        self.cancelled = False
        self.queue = RenderQueue()
        self.buffer = InternalBufferDisplay(self)
        self.job = RenderJob('mtsblend_render', self.scene, self.queue)
        self.job.start()
        #out_file = FileStream(dest_file, FileStream.ETruncReadWrite)
        #self.bitmap.write(Bitmap.EPNG, out_file)
        #out_file.close()

    def render_stop(self):
        self.job.cancel()
        # Wait for the render job to finish
        self.queue.waitLeft(0)

    def render_cancel(self):
        self.cancelled = True
        MtsLog('Cancelling render.')

    def test_break(self):
        return self.render_engine.test_break() or self.cancelled

    def is_running(self):
        return self.job.isRunning()

    def returncode(self):
        return 0

    def wait_timer(self):
        pass
target0 = Vector(0.681017, 0.344339, 0.841651)
origin0 = Vector(1.06275, -0.147389, 1.62427)
up0 = Vector(-0.180728, -0.870101, -0.458544)

target1 = Vector(0.0329559, 0.23162, 0.731513)
origin1 = Vector(0.975366, 0.0865425, 0.430146)
up1 = Vector(-0.0605996, -0.960187, 0.272722)

sensor = scene.getSensor()
frames = 60
for x in range(0, frames):
    # Linearly interpolate the look-at parameters between the two keyframes
    frac = float(x) / float(frames - 1)
    target = (1.0 - frac) * target0 + frac * target1
    origin = (1.0 - frac) * origin0 + frac * origin1
    up = (1.0 - frac) * up0 + frac * up1
    #xform = Transform.lookAt(Point(target), Point(origin), up)
    xform = Transform.lookAt(Point(origin), Point(target), up)
    sensor.setWorldTransform(xform)

    # Everything below must run inside the loop for each frame
    filename = "result_" + str(x) + ".exr"
    scene.setDestinationFile(filename)
    job = RenderJob('myRenderJob', scene, queue)
    job.start()
    queue.waitLeft(0)
    queue.join()

print(Statistics.getInstance().getStats())
paramMap = StringMap()
scene = SceneHandler.loadScene(fileResolver.resolve("cbox.xml"), paramMap)
#print(scene)

# Render the same scene for 60 seconds with an increasing number of cores
for nb_cores in range(1, multiprocessing.cpu_count()):
    scheduler = Scheduler.getInstance()
    for i in range(0, nb_cores):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    queue = RenderQueue()
    scene.setDestinationFile('rendererResult_c' + str(nb_cores))
    job = RenderJob('testJob', scene, queue)
    job.start()

    time.sleep(60)
    job.cancel()
    queue.waitLeft(0)
    queue.join()
    scheduler.stop()
    #print(Statistics.getInstance().getStats())