Example no. 1
def Render(self, sampleCount):
    # Copy the base scene and attach the preset lights
    currScene = Scene(self.scene)
    for light in self.light:
        currScene.addChild(light)
    currScene.configure()
    currScene.addSensor(self.cam)
    currScene.setSensor(self.cam)
    self.__createSampler(sampleCount)  # sample count
    currScene.setSampler(self.sampler)

    currScene.setDestinationFile('')
    # Create a render job and insert it into the queue
    job = RenderJob('myRenderJob', currScene, self.queue)
    job.start()
    self.queue.waitLeft(0)
    self.queue.join()

    # End of render - develop the film into a floating-point bitmap
    film = currScene.getFilm()
    size = film.getSize()
    bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
    film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
    result_image = np.array(bitmap.getNativeBuffer())
    currSceneInfo = currScene.getAABB()  # axis-aligned bounding box of the scene
    return result_image, currSceneInfo
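Render() assumes that a worker scheduler and a render queue have already been set up elsewhere (it reads self.queue, self.cam, self.light and self.sampler). A minimal sketch of that setup, using the same Mitsuba 0.x calls that appear in the later examples; storing the queue on the object is an assumption taken from the method body:

import multiprocessing
from mitsuba.core import Scheduler, LocalWorker
from mitsuba.render import RenderQueue

# Start one local worker per CPU core.
scheduler = Scheduler.getInstance()
for i in range(multiprocessing.cpu_count()):
    scheduler.registerWorker(LocalWorker(i, 'worker_%i' % i))
scheduler.start()

# Render() expects this queue to be available as self.queue.
queue = RenderQueue()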
Example no. 2
def makeScene():

    scene = Scene()

    pmgr = PluginManager.getInstance()

    # make shapes
    for i in range(100):
        shapeProps = Properties("sphere")
        shapeProps["center"] = Point(i, i, i)
        shapeProps["radius"] = 0.1
        shape = pmgr.createObject(shapeProps)
        shape.configure()

        scene.addChild(shape)

    # make perspective sensor
    sensorProps = Properties("perspective")
    sensorProps["toWorld"] = Transform.lookAt(Point(0, 0, 10), Point(0, 0, 0),
                                              Vector(0, 1, 0))
    sensorProps["fov"] = 45.0

    sensor = pmgr.createObject(sensorProps)

    # make film
    filmProps = Properties("ldrfilm")
    filmProps["width"] = 640
    filmProps["height"] = 480

    film = pmgr.createObject(filmProps)
    film.configure()

    sensor.addChild("film", film)
    sensor.configure()

    scene.addChild(sensor)
    scene.configure()

    return scene
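makeScene() above only builds and configures the scene; it does not render it. A minimal sketch (not part of the original example) of handing the result to the render queue, assuming a worker scheduler has already been started and an integrator has also been attached to the scene, as in the later examples:

from mitsuba.render import RenderQueue, RenderJob

scene = makeScene()
scene.setDestinationFile('renderedResult')

# Create a render job, put it on the queue, and block until it finishes.
queue = RenderQueue()
job = RenderJob('myRenderJob', scene, queue)
job.start()
queue.waitLeft(0)
queue.join()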
Example no. 4
def build_scene(env):
    #architecture is :
    #-env.name
    #--exterior
    #----ingredients
    #--compartments
    #----surface
    #------ingredients
    #----interior
    #------ingredients
    #create the document and a node for rootenv
    pmgr = PluginManager.getInstance()
    scene = Scene()
    # Create a sensor, film & sample generator
    scene.addChild(setEnv(pmgr))
    scene.addChild(setIntegrator(pmgr))
    scene.addChild(setLight(pmgr))
        
    root_env = scene.createNullObject(str(env.name))
    r = env.exteriorRecipe
    if r:
        scene, root_env = buildRecipe(r, r.name, scene, root_env)
    for o in env.compartments:
        rs = o.surfaceRecipe
        if rs:
            p, s, bb = up(767.0)  # used for lipids
            pp, ss, bbsurface = up(700.0)
            bbsurface = numpy.array([[p - ss / 2.0], [p + ss / 2.0]])
            scene, root_env = buildRecipe(rs, str(o.name) + "_surface", scene, root_env, mask=bbsurface)
        ri = o.innerRecipe
        if ri:
            pp, ss, bbmatrix = up(650.0)
            bbmatrix = numpy.array([[p - ss / 2.0], [p + ss / 2.0]])
            # mask with the interior bounding box
            scene, root_env = buildRecipe(ri, str(o.name) + "_interior", scene, root_env, mask=bbmatrix)
        #build the compartments geometry
#        buildCompartmentsGeom(o,scene,parent=root_env)
        #['ID']['node', 'color', 'id', 'instances', 'mesh', 'parentmesh']
        
    fname = "/home/ludo/"+env.name+str(env.version)+".mxs"
    ok = scene.writeMXS(str(fname));
    print "write",ok
    if not ok:
		print("Error saving MXS ('/home/ludo/"+env.name+str(env.version)+".mxs')");
		return 0;
    return scene
    
    # Set the integrator
    scene.addChild(pmgr.create({'type': 'direct'}))
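build_scene() relies on helper functions (setEnv, setIntegrator, setLight, buildRecipe, up) whose definitions are not shown here. Purely as a hypothetical sketch, a setIntegrator/setLight pair could follow the same PluginManager pattern used by the other examples:

from mitsuba.core import Point, Spectrum

def setIntegrator(pmgr):
    # hypothetical helper: a plain direct-illumination integrator
    return pmgr.create({'type': 'direct'})

def setLight(pmgr):
    # hypothetical helper: a single point emitter
    return pmgr.create({
        'type': 'point',
        'position': Point(5, 0, -10),
        'intensity': Spectrum(100)
    })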
Example no. 5
    def render(self, filename):
        self.scheduler.start()
        # create globals
        integrator = self.pmgr.create({'type': self.setup['integrator']})
        emitter = self.pmgr.create({'type': self.setup['emitter']})
        sensor = self.pmgr.create({
            'type': self.setup['sensor'],
            'film': {
                'type': self.setup['film'],
                'width': self.setup['width'],
                'height': self.setup['height'],
                'pixelFormat': self.setup['pixelFormat'],
                'exposure': self.setup['exposure'],
                'banner': self.setup['banner']
            },
            'sampler': {
                'type': self.setup['sampler'],
                'sampleCount': self.setup['sampleCount']
            },
            'fov': self.setup['fov'],
        })

        scene = Scene()
        scene.addChild(integrator)
        scene.addChild(emitter)
        scene.addChild(sensor)
        for mesh in self.mesh:
            scene.addChild(mesh)
        scene.configure()
        scene.initialize()  # needed to force build of kd-tree

        transformCurr = Transform.lookAt(self.setup['eye'],
                                         self.setup['target'],
                                         self.setup['camera_up'])
        sensor.setWorldTransform(transformCurr)
        scene.setDestinationFile(filename)

        job = RenderJob('job', scene, self.queue)
        job.start()

        self.queue.waitLeft(0)
        self.queue.join()
        self.scheduler.stop()
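render() reads every setting from self.setup, which is not shown in this example. A hypothetical sketch of what such a dictionary could contain, inferred only from the keys accessed above; the concrete values are assumptions, not the original configuration:

from mitsuba.core import Point, Vector

setup = {
    'integrator': 'path',
    'emitter': 'constant',
    'sensor': 'perspective',
    'film': 'ldrfilm',
    'width': 640,
    'height': 480,
    'pixelFormat': 'rgb',
    'exposure': 0.0,
    'banner': False,
    'sampler': 'halton',
    'sampleCount': 16,
    'fov': 45.0,
    'eye': Point(0, 0, -10),      # passed to Transform.lookAt
    'target': Point(0, 0, 0),
    'camera_up': Vector(0, 1, 0),
}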
Example no. 6
def construct_simple_scene(scene_objects, sensor) -> Scene:
    """
    Construct a simple scene containing the given objects and using the given sensor. Uses the path integrator and a
    constant emitter.
    :param scene_objects: All scene child objects to add
    :param sensor: The mitsuba sensor definition to use for this scene
    :return: The scene created, already configured and initialized
    """
    pmgr = PluginManager.getInstance()
    integrator = pmgr.create({'type': 'path'})
    emitter = pmgr.create({'type': 'constant'})

    scene = Scene()
    scene.addChild(integrator)
    scene.addChild(emitter)
    scene.addChild(sensor)
    for obj in scene_objects:
        scene.addChild(obj)

    scene.configure()
    scene.initialize()

    return scene
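A short usage sketch (not part of the original source) showing how a sensor built with the PluginManager, as in the surrounding examples, could be handed to construct_simple_scene():

from mitsuba.core import PluginManager, Transform, Point, Vector

pmgr = PluginManager.getInstance()
sensor = pmgr.create({
    'type': 'perspective',
    'toWorld': Transform.lookAt(Point(0, 0, -10), Point(0, 0, 0), Vector(0, 1, 0)),
    'film': {'type': 'ldrfilm', 'width': 640, 'height': 480},
    'sampler': {'type': 'ldsampler', 'sampleCount': 16}
})
sphere = pmgr.create({'type': 'sphere', 'center': Point(0, 0, 0), 'radius': 1.0})
scene = construct_simple_scene([sphere], sensor)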
def makeScene():

    pmgr = PluginManager.getInstance()

    scene = Scene()

    scene.addChild(pmgr.create({
        'type' : 'perspective',
        'toWorld' : Transform.lookAt(
            Point(0, 0, -10),
            Point(0, 0, 0),
            Vector(0, 1, 0)
        ),
        'film' : {
            'type'   : 'ldrfilm',
            'width'  : 1920,
            'height' : 1080
        },
        'sampler' : {
            'type'        : 'ldsampler',
            'sampleCount' : 2
        }
    }))

    scene.addChild(pmgr.create({
        'type' : 'point',
        'position' : Point(5, 0, -10),
        'intensity' : Spectrum(100)
    }))

    scene.addChild(pmgr.create({
        'type' : 'sphere',
        'center' : Point(0, 0, 0),
        'radius' : 1.0,
        'bsdf' : {
            'type' : 'diffuse',
            'reflectance' : Spectrum(0.4)
        }
    }))

    scene.configure()

    return scene
Example no. 8
class MitsubaRenderer(AbstractRenderer):
    def __init__(self, scene):
        super(MitsubaRenderer, self).__init__(scene)

    def render(self):
        self.__initialize()
        self.__add_integrator()
        self.__add_lights()
        self.__add_active_camera()
        self.__add_active_view()
        self.__add_active_primitives()
        self.__add_others()
        self.__run_mitsuba()

    def __initialize(self):
        self.__initialize_mitsuba_setting()
        self.__initialize_image_setting()
        self.__initialize_geometry_setting()

    def __initialize_mitsuba_setting(self):
        self.plgr = PluginManager.getInstance()
        self.output_dir = self.scene.output_dir

        mitsuba_module_path = os.path.dirname(inspect.getfile(MitsubaRenderer))
        self.file_resolver = Thread.getThread().getFileResolver()
        self.file_resolver.appendPath(
            os.path.join(mitsuba_module_path, "xml_files/"))
        self.file_resolver.appendPath(
            os.path.join(mitsuba_module_path, "textures/"))
        self.file_resolver.appendPath(
            os.path.join(mitsuba_module_path, "shapes/"))

        self.mitsuba_scene = Scene()

    def __initialize_image_setting(self):
        active_view = self.scene.active_view
        self.image_name = os.path.join(self.output_dir, active_view.name)
        self.image_width = active_view.width
        self.image_height = active_view.height

    def __initialize_geometry_setting(self):
        active_view = self.scene.active_view
        self.global_transform = self.scene.global_transform
        self.floor_height = 1e-12
        if len(active_view.vertices) == 0:
            dim = active_view.vertices.shape[1]
            self.transformed_bbox_min = np.zeros(dim)
            self.transformed_bbox_max = np.ones(dim)

    def __add_integrator(self):
        if self.with_alpha:
            integrator = self.plgr.create({
                "type": "volpath",
                "rrDepth": 20
            })
        else:
            integrator = self.plgr.create({
                "type": "direct",
                "shadingSamples": 16
            })
        self.mitsuba_scene.addChild(integrator)

    def __add_lights(self):
        #TODO: load lights from scene
        front_light = self.plgr.create({
            "type": "sphere",
            "center": Point(3.0, 6.0, 4.0),
            "radius": 2.5,
            "emitter": {
                "type": "area",
                "radiance": Spectrum(10.0),
                "samplingWeight": 10.0
            }
        })

        side_light = self.plgr.create({
            "type": "point",
            "position": Point(4.0, 4.0, -1.0),
            "intensity": Spectrum(5.0)
        })

        back_light = self.plgr.create({
            "type": "point",
            "position": Point(-0, 5.0, -1),
            "intensity": Spectrum(5.0)
        })

        self.mitsuba_scene.addChild(front_light)
        #self.mitsuba_scene.addChild(side_light);
        #self.mitsuba_scene.addChild(back_light);

    def __add_active_camera(self):
        active_view = self.scene.active_view
        camera = self.scene.active_camera
        if active_view.transparent_bg:
            pixel_format = "rgba"
        else:
            pixel_format = "rgb"

        crop_bbox = np.array(camera.crop_bbox)
        if np.amax(crop_bbox) <= 1.0:
            # bbox is relative.
            crop_bbox[:, 0] *= self.image_width
            crop_bbox[:, 1] *= self.image_height

        assert (np.all(crop_bbox >= 0))
        assert (np.all(crop_bbox[:, 0] <= self.image_width))
        assert (np.all(crop_bbox[:, 1] <= self.image_height))

        mitsuba_camera = self.plgr.create({
            "type": "perspective",
            "fov": float(camera.fovy),
            "fovAxis": "y",
            "toWorld": Transform.lookAt(Point(*camera.location),
                                        Point(*camera.look_at_point),
                                        Vector(*camera.up_direction)),
            "film": {
                "type": "ldrfilm",
                "width": self.image_width,
                "height": self.image_height,
                "cropOffsetX": int(crop_bbox[0, 0]),
                "cropOffsetY": int(crop_bbox[0, 1]),
                "cropWidth": int(crop_bbox[1, 0] - crop_bbox[0, 0]),
                "cropHeight": int(crop_bbox[1, 1] - crop_bbox[0, 1]),
                "banner": False,
                "pixelFormat": pixel_format,
                "rfilter": {
                    "type": "gaussian"
                }
            },
            "sampler": {
                "type": "halton",
                "sampleCount": 4,
            }
        })
        self.mitsuba_scene.addChild(mitsuba_camera)

    def __add_active_view(self):
        self.__add_view(self.scene.active_view)

    def __add_view(self, active_view, parent_transform=None):
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                if parent_transform is None:
                    view_transform = self.__get_view_transform(active_view)
                else:
                    view_transform = parent_transform * self.__get_view_transform(
                        active_view)
                self.__add_view(view, view_transform)
            return

        if len(active_view.faces) == 0: return

        old_active_view = self.scene.active_view
        self.scene.active_view = active_view
        mesh_file, ext = self.__save_temp_mesh(active_view)
        normalize_transform = self.__get_normalize_transform(active_view)
        view_transform = self.__get_view_transform(active_view)
        if parent_transform is not None:
            view_transform = parent_transform * view_transform
        glob_transform = self.__get_glob_transform()

        total_transform = glob_transform * normalize_transform * view_transform
        material_setting = self.__get_material_setting(active_view)
        setting = {
            "type": ext[1:],
            "filename": mesh_file,
            "faceNormals": False,
            "toWorld": total_transform
        }
        setting.update(material_setting)
        target_shape = self.plgr.create(setting)
        self.mitsuba_scene.addChild(target_shape)

        M = (glob_transform * normalize_transform *
             view_transform).getMatrix()
        M = np.array([
            [M[0, 0], M[0, 1], M[0, 2], M[0, 3]],
            [M[1, 0], M[1, 1], M[1, 2], M[1, 3]],
            [M[2, 0], M[2, 1], M[2, 2], M[2, 3]],
            [M[3, 0], M[3, 1], M[3, 2], M[3, 3]],
        ])
        vertices = active_view.vertices
        vertices = np.hstack((vertices, np.ones((len(vertices), 1))))
        vertices = np.dot(M, vertices.T).T
        vertices = np.divide(vertices[:, 0:3], vertices[:, 3][:, np.newaxis])
        self.transformed_bbox_min = np.amin(vertices, axis=0)
        self.transformed_bbox_max = np.amax(vertices, axis=0)
        center = active_view.center
        floor_height = self.transformed_bbox_min[1]
        if self.floor_height is None or self.floor_height > floor_height:
            self.floor_height = floor_height

        self.scene.active_view = old_active_view

    def __add_active_primitives(self):
        self.__add_primitives(self.scene.active_view)

    def __add_primitives(self, active_view, parent_transform=None):
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                if parent_transform is None:
                    view_transform = self.__get_view_transform(active_view)
                else:
                    view_transform = parent_transform * self.__get_view_transform(
                        active_view)
                self.__add_primitives(view, view_transform)
            return

        old_active_view = self.scene.active_view
        self.scene.active_view = active_view
        scale = active_view.scale
        normalize_transform = self.__get_normalize_transform(active_view)
        view_transform = self.__get_view_transform(active_view)
        if parent_transform is not None:
            view_transform = parent_transform * view_transform
        glob_transform = self.__get_glob_transform()
        total_transform = glob_transform * view_transform * normalize_transform

        primitives = self.scene.active_view.primitives
        for shape in primitives:
            if shape.color[3] <= 0.0: continue
            color = {
                "type": "plastic",
                "diffuseReflectance": Spectrum(shape.color[:3].tolist())
            }
            if shape.color[3] < 1.0:
                color = {
                    "type": "mask",
                    "opacity": Spectrum(active_view.alpha),
                    "bsdf": color
                }
            if isinstance(shape, Cylinder):
                if shape.radius <= 0.0: continue
                setting = self.__add_cylinder(shape)
                setting["bsdf"] = color
                setting["toWorld"] = total_transform
            elif isinstance(shape, Cone):
                if shape.radius <= 0.0: continue
                setting = self.__add_cone(shape)
                setting["bsdf"] = color
                setting["toWorld"] = total_transform * setting["toWorld"]
            elif isinstance(shape, Sphere):
                if shape.radius <= 0.0: continue
                # Due to weird behavior in Mitsuba, the transformation is
                # applied directly to the radius and center variables.
                setting = self.__add_sphere(shape)
                setting["radius"] *= scale
                setting["center"] = total_transform * setting["center"]
                setting["bsdf"] = color
            else:
                raise NotImplementedError(
                    "Unknown primitive: {}".format(shape))

            mitsuba_primitive = self.plgr.create(setting)
            self.mitsuba_scene.addChild(mitsuba_primitive)
        self.scene.active_view = old_active_view

    def __add_sphere(self, shape):
        setting = {
            "type": "sphere",
            "radius": shape.radius,
            "center": Point(*shape.center)
        }
        return setting

    def __add_cylinder(self, shape):
        setting = {
            "type": "cylinder",
            "p0": Point(*shape.end_points[0]),
            "p1": Point(*shape.end_points[1]),
            "radius": shape.radius
        }
        return setting

    def __add_cone(self, shape):
        y_dir = np.array([0.0, 1.0, 0.0])
        v = shape.end_points[1] - shape.end_points[0]
        center = 0.5 * (shape.end_points[0] + shape.end_points[1])
        height = norm(v)
        scale = Transform.scale(Vector(shape.radius, height, shape.radius))
        axis = np.cross(y_dir, v)
        axis_len = norm(axis)
        angle = degrees(atan2(axis_len, np.dot(y_dir, v)))

        if (axis_len > 1e-6):
            axis /= axis_len
            rotate = Transform.rotate(Vector(*axis), angle)
        else:
            axis = np.array([1.0, 0.0, 0.0])
            rotate = Transform.rotate(Vector(*axis), angle)
        translate = Transform.translate(Vector(*center))

        cone_file = self.file_resolver.resolve("cone.ply")
        cone_transform = translate * rotate * scale
        setting = {
            "type": "ply",
            "filename": cone_file,
            "toWorld": cone_transform
        }
        return setting

    def __get_material_setting(self, active_view):
        setting = {}
        if self.with_texture_coordinates:
            diffuse_color = {
                "type": "checkerboard",
                "color0": Spectrum([1.0, 1.0, 1.0]),
                "color1": Spectrum([0.5, 0.5, 0.5]),
                "flipTexCoords": False,
            }
        else:
            if self.with_colors:
                diffuse_color = {"type": "vertexcolors"}
            else:
                diffuse_color = Spectrum(0.2)

        setting["bsdf"] = {
            "type": "roughplastic",
            "distribution": "beckmann",
            "alpha": 0.2,
            "diffuseReflectance": diffuse_color,
        }
        setting["bsdf"] = {
            "type": "twosided",
            "bsdf": setting["bsdf"]
        }
        if self.with_alpha:
            setting["bsdf"] = {
                "type": "mask",
                "opacity": Spectrum(active_view.alpha),
                "bsdf": setting["bsdf"]
            }
        if not self.with_colors and \
                not self.with_alpha and \
                not self.with_texture_coordinates:
            setting["subsurface"] = {
                "type": "dipole",
                "material": "Skimmilk",
                "scale": 0.5
            }
        elif self.with_texture_coordinates:
            setting["bsdf"]["bsdf"]["nonlinear"] = True
        elif not self.with_alpha and not self.with_texture_coordinates:
            setting["subsurface"] = {
                "type": "dipole",
                "material": "sprite",
                "scale": 0.5
            }
        return setting

    def __add_others(self):
        active_view = self.scene.active_view
        if active_view.with_quarter:
            self.__add_quarter()
        if active_view.with_axis:
            self.__add_axis()
        if active_view.background != "n":
            self.__add_floor()

    def __add_quarter(self):
        scale = self.scene.active_view.scale
        radius = 12.13 * scale
        thickness = 0.875 * scale
        face_scale = Transform.scale(Vector(radius))
        tail_offset = Transform.translate(Vector(0, 0, thickness))
        head_offset = Transform.translate(Vector(0, 0, -thickness)) *\
                Transform.scale(Vector(1.0, 1.0, -1.0))

        bbox_diag = 0.5 * norm(self.transformed_bbox_max -
                               self.transformed_bbox_min)
        custom_transform = Transform.translate(
            Vector(0.5, self.floor_height + radius + 0.01, -bbox_diag - 0.01))

        head_texture = self.file_resolver.resolve("head.png")
        tail_texture = self.file_resolver.resolve("tail.png")
        side_texture = self.file_resolver.resolve("side.png")

        quarter_ring = self.plgr.create({
            "type": "cylinder",
            "p0": Point(0.0, 0.0, thickness),
            "p1": Point(0.0, 0.0, -thickness),
            "radius": radius,
            "toWorld": custom_transform,
            "bsdf": {
                "type": "bumpmap",
                "texture": {
                    "type": "scale",
                    "scale": 0.01,
                    "texture": {
                        "type": "bitmap",
                        "filename": side_texture,
                        "gamma": 1.0,
                        "uscale": 100.0,
                    },
                },
                "bsdf": {
                    "type": "roughconductor",
                    "distribution": "ggx",
                    "alpha": 0.5,
                    "material": "Ni_palik"
                    #"diffuseReflectance": Spectrum(0.5)
                }
            }
        })
        head = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * head_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": head_texture
                }
            }
        })
        tail = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * tail_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": tail_texture
                }
            }
        })

        self.mitsuba_scene.addChild(quarter_ring)
        self.mitsuba_scene.addChild(head)
        self.mitsuba_scene.addChild(tail)

    def __add_axis(self):
        raise NotImplementedError("Adding axis is not supported")

    def __add_floor(self):
        rotate_transform = Transform.rotate(Vector(-1, 0, 0), 90)
        scale_transform = Transform.scale(Vector(100, 100, 100))
        translate_transform = Transform.translate(
            Vector(0.0, self.floor_height, 0.0))
        total_transform = translate_transform * scale_transform\
                * rotate_transform

        if self.scene.active_view.background == "d":
            reflectance = Spectrum(0.05)
        elif self.scene.active_view.background == "l":
            reflectance = Spectrum(0.5)
        else:
            reflectance = Spectrum(0.0)

        floor = self.plgr.create({
            "type": "rectangle",
            "toWorld": total_transform,
            "bsdf": {
                "type": "roughdiffuse",
                "diffuseReflectance": reflectance,
                "alpha": 0.5
            }
        })
        self.mitsuba_scene.addChild(floor)

    def __run_mitsuba(self):
        self.mitsuba_scene.configure()

        scheduler = Scheduler.getInstance()
        for i in range(multiprocessing.cpu_count()):
            scheduler.registerWorker(LocalWorker(i, "worker_{}".format(i)))
        scheduler.start()

        queue = RenderQueue()
        self.mitsuba_scene.setDestinationFile(self.image_name)

        job = RenderJob("render job: {}".format(self.image_name),
                        self.mitsuba_scene, queue)
        job.start()

        queue.waitLeft(0)
        queue.join()

        print(Statistics.getInstance().getStats())
        scheduler.stop()

    def __save_temp_mesh(self, active_view):
        basename, ext = os.path.splitext(self.image_name)
        path, name = os.path.split(basename)
        now = datetime.datetime.now()
        stamp = now.isoformat()
        tmp_dir = tempfile.gettempdir()
        ext = ".serialized"

        tmp_mesh_name = os.path.join(tmp_dir,
                                     "{}_{}{}".format(name, stamp, ext))

        vertices = active_view.vertices
        faces = active_view.faces
        voxels = active_view.voxels
        colors = active_view.vertex_colors.reshape((-1, 4), order="C")
        if self.with_texture_coordinates:
            uvs = active_view.texture_coordinates
        else:
            uvs = None

        dim = vertices.shape[1]
        num_faces, vertex_per_face = faces.shape
        if vertex_per_face == 4:
            faces = np.vstack([faces[:, [0, 1, 2]], faces[:, [0, 2, 3]]])
            vertex_per_face = 3
            num_faces *= 2
            colors = colors.reshape((-1, 4, 4), order="C")
            colors = np.vstack([
                colors[:, [0, 1, 2], :].reshape((-1, 4), order="C"),
                colors[:, [0, 2, 3], :].reshape((-1, 4), order="C")
            ])
            if uvs is not None:
                uvs = uvs.reshape((-1, 4, 2), order="C")
                uvs = np.vstack([
                    uvs[:, [0, 1, 2], :].reshape((-1, 2), order="C"),
                    uvs[:, [0, 2, 3], :].reshape((-1, 2), order="C")
                ])
        vertices = vertices[faces.ravel(order="C")]
        assert (len(colors) == len(vertices))
        faces = np.arange(len(vertices), dtype=int).reshape(
            (num_faces, vertex_per_face), order="C")

        mesh = pymesh.form_mesh(vertices, faces)

        if active_view.use_smooth_normal:
            normals = active_view.vertex_normals
        else:
            normals = None
        data = serialize_mesh(mesh, normals, colors, uvs)
        with open(tmp_mesh_name, 'wb') as fout:
            fout.write(data)
        return tmp_mesh_name, ext

    def __get_normalize_transform(self, active_view):
        centroid = active_view.center
        scale = active_view.scale

        normalize_transform = Transform.scale(Vector(scale, scale, scale)) *\
                Transform.translate(Vector(*(-1 * centroid)))
        return normalize_transform

    def __get_view_transform(self, active_view):
        transform = np.eye(4)
        transform[0:3, :] = active_view.transform.reshape((3, 4), order="F")
        view_transform = Transform(
            Matrix4x4(transform.ravel(order="C").tolist()))
        return view_transform

    def __get_glob_transform(self):
        glob_transform = Transform(
            Matrix4x4(self.global_transform.ravel(order="C").tolist()))
        return glob_transform

    @property
    def with_colors(self):
        return self.scene.active_view.with_colors

    @property
    def with_alpha(self):
        return self.scene.active_view.with_alpha

    @property
    def with_wire_frame(self):
        return self.scene.active_view.with_wire_frame

    @property
    def with_uniform_colors(self):
        return self.scene.active_view.with_uniform_colors

    @property
    def with_texture_coordinates(self):
        return self.scene.active_view.with_texture_coordinates
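The class above is driven entirely by the scene object handed to its constructor. A minimal usage sketch, assuming a hypothetical application-level scene description that exposes the attributes the renderer reads (output_dir, active_view, active_camera, global_transform, and so on):

# `app_scene` is a hypothetical scene description object, not a Mitsuba Scene.
renderer = MitsubaRenderer(app_scene)
renderer.render()  # builds the Mitsuba scene and writes the image into app_scene.output_dir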
Example no. 9
class MitsubaRenderer(AbstractRenderer):
    def __init__(self, scene):
        super(MitsubaRenderer, self).__init__(scene);

    def render(self):
        self.__initialize();
        self.__add_integrator();
        self.__add_lights();
        self.__add_active_camera();
        self.__add_active_view();
        self.__add_active_primitives();
        self.__add_others();
        self.__run_mitsuba();

    def __initialize(self):
        self.__initialize_mitsuba_setting();
        self.__initialize_image_setting();
        self.__initialize_geometry_setting();

    def __initialize_mitsuba_setting(self):
        self.plgr = PluginManager.getInstance();
        self.output_dir = self.scene.output_dir;

        mitsuba_module_path = os.path.dirname(
                inspect.getfile(MitsubaRenderer));
        self.file_resolver = Thread.getThread().getFileResolver();
        self.file_resolver.appendPath(os.path.join(
            mitsuba_module_path, "xml_files/"));
        self.file_resolver.appendPath(os.path.join(
            mitsuba_module_path, "textures/"));
        self.file_resolver.appendPath(os.path.join(
            mitsuba_module_path, "shapes/"));

        self.mitsuba_scene = Scene();

    def __initialize_image_setting(self):
        active_view = self.scene.active_view;
        self.image_name = os.path.join(self.output_dir, active_view.name);
        self.image_width = active_view.width;
        self.image_height = active_view.height;

    def __initialize_geometry_setting(self):
        active_view = self.scene.active_view;
        if len(active_view.vertices) > 0:
            self.global_transform = self.scene.global_transform;
            global_rotation = self.scene.global_transform[:3, :3];

            vertices = (active_view.vertices - active_view.center) * active_view.scale;
            vertices = np.dot(active_view.rotation, vertices.T) +\
                    active_view.translation[:, np.newaxis];
            vertices = np.dot(global_rotation, vertices);
            vertices = vertices.T;
            self.transformed_bbox_min = np.amin(vertices, axis=0);
            self.transformed_bbox_max = np.amax(vertices, axis=0);

            center = 0.5 * (self.transformed_bbox_min + self.transformed_bbox_max);
            self.floor_height = self.transformed_bbox_min[1] - center[1];
        else:
            dim = active_view.vertices.shape[1];
            self.transformed_bbox_min = np.zeros(dim);
            self.transformed_bbox_max = np.ones(dim);
            self.floor_height = 0;
            self.global_transform = self.scene.global_transform;

    def __add_integrator(self):
        integrator = self.plgr.create({
            "type": "direct",
            "shadingSamples": 16
            #"type": "ao"
            #"type": "volpath",
            #"type": "path"
            });
        self.mitsuba_scene.addChild(integrator);

    def __add_lights(self):
        #TODO: load lights from scene
        front_light = self.plgr.create({
            "type": "sphere",
            "center": Point(3.0, 6.0, 4.0),
            "radius": 2.5,
            "emitter": {
                "type": "area",
                "radiance": Spectrum(10.0),
                "samplingWeight": 10.0
                }
            });

        side_light = self.plgr.create({
            "type": "point",
            "position": Point(4.0, 4.0, -1.0),
            "intensity": Spectrum(5.0)
            });

        back_light = self.plgr.create({
            "type": "point",
            "position": Point(-0, 5.0, -1),
            "intensity": Spectrum(5.0)
            });

        self.mitsuba_scene.addChild(front_light);
        #self.mitsuba_scene.addChild(side_light);
        #self.mitsuba_scene.addChild(back_light);

    def __add_active_camera(self):
        active_view = self.scene.active_view;
        camera = self.scene.active_camera;
        if active_view.transparent_bg:
            pixel_format = "rgba";
        else:
            pixel_format = "rgb";

        crop_bbox = np.array(camera.crop_bbox);
        if np.amax(crop_bbox) <= 1.0:
            # bbox is relative.
            crop_bbox[:,0] *= self.image_width;
            crop_bbox[:,1] *= self.image_height;

        assert(np.all(crop_bbox >= 0));
        assert(np.all(crop_bbox[:,0] <= self.image_width));
        assert(np.all(crop_bbox[:,1] <= self.image_height));

        mitsuba_camera = self.plgr.create({
            "type": "perspective",
            "fov": float(camera.fovy),
            "fovAxis": "y",
            "toWorld": Transform.lookAt(
                Point(*camera.location),
                Point(*camera.look_at_point),
                Vector(*camera.up_direction)),
            "film": {
                "type": "ldrfilm",
                "width": self.image_width,
                "height": self.image_height,
                "cropOffsetX": int(crop_bbox[0,0]),
                "cropOffsetY": int(crop_bbox[0,1]),
                "cropWidth": int(crop_bbox[1,0] - crop_bbox[0,0]),
                "cropHeight": int(crop_bbox[1,1] - crop_bbox[0,1]),
                "banner": False,
                "pixelFormat": pixel_format,
                "rfilter": {
                    "type": "gaussian"
                    }
                },
            "sampler": {
                "type": "halton",
                "sampleCount": 8
                }
            });
        self.mitsuba_scene.addChild(mitsuba_camera);

    def __add_active_view(self):
        self.__add_view(self.scene.active_view);

    def __add_view(self, active_view):
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                self.__add_view(view);
            return;

        if len(active_view.faces) == 0: return;

        old_active_view = self.scene.active_view;
        self.scene.active_view = active_view;
        mesh_file = self.__save_temp_mesh(active_view);
        normalize_transform = self.__get_normalize_transform(active_view);
        view_transform = self.__get_view_transform(active_view);
        glob_transform = self.__get_glob_transform();

        total_transform = glob_transform * normalize_transform * view_transform;
        material_setting = self.__get_material_setting(active_view);
        setting = {
                "type": "ply",
                "filename": mesh_file,
                "faceNormals": True,
                "toWorld": total_transform
                }
        setting.update(material_setting);
        target_shape = self.plgr.create(setting);
        self.mitsuba_scene.addChild(target_shape);
        self.scene.active_view = old_active_view;

    def __add_active_primitives(self):
        self.__add_primitives(self.scene.active_view);

    def __add_primitives(self, active_view):
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                self.__add_primitives(view);
            return;

        old_active_view = self.scene.active_view;
        self.scene.active_view = active_view;
        scale = active_view.scale;
        normalize_transform = self.__get_normalize_transform(active_view);
        view_transform = self.__get_view_transform(active_view);
        glob_transform = self.__get_glob_transform();
        total_transform = glob_transform * normalize_transform * view_transform;

        primitives = self.scene.active_view.primitives;
        for shape in primitives:
            if shape.color[3] <= 0.0: continue;
            color = {
                    "type": "plastic",
                    "diffuseReflectance": Spectrum(shape.color[:3].tolist())
                    };
            if isinstance(shape, Cylinder):
                if shape.radius <= 0.0: continue;
                setting = self.__add_cylinder(shape);
                setting["bsdf"] = color;
                setting["toWorld"] = total_transform
            elif isinstance(shape, Cone):
                if shape.radius <= 0.0: continue;
                setting = self.__add_cone(shape);
                setting["bsdf"] = color;
                setting["toWorld"] = total_transform * setting["toWorld"];
            elif isinstance(shape, Sphere):
                if shape.radius <= 0.0: continue;
                # Due to weird behavior in Mitsuba, the transformation is
                # applied directly to the radius and center variables.
                setting = self.__add_sphere(shape);
                setting["radius"] *= scale;
                setting["center"] = total_transform * setting["center"];
                setting["bsdf"] = color;
            else:
                raise NotImplementedError("Unknown primitive: {}".format(shape));

            mitsuba_primitive = self.plgr.create(setting);
            self.mitsuba_scene.addChild(mitsuba_primitive);
        self.scene.active_view = old_active_view;

    def __add_sphere(self, shape):
        setting = {
                "type": "sphere",
                "radius": shape.radius,
                "center": Point(*shape.center)
                };
        return setting;

    def __add_cylinder(self, shape):
        setting = {
                "type": "cylinder",
                "p0": Point(*shape.end_points[0]),
                "p1": Point(*shape.end_points[1]),
                "radius": shape.radius
                };
        return setting;

    def __add_cone(self, shape):
        y_dir = np.array([0.0, 1.0, 0.0]);
        v = shape.end_points[1] - shape.end_points[0];
        center = 0.5 * (shape.end_points[0] + shape.end_points[1]);
        height = norm(v);
        scale = Transform.scale(
                Vector(shape.radius, height, shape.radius));
        axis = np.cross(y_dir, v);
        axis_len = norm(axis);
        angle = degrees(atan2(axis_len, np.dot(y_dir, v)));

        if (axis_len > 1e-6):
            axis /= axis_len;
            rotate = Transform.rotate(Vector(*axis), angle);
        else:
            axis = np.array([1.0, 0.0, 0.0]);
            rotate = Transform.rotate(Vector(*axis), angle);
        translate = Transform.translate(Vector(*center));

        cone_file = self.file_resolver.resolve("cone.ply");
        cone_transform = translate * rotate * scale;
        setting = {
                "type": "ply",
                "filename": cone_file,
                "toWorld": cone_transform
                }
        return setting;

    def __get_material_setting(self, active_view):
        setting = {};
        if self.with_wire_frame:
            diffuse_color = {
                    "type": "wireframe",
                    "edgeColor": Spectrum(0.0),
                    "lineWidth": active_view.line_width,
                    };
            if self.with_uniform_colors:
                diffuse_color["interiorColor"] =\
                        Spectrum(active_view.vertex_colors[0][0].tolist()[0:3]);
            #elif self.with_colors:
            #    diffuse_color["interiorColor"] = { "type": "vertexcolors" }
        else:
            if self.with_colors:
                diffuse_color = { "type": "vertexcolors" }
            else:
                diffuse_color = Spectrum(0.2);

        setting["bsdf"] = {
                "type": "roughplastic",
                "distribution": "beckmann",
                "alpha": 0.2,
                "diffuseReflectance": diffuse_color
                };
        setting["bsdf"] = {
                "type": "twosided",
                "bsdf": setting["bsdf"]
                };
        if self.with_alpha:
            setting["bsdf"] = {
                    "type": "mask",
                    "opacity": Spectrum(active_view.alpha),
                    "bsdf": setting["bsdf"]
                    };
        if not self.with_colors and not self.with_alpha and not self.with_wire_frame:
            setting["subsurface"] = {
                    "type": "dipole",
                    "material": "Skimmilk",
                    "scale": 0.5
                    };
        elif not self.with_alpha:
            setting["subsurface"] = {
                    "type": "dipole",
                    "material": "sprite",
                    "scale": 0.5
                    };
        return setting;

    def __add_others(self):
        active_view = self.scene.active_view;
        if active_view.with_quarter:
            self.__add_quarter();
        if active_view.with_axis:
            self.__add_axis();
        if active_view.background != "n":
            self.__add_floor();

    def __add_quarter(self):
        scale = self.scene.active_view.scale;
        radius = 12.13 * scale;
        thickness = 0.875 * scale;
        face_scale = Transform.scale(Vector(radius));
        tail_offset = Transform.translate(Vector(0, 0, thickness));
        head_offset = Transform.translate(Vector(0, 0, -thickness)) *\
                Transform.scale(Vector(1.0, 1.0, -1.0));

        bbox_diag = 0.5 * norm(
                self.transformed_bbox_max - self.transformed_bbox_min);
        custom_transform = Transform.translate(Vector(
            0.5,
            self.floor_height + radius + 0.01,
            -bbox_diag - 0.01));

        head_texture = self.file_resolver.resolve("head.png");
        tail_texture = self.file_resolver.resolve("tail.png");
        side_texture = self.file_resolver.resolve("side.png");

        quarter_ring = self.plgr.create({
            "type": "cylinder",
            "p0": Point(0.0, 0.0, thickness),
            "p1": Point(0.0, 0.0, -thickness),
            "radius": radius,
            "toWorld": custom_transform,
            "bsdf": {
                "type": "bumpmap",
                "texture": {
                    "type": "scale",
                    "scale": 0.01,
                    "texture": {
                        "type": "bitmap",
                        "filename": side_texture,
                        "gamma": 1.0,
                        "uscale": 100.0,
                        },
                    },
                "bsdf": {
                    "type": "roughconductor",
                    "distribution": "ggx",
                    "alpha": 0.5,
                    "material": "Ni_palik"
                    #"diffuseReflectance": Spectrum(0.5)
                    }
                }
            });
        head = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * head_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": head_texture
                    }
                }
            });
        tail = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * tail_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": tail_texture
                    }
                }
            });

        self.mitsuba_scene.addChild(quarter_ring);
        self.mitsuba_scene.addChild(head);
        self.mitsuba_scene.addChild(tail);

    def __add_axis(self):
        raise NotImplementedError("Adding axis is not supported");

    def __add_floor(self):
        rotate_transform = Transform.rotate(Vector(-1, 0, 0), 90);
        scale_transform = Transform.scale(Vector(100, 100, 100));
        translate_transform = Transform.translate(
                Vector(0.0, self.floor_height, 0.0));
        total_transform = translate_transform * scale_transform\
                * rotate_transform;

        if self.scene.active_view.background == "d":
            reflectance = Spectrum(0.05);
        elif self.scene.active_view.background == "l":
            reflectance = Spectrum(0.5);
        else:
            reflectance = Spectrum(0.0);

        floor = self.plgr.create({
            "type": "rectangle",
            "toWorld": total_transform,
            "bsdf": {
                "type": "roughdiffuse",
                "diffuseReflectance": reflectance,
                "alpha": 0.5
                }
            });
        self.mitsuba_scene.addChild(floor);

    def __run_mitsuba(self):
        self.mitsuba_scene.configure();

        scheduler = Scheduler.getInstance();
        for i in range(multiprocessing.cpu_count()):
            scheduler.registerWorker(LocalWorker(i, "worker_{}".format(i)));
        scheduler.start();

        queue = RenderQueue();
        self.mitsuba_scene.setDestinationFile(self.image_name);

        job = RenderJob("render job: {}".format(
            self.image_name), self.mitsuba_scene, queue);
        job.start();

        queue.waitLeft(0);
        queue.join();

        print(Statistics.getInstance().getStats());
        scheduler.stop();

    def __save_temp_mesh(self, active_view):
        basename, ext = os.path.splitext(self.image_name);
        path, name = os.path.split(basename);
        now = datetime.datetime.now()
        stamp = now.isoformat();
        tmp_dir = tempfile.gettempdir();
        tmp_mesh_name = os.path.join(tmp_dir, "{}_{}.ply".format(name,
            stamp));

        vertices = active_view.vertices;
        faces = active_view.faces;
        voxels = active_view.voxels;

        dim = vertices.shape[1];
        num_faces, vertex_per_face = faces.shape;
        vertices = vertices[faces.ravel(order="C")];
        colors = active_view.vertex_colors.reshape((-1, 4), order="C");
        colors *= 255;
        faces = np.arange(len(vertices), dtype=int).reshape(
                (num_faces, vertex_per_face), order="C");

        mesh = pymesh.form_mesh(vertices, faces);
        mesh.add_attribute("red");
        mesh.set_attribute("red", colors[:,0].ravel());
        mesh.add_attribute("green");
        mesh.set_attribute("green", colors[:,1].ravel());
        mesh.add_attribute("blue");
        mesh.set_attribute("blue", colors[:,2].ravel());

        pymesh.save_mesh(tmp_mesh_name, mesh,
                "red", "green", "blue", ascii=True, use_float=True, anonymous=True);
        return tmp_mesh_name;

    def __get_normalize_transform(self, active_view):
        centroid = active_view.center
        scale = active_view.scale

        normalize_transform = Transform.scale(Vector(scale, scale, scale)) *\
                Transform.translate(Vector(*(-1 * centroid)));
        return normalize_transform;

    def __get_view_transform(self, active_view):
        transform = np.eye(4);
        transform[0:3, :] = active_view.transform.reshape((3, 4), order="F");
        view_transform = Transform(Matrix4x4(transform.ravel(order="C").tolist()));
        return view_transform;

    def __get_glob_transform(self):
        glob_transform = Transform(
                Matrix4x4(self.global_transform.ravel(order="C").tolist()));
        return glob_transform;

    @property
    def with_colors(self):
        return self.scene.active_view.with_colors;

    @property
    def with_alpha(self):
        return self.scene.active_view.alpha != 1.0;

    @property
    def with_wire_frame(self):
        return self.scene.active_view.with_wire_frame;

    @property
    def with_uniform_colors(self):
        return self.scene.active_view.with_uniform_colors;
Example no. 10
def addSceneLights(self):
    """Add all of the preset lights to the scene."""
    currScene = Scene(self.scene)
    for light in self.light:
        currScene.addChild(light)
    self.scene = currScene
def init_scene():

    pmgr = PluginManager.getInstance()
    scene = Scene()
    scene.setDestinationFile('renderedResult')

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=mesh_file)
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(model_pos))

    integrator = create_integrator(RenderTargetType.DEPTH, hide_emitters=False)
    #integrator = create_integrator(IntegratorType.DIRECT, hide_emitters=True)
    #integrator = create_integrator(RenderTargetType.NORMAL, hide_emitters=True)

    print(integrator.config)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.HDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'integrator' : {
            'type' : 'multichannel',
            # 'a': {
            #     'type' : 'path'
            # },
            # 'b': {
            #     'type' : 'field',
            #     'field' : 'distance',
            #     'undefined': 0.0
            # },
            'c': {
                'type' : 'field',
                'field' : 'distance',
                'undefined': 0.0
            }
        },
        'sphere' : {
            'type' : 'sphere',
            'bsdf' : {
                'type' : 'dielectric',
                'reflectance' : Spectrum(0.4)
            }
        },
        'envmap' : {
            'type' : 'sunsky',
            'albedo' : Spectrum(0.5)
        },
        'sensor' : {
            'type' : 'perspective',
            'toWorld' : Transform.translate(Vector(0, 0, 0)),
            'sampler' : {
                'type' : 'halton',
                'sampleCount' : 64
            },
            'film' : {
                'type' : 'ldrfilm',
                'width' : 500,
                'height' : 500,
                'pixelFormat': "rgb",
                'channelNames': "normal"
            }
        }
    }

    # # Add a shape
    # scene.addChild(pmgr.create({
    # 'type' : 'sphere',
    # 'center' : Point(0, 0, 0),
    # 'radius' : 1.0,
    # 'bsdf' : {
    # 'type' : 'diffuse',
    # 'reflectance' : Spectrum(0.4)
    # }
    # }))




    scene.addChild(pmgr.create(scene_config))
    scene.configure()


    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())
    # scene_node = pmgr.create(scene_config)
    # scene.addChild(scene_node)
    # scene.configure()

    scene.initialize()
    sceneResID = scheduler.registerResource(scene)

    num_views = 1
    step_size = 360/(num_views)
    for i in range(num_views):
        destination = 'results/result_%03i' % i
        # Create a shallow copy of the scene so that the queue can tell apart the two
        # rendering processes. This takes almost no extra memory
        newScene = mitsuba.render.Scene(scene)
        pmgr = PluginManager.getInstance()
        newSensor = pmgr.createObject(scene.getSensor().getProperties())
        # <change the position of 'newSensor' here>

        rotationCur = Transform.rotate(Vector(0, 1, 0), i*step_size)
        new_pos = rotationCur*camera_pos
        new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
        newSensor.setWorldTransform(new_transform)

        newFilm = pmgr.createObject(scene.getFilm().getProperties())
        newFilm.configure()
        newSensor.addChild(newFilm)
        newSensor.configure()
        newScene.addSensor(newSensor)
        newScene.setSensor(newSensor)
        newScene.setSampler(scene.getSampler())
        newScene.setDestinationFile(destination)
        # Create a render job and insert it into the queue. Note how the resource
        # ID of the original scene is provided to avoid sending the full scene
        # contents over the network multiple times.
        job = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
        job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
def init_scene():

    pmgr = PluginManager.getInstance()
    scene = Scene()
    scene.setDestinationFile('renderedResult')

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename='/media/adrian/Data/Datasets/train/02691156/model_0000003.obj')
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(model_pos))

    integrator = Integrator(Integrator.DIRECT, hide_emitters=True)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'a': a.config,
        'envmap' : {
             'type' : 'sunsky',
             'hour' : 12.0,
             'albedo' : Spectrum(1.0),
             'samplingWeight' : 1.0,
        },
        # 'envmap' : {
        #      'type' : 'constant',
        #      #'hour' : 10.0,
        #      'radiance' : Spectrum(1.0),
        #      'samplingWeight' : 0.5
        # },
        # 'integrator' : {
        #     'type' : 'multichannel',
        #     'depth' : {
        #         'type' : 'field',
        #         'field' : 'distance'
        #     },
        # },
        'integrator' : integrator.config,
        'sensor' : sensor.config
    }

    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())
    scene_node = pmgr.create(scene_config)
    scene.addChild(scene_node)
    scene.configure()

    scene.initialize()
    sceneResID = scheduler.registerResource(scene)

    num_views = 6
    step_size = 360/(num_views)
    for i in range(num_views):
        destination = 'results/result_%03i' % i
        # Create a shallow copy of the scene so that the queue can tell apart the two
        # rendering processes. This takes almost no extra memory
        newScene = mitsuba.render.Scene(scene)
        pmgr = PluginManager.getInstance()
        newSensor = pmgr.createObject(scene.getSensor().getProperties())
        # <change the position of 'newSensor' here>

        rotationCur = Transform.rotate(Vector(0, 1, 0), i*step_size)
        new_pos = rotationCur*camera_pos
        new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
        newSensor.setWorldTransform(new_transform)

        newFilm = pmgr.createObject(scene.getFilm().getProperties())
        newFilm.configure()
        newSensor.addChild(newFilm)
        newSensor.configure()
        newScene.addSensor(newSensor)
        newScene.setSensor(newSensor)
        newScene.setSampler(scene.getSampler())
        newScene.setDestinationFile(destination)
        # Create a render job and insert it into the queue. Note how the resource
        # ID of the original scene is provided to avoid sending the full scene
        # contents over the network multiple times.
        job = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
        job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
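Once queue.waitLeft(0) returns, each rendered frame can also be read back in memory, in the same way as Example no. 1, by developing the film of the copied scene into a Bitmap. A minimal sketch, assuming numpy is imported as np and newScene is the copy created in the loop above:

from mitsuba.core import Bitmap, Point2i
import numpy as np

film = newScene.getFilm()
size = film.getSize()
bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
result = np.array(bitmap.getNativeBuffer())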
Example no. 13
        class ApiExportContext(ExportContextBase):
            '''
            Python API
            '''

            EXPORT_API_TYPE = 'API'

            thread = None
            scheduler = None
            pmgr = None
            scene = None

            def __init__(self):
                super().__init__()

                self.thread = Thread.registerUnmanagedThread('exporter')
                self.thread.setFileResolver(main_fresolver)
                self.thread.setLogger(main_logger)

                self.pmgr = PluginManager.getInstance()
                self.scene = Scene()

            # Functions bound to the Mitsuba extension API

            def spectrum(self, value, mode=''):
                if not mode:
                    mode = self.color_mode

                spec = None

                if isinstance(value, (dict)):
                    if 'type' in value:
                        if value['type'] in {'rgb', 'srgb', 'spectrum'}:
                            spec = self.spectrum(value['value'], value['type'])

                        elif value['type'] == 'blackbody':
                            spec = Spectrum()
                            spec.fromContinuousSpectrum(BlackBodySpectrum(value['temperature']))
                            spec.clampNegative()
                            spec = spec * value['scale']

                elif isinstance(value, (float, int)):
                    spec = Spectrum(value)

                elif isinstance(value, (str)):
                    contspec = InterpolatedSpectrum(self.get_export_path(value))
                    spec = Spectrum()
                    spec.fromContinuousSpectrum(contspec)
                    spec.clampNegative()

                else:
                    try:
                        items = list(value)

                        for i in items:
                            if not isinstance(i, (float, int, tuple)):
                                raise Exception('Error: spectrum list contains an unknown type')

                    except:
                        items = None

                    if items:
                        totitems = len(items)

                        if isinstance(items[0], (float, int)):
                            if totitems == 3 or totitems == 4:
                                spec = Spectrum()

                                if mode == 'srgb':
                                    spec.fromSRGB(items[0], items[1], items[2])

                                else:
                                    spec.fromLinearRGB(items[0], items[1], items[2])

                            elif totitems == 1:
                                spec = Spectrum(items[0])

                            else:
                                MtsLog('Expected spectrum items to be 1, 3 or 4, got %d.' % len(items), type(items), items)

                        else:
                            spec = Spectrum()
                            contspec = InterpolatedSpectrum()

                            for spd in items:
                                (wlen, val) = spd
                                contspec.append(wlen, val)

                            spec.fromContinuousSpectrum(contspec)
                            spec.clampNegative()

                    else:
                        MtsLog('Unknown spectrum type.', type(value), value)

                if spec is None:
                    spec = Spectrum(0.0)

                return spec
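
            # Example usage (hypothetical values; when no mode is given the colour
            # mode falls back to self.color_mode from ExportContextBase):
            #
            #     grey = ctx.spectrum(0.5)                      # uniform Spectrum(0.5)
            #     col = ctx.spectrum([0.2, 0.4, 0.6], 'srgb')   # converted via fromSRGB
            #     bb = ctx.spectrum({'type': 'blackbody',
            #                        'temperature': 6500,
            #                        'scale': 1.0})             # scaled black-body curve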

            def vector(self, x, y, z):
                # Blender is Z up but Mitsuba is Y up, convert the vector
                return Vector(x, z, -y)

            def point(self, x, y, z):
                # Blender is Z up but Mitsuba is Y up, convert the point
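                # e.g. a Blender point (1, 2, 3) becomes Point(1, 3, -2)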
                return Point(x, z, -y)

            def transform_lookAt(self, origin, target, up, scale=None):
                # Blender is Z up but Mitsuba is Y up, convert the lookAt
                transform = Transform.lookAt(
                    Point(origin[0], origin[2], -origin[1]),
                    Point(target[0], target[2], -target[1]),
                    Vector(up[0], up[2], -up[1])
                )

                if scale is not None:
                    transform *= Transform.scale(Vector(scale, scale, 1))

                return transform

            def animated_lookAt(self, motion):
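                # 'motion' is assumed to be a list of keyframes of the form
                # [(time, (origin, target, up, scale)), ...]; two identical keyframes
                # collapse to a single static Transform, otherwise an AnimatedTransform
                # with one entry per keyframe is built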
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, (origin, target, up, scale)) in motion:
                        transform.appendTransform(t, self.transform_lookAt(origin, target, up, scale))

                else:
                    (origin, target, up, scale) = motion[0][1]
                    transform = self.transform_lookAt(origin, target, up, scale)

                return transform

            def transform_matrix(self, matrix):
                # Blender is Z up but Mitsuba is Y up, convert the matrix
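                # axis_conversion() comes from Blender's bpy_extras.io_utils;
                # matrix_to_list() is assumed to flatten the 4x4 matrix into the 16
                # values expected by Matrix4x4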
                global_matrix = axis_conversion(to_forward="-Z", to_up="Y").to_4x4()
                l = matrix_to_list(global_matrix * matrix)
                mat = Matrix4x4(l)
                transform = Transform(mat)

                return transform

            def animated_transform(self, motion):
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, m) in motion:
                        transform.appendTransform(t, self.transform_matrix(m))

                else:
                    transform = self.transform_matrix(motion[0][1])

                return transform

            def configure(self):
                '''
                Call Scene configure
                '''

                self.scene.addChild(self.pmgr.create(self.scene_data))
                self.scene.configure()

                # Reset the volume redundancy check
                ExportedVolumes.reset_vol_list()

            def cleanup(self):
                self.exit()

            def exit(self):
                # Do nothing
                pass
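
        # A minimal usage sketch (assuming main_fresolver, main_logger and the
        # scene_data dict inherited from ExportContextBase are available):
        #
        #     ctx = ApiExportContext()
        #     ctx.scene_data['sensor'] = {
        #         'type': 'perspective',
        #         'toWorld': ctx.transform_lookAt((0, -10, 2), (0, 0, 0), (0, 0, 1)),
        #         'fov': 45.0,
        #     }
        #     ctx.configure()   # builds and configures the Mitsuba Scene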
Esempio n. 14
0
def renderVPLS(vpls, cam, scene, target):
    ''' Render the VPLs with the camera looking at the desired target in the scene.
    The result is an image used to define the 3D space where the camera and object
    can be placed in the environment. Target can be either 'roof' or 'floor'. '''

    pmgr = PluginManager.getInstance()
    scheduler = Scheduler.getInstance()

    if (target == 'roof'):
        target_height = scene.HeightRoof
    else:
        target_height = scene.HeightFloor

    # Start up the scheduling system with one worker per local core
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    # Create a queue for tracking render jobs
    queue = RenderQueue()

    nVPLS = int(vpls[0][1]) * 4

    # Note: the incoming 'scene' argument is only needed for its height fields above;
    # from here on a fresh Mitsuba Scene is built for the VPL visualisation.
    scene = Scene()

    for i in xrange(1, nVPLS, 4):
        if float(vpls[i][2]) == target_height:
            scene.addChild(pmgr.create({
                'type': 'sphere',
                'center': Point(float(vpls[i][1]), float(vpls[i][2]), float(vpls[i][3])),
                'radius': 1.0,
                'emitter': pmgr.create({
                    'type': 'area',
                    'radiance': Spectrum(10.0),
                }),
            }))

    scene.addChild(pmgr.create({
        'type': 'perspective',
        'toWorld': Transform.lookAt(
            Point(cam.origin[0], cam.origin[1], cam.origin[2]),
            Point(cam.target[0], cam.target[1], cam.target[2]),
            Vector(cam.up[0], cam.up[1], cam.up[2])),
        'fov': cam.fov,
        'film': {
            'type': 'ldrfilm',
            'width': cam.width,
            'height': cam.height,
            'banner': False,
        },
        'sampler': {
            'type': 'halton',
            'sampleCount': 1,
        },
    }))

    scene.addChild(pmgr.create({'type': 'direct'}))
    scene.configure()

    if (target == 'roof'):
        filename = 'renderVPLSRoof'
    else:
        filename = 'renderVPLSFloor'

    scene.setDestinationFile(filename)

    # Create a render job and insert it into the queue
    job = RenderJob('myRenderJob', scene, queue)
    job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
    queue.join()

    scheduler.stop()
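
# The 'vpls' argument is assumed to be a parsed VPL dump in which vpls[0][1] holds
# the number of VPLs and every fourth row starting at index 1 carries one VPL, with
# its x/y/z position in columns 1 to 3. A hypothetical call could then look like:
#
#     renderVPLS(vpls, cam, room, 'roof')
#
# where 'cam' exposes origin/target/up/fov/width/height and 'room' exposes
# HeightRoof/HeightFloor, as used above.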