Example #1
0
def test03_depth_packet_stairs(variant_packet_rgb):
    """Consistency test: packet-mode ray_intersect vs. the naive intersection
    routine and ray_test on a procedurally generated staircase scene."""
    from mitsuba.core import Ray3f as Ray3fX, Properties
    from mitsuba.render import Scene

    if mitsuba.core.MTS_ENABLE_EMBREE:
        pytest.skip("EMBREE enabled")

    # Build the packet-variant scene containing the staircase geometry.
    scene_props = Properties("scene")
    scene_props["_unnamed_0"] = create_stairs_packet(11)
    scene = Scene(scene_props)

    # Individual rays are assembled through the scalar variant.
    mitsuba.set_variant("scalar_rgb")
    from mitsuba.core import Ray3f, Vector3f

    n = 4
    inv_n = 1.0 / (n - 1)
    rays = Ray3fX.zero(n * n)
    direction = [0, 0, -1]
    wavelengths = []

    # One downward ray per grid cell over the unit square.
    for idx in range(n * n):
        gx, gy = divmod(idx, n)
        origin = Vector3f(gx * inv_n, gy * inv_n, 2)
        origin = origin * 0.999 + 0.0005  # nudge off exact step edges
        rays[idx] = Ray3f(origin, direction, 0, 100, 0.5, wavelengths)

    res_naive = scene.ray_intersect_naive(rays)
    res = scene.ray_intersect(rays)
    res_shadow = scene.ray_test(rays)

    # TODO: spot-check (here, we only check consistency)
    assert ek.all(res_shadow == res.is_valid())
    compare_results(res_naive, res, atol=1e-6)
Example #2
0
            def __init__(self):
                """Set up the exporter thread and Mitsuba plugin access.

                Registers the current (unmanaged) thread with Mitsuba's
                threading layer, attaches the shared file resolver and
                logger, then grabs the PluginManager singleton and a fresh
                Scene instance.
                """
                super().__init__()

                # main_fresolver / main_logger come from the enclosing scope.
                self.thread = Thread.registerUnmanagedThread('exporter')
                self.thread.setFileResolver(main_fresolver)
                self.thread.setLogger(main_logger)

                self.pmgr = PluginManager.getInstance()
                self.scene = Scene()
Example #3
0
def build_scene(env):
    """Build a Maxwell scene from an autoPACK environment and write it to disk.

    Scene architecture:
      env.name
        exterior
          ingredients
        compartments
          surface
            ingredients
          interior
            ingredients

    Returns the scene object on success, 0 if writing the .mxs file failed.
    """
    pmgr = PluginManager.getInstance()
    scene = Scene()
    # Create a sensor, film & sample generator, integrator and light.
    scene.addChild(setEnv(pmgr))
    scene.addChild(setIntegrator(pmgr))
    scene.addChild(setLight(pmgr))

    root_env = scene.createNullObject(str(env.name))
    r = env.exteriorRecipe
    if r:
        scene, root_env = buildRecipe(r, r.name, scene, root_env)
    for o in env.compartments:
        rs = o.surfaceRecipe
        if rs:
            p, s, bb = up(767.0)  # used for lipids
            pp, ss, bbsurface = up(700.0)
            # NOTE(review): mixes p (767 slice) with ss (700 slice);
            # this looks intentional for the lipid layer but should be
            # confirmed against 'up'.
            bbsurface = numpy.array([[p - ss / 2.0], [p + ss / 2.0]])
            scene, root_env = buildRecipe(rs, str(o.name) + "_surface",
                                          scene, root_env, mask=bbsurface)
        ri = o.innerRecipe
        if ri:
            pp, ss, bbmatrix = up(650.0)
            # Bug fix: the interior branch previously referenced 'p' and
            # passed 'bbsurface' as the mask -- both are undefined
            # (NameError) when the compartment has no surface recipe.
            bbmatrix = numpy.array([[pp - ss / 2.0], [pp + ss / 2.0]])
            scene, root_env = buildRecipe(ri, str(o.name) + "_interior",
                                          scene, root_env, mask=bbmatrix)
        # build the compartments geometry
        # buildCompartmentsGeom(o, scene, parent=root_env)

    fname = "/home/ludo/" + env.name + str(env.version) + ".mxs"
    ok = scene.writeMXS(str(fname))
    print("write %s" % ok)
    if not ok:
        print("Error saving MXS ('%s')" % fname)
        return 0
    return scene
Example #4
0
    def __initialize_mitsuba_setting(self):
        """Fetch the plugin manager, remember the output directory, and
        register the module's bundled asset folders with the file resolver."""
        self.plgr = PluginManager.getInstance()
        self.output_dir = self.scene.output_dir

        module_dir = os.path.dirname(inspect.getfile(MitsubaRenderer))
        self.file_resolver = Thread.getThread().getFileResolver()
        # Assets (xml templates, textures, meshes) live next to this module.
        for asset_dir in ("xml_files/", "textures/", "shapes/"):
            self.file_resolver.appendPath(os.path.join(module_dir, asset_dir))

        self.mitsuba_scene = Scene()
Example #5
0
def make_synthetic_scene(n_steps):
    """Return a Scene containing only a staircase with *n_steps* steps."""
    from mitsuba.core import Properties
    from mitsuba.render import Scene

    stairs_props = Properties("scene")
    stairs_props["_unnamed_0"] = create_stairs(n_steps)
    return Scene(stairs_props)
def render_scene(passes, scene_config, render_config):
    """Render every pass for each camera in the configured iteration range.

    passes        -- render passes; each exposes .type and .config
    scene_config  -- base scene description dict (no longer mutated)
    render_config -- dict with "CameraParams", "iteration_start",
                     "iteration_end" and "OutputPath"

    Relies on module-level 'queue', 'register_scene_config', 'mitsuba',
    'RenderTargetType' and 'RenderJob'. Blocks until the jobs of each
    iteration have finished.
    """
    camera_params = render_config["CameraParams"]
    start = render_config["iteration_start"]
    end = render_config["iteration_end"]
    output_path = render_config["OutputPath"]
    pmgr = PluginManager.getInstance()

    for index in range(start, end):
        camera = camera_params[index]
        new_transform = camera["new_transform"]
        for p in passes:
            destination = output_path + '%03i' % (index)
            if p.type == RenderTargetType.DEPTH:
                destination += "_d"
            elif p.type == RenderTargetType.NORMAL:
                destination += "_n"

            scene = Scene()
            # Bug fix: 'pass_config = scene_config' only aliased the dict,
            # so assigning the integrator leaked into the caller's
            # scene_config. Use a shallow per-pass copy instead.
            pass_config = dict(scene_config)
            pass_config['integrator'] = p.config
            sceneResID = register_scene_config(scene, pmgr, pass_config)

            # Shallow scene copy so the queue can tell the jobs apart;
            # this takes almost no extra memory.
            newScene = mitsuba.render.Scene(scene)
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            newSensor.setWorldTransform(new_transform)

            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(destination)

            # The resource ID of the original scene is provided to avoid
            # sending the full scene contents over the network repeatedly.
            j = RenderJob('myRenderJob' + str(index), newScene, queue, sceneResID)
            j.start()

        queue.waitLeft(0)
Example #7
0
            def __init__(self):
                """Register this exporter thread with Mitsuba and create
                a plugin-manager handle plus an empty scene."""
                super().__init__()

                # Attach the shared resolver/logger from the enclosing scope.
                exporter_thread = Thread.registerUnmanagedThread('exporter')
                exporter_thread.setFileResolver(main_fresolver)
                exporter_thread.setLogger(main_logger)
                self.thread = exporter_thread

                self.pmgr = PluginManager.getInstance()
                self.scene = Scene()
def modifyScene(scene, index, config, pmgr, destinationFolder):
    """Clone *scene* with a sensor built for render *index* and point its
    output at a file derived from *destinationFolder*."""
    destination = destinationFolder + '-result_%03i' % index

    # Shallow copy: lets the queue tell the two rendering processes apart
    # while sharing almost all memory with the original scene.
    clone = Scene(scene)

    # Fresh sensor (film & sample generator) for this render index.
    sensor = createSensor(pmgr, config, index)
    sensor.configure()
    clone.addSensor(sensor)
    clone.setSensor(sensor)
    clone.setDestinationFile(destination)

    # if 'envmap' in config:
    # 	addEnvmap(clone, config, pmgr)

    clone.configure()
    return clone
def makeScene():
    """Assemble a minimal test scene: a perspective camera, one point
    light and a single diffuse sphere at the origin."""
    pmgr = PluginManager.getInstance()
    scene = Scene()

    camera = {
        'type' : 'perspective',
        'toWorld' : Transform.lookAt(
            Point(0, 0, -10),
            Point(0, 0, 0),
            Vector(0, 1, 0)
        ),
        'film' : {
            'type'   : 'ldrfilm',
            'width'  : 1920,
            'height' : 1080
        },
        'sampler' : {
            'type'        : 'ldsampler',
            'sampleCount' : 2
        }
    }

    light = {
        'type' : 'point',
        'position' : Point(5, 0, -10),
        'intensity' : Spectrum(100)
    }

    sphere = {
        'type' : 'sphere',
        'center' : Point(0, 0, 0),
        'radius' : 1.0,
        'bsdf' : {
            'type' : 'diffuse',
            'reflectance' : Spectrum(0.4)
        }
    }

    # Instantiate each plugin description and attach it to the scene.
    for description in (camera, light, sphere):
        scene.addChild(pmgr.create(description))

    scene.configure()
    return scene
Example #10
0
def makeScene():
    """Build a scene of 100 tiny spheres along the (1,1,1) diagonal plus a
    perspective sensor carrying a 640x480 LDR film."""
    scene = Scene()
    pmgr = PluginManager.getInstance()

    # Shapes: spheres of radius 0.1 centered at (i, i, i).
    for i in range(100):
        props = Properties("sphere")
        props["center"] = Point(i, i, i)
        props["radius"] = 0.1
        sphere = pmgr.createObject(props)
        sphere.configure()
        scene.addChild(sphere)

    # Perspective sensor looking at the origin from (0, 0, 10).
    sensor_props = Properties("perspective")
    sensor_props["toWorld"] = Transform.lookAt(Point(0, 0, 10), Point(0, 0, 0),
                                               Vector(0, 1, 0))
    sensor_props["fov"] = 45.0
    sensor = pmgr.createObject(sensor_props)

    # LDR film attached under the sensor.
    film_props = Properties("ldrfilm")
    film_props["width"] = 640
    film_props["height"] = 480
    film = pmgr.createObject(film_props)
    film.configure()

    sensor.addChild("film", film)
    sensor.configure()

    scene.addChild(sensor)
    scene.configure()
    return scene
Example #11
0
def makeScene():
    """Build a test scene: 100 small spheres along the (1,1,1) diagonal,
    viewed by a 45-degree perspective sensor with a 640x480 LDR film.

    Returns the configured Scene, ready to render.
    """

    scene = Scene()

    pmgr = PluginManager.getInstance()

    # make shapes: one sphere of radius 0.1 at (i, i, i) for each i
    for i in range(100):
        shapeProps = Properties("sphere")
        shapeProps["center"] = Point(i, i, i)
        shapeProps["radius"] = 0.1
        shape = pmgr.createObject(shapeProps)
        shape.configure()

        scene.addChild(shape)

    # make perspective sensor looking at the origin from (0, 0, 10)
    sensorProps = Properties("perspective")
    sensorProps["toWorld"] = Transform.lookAt(Point(0, 0, 10), Point(0, 0, 0), Vector(0, 1, 0))
    sensorProps["fov"] = 45.0

    sensor = pmgr.createObject(sensorProps)

    # make film (sensor's image output)
    filmProps = Properties("ldrfilm")
    filmProps["width"]  = 640
    filmProps["height"] = 480

    film = pmgr.createObject(filmProps)
    film.configure()

    sensor.addChild("film", film)
    sensor.configure()

    scene.addChild(sensor)
    scene.configure()

    return scene
def init_scene():
    """Render six views of a model by rotating the camera about the Y axis.

    Builds a scene (PLY model + sunsky envmap + direct-lighting integrator +
    perspective sensor), registers it with the scheduler, then enqueues one
    RenderJob per view. Relies on module-level 'scheduler', 'queue',
    'num_samples', 'image_width', 'image_height' and the mitsuba bindings.
    Blocks until all jobs have finished.
    """
    pmgr = PluginManager.getInstance()
    scene = Scene()
    scene.setDestinationFile('renderedResult')

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    # NOTE(review): hard-coded absolute mesh path; consider parameterizing.
    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename='/media/adrian/Data/Datasets/train/02691156/model_0000003.obj')
    # (The previously-created, never-used cube shape 'b' was removed.)

    integrator = Integrator(Integrator.DIRECT, hide_emitters=True)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'a': a.config,
        'envmap' : {
             'type' : 'sunsky',
             'hour' : 12.0,
             'albedo' : Spectrum(1.0),
             'samplingWeight' : 1.0,
        },
        'integrator' : integrator.config,
        'sensor' : sensor.config
    }

    scene_node = pmgr.create(scene_config)
    scene.addChild(scene_node)
    scene.configure()

    scene.initialize()
    sceneResID = scheduler.registerResource(scene)

    num_views = 6
    step_size = 360 / num_views
    for i in range(num_views):
        destination = 'results/result_%03i' % i
        # Shallow copy of the scene so the queue can tell the render jobs
        # apart. This takes almost no extra memory.
        newScene = mitsuba.render.Scene(scene)
        newSensor = pmgr.createObject(scene.getSensor().getProperties())

        # Rotate the camera about the Y axis to obtain view i.
        rotationCur = Transform.rotate(Vector(0, 1, 0), i * step_size)
        new_pos = rotationCur * camera_pos
        new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
        newSensor.setWorldTransform(new_transform)

        newFilm = pmgr.createObject(scene.getFilm().getProperties())
        newFilm.configure()
        newSensor.addChild(newFilm)
        newSensor.configure()
        newScene.addSensor(newSensor)
        newScene.setSensor(newSensor)
        newScene.setSampler(scene.getSampler())
        newScene.setDestinationFile(destination)
        # Pass the original scene's resource ID so the full scene contents
        # are not re-sent over the network for every job.
        job = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
        job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
def init_scene():
    """Multi-pass, multi-view rendering driver.

    For each (x, y) angle pair and each camera-distance scale step, renders
    every pass in 'passes' (normal + index integrator) as a separate
    RenderJob. Relies on module-level 'queue', 'mesh_file', 'num_samples',
    'image_width', 'image_height', 'register_scene_config' and the mitsuba
    bindings. Blocks until all jobs finish, then prints the elapsed time.
    """

    pmgr = PluginManager.getInstance()

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    #camera_pos = Vector(4, 44.0, -7.0)
    #model_pos = Vector(0.0, 0.0, 0.0)

    #camera_matrix = Transform(Matrix4x4([[0.1,0.017,-1.0,0.0],[0.0,1.0,0.0,0.1],[1.0,0.0,0.1,0.0],[4.3,-6.0,-7.0,1.0]]))

    cube_pos = Vector(-0.5, 0.0, -1.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=mesh_file)
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(cube_pos))

    #integrator = create_integrator(RenderTargetType.AO, hide_emitters=False)
    integrator = create_integrator(RenderTargetType.DIRECT, hide_emitters=True)
    depth_integrator = create_integrator(RenderTargetType.INDEX, hide_emitters=True)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
        'a': a.config,
        # 'b':b.config,
        # 'envmap' : {
        #      'type' : 'sunsky',
        #      'hour' : 12.0,
        #      'albedo' : Spectrum(1.0),
        #      'samplingWeight' : 1.0,
        # },
        # 'envmap' : {
        #      'type' : 'sunsky',
        #      #'hour' : 10.0,
        #      'radiance': Spectrum(1.0),
        #      'samplingWeight': 1.0
        # },
        'sensor' : sensor.config
    }

    # scene_config['cube'] = create_object('cube', Transform.translate(model_pos), bsdf=create_bsdf())

    yangles = range(0,1)
    xangles = range(0,1)
    num_views = len(xangles) * len(yangles)
    #step_size = 360/(num_views)
    step_size = 1

    #List containing the integrators to use in Multi-Pass rendering
    passes = [integrator, depth_integrator]

    start = time.time()
    render_count = 0
    num_scale = 1
    # NOTE(review): 'num_scale/2' is integer division on Python 2 (0) but
    # float division on Python 3 (0.5) -- confirm intended interpreter.
    offset = num_scale/2
    print ("Size:", (num_views*num_scale))

    for x in xangles:
        for y in yangles:
            original_Z = camera_pos[2]
            for s in range(num_scale):

                #for y in range(yangles):
                    #Set the correct destination file
                # NOTE(review): this does NOT copy -- 'new_camera' aliases
                # 'camera_pos', so the assignment below mutates camera_pos
                # in place. Harmless with num_scale == 1 but a latent bug.
                new_camera = camera_pos
                z = (s - offset)
                print ("Z:", z)
                print(new_camera[2])
                new_camera[2] = original_Z + z
                print(new_camera[2])

                for p in passes:

                    i = render_count
                    scene = Scene()
                    # NOTE(review): alias, not a copy -- setting 'integrator'
                    # below mutates scene_config for all later passes.
                    pass_config = scene_config

                    destination = 'results/%03i' % i
                    if p.type == RenderTargetType.DEPTH:
                        destination += "_d"
                    elif p.type == RenderTargetType.NORMAL:
                        destination += "_n"

                    #Set the pass integrator
                    pass_config['integrator'] = p.config
                    sceneResID = register_scene_config(scene, pmgr, scene_config)

                    # Create a shallow copy of the scene so that the queue can tell apart the two
                    # rendering processes. This takes almost no extra memory
                    newScene = mitsuba.render.Scene(scene)
                    pmgr = PluginManager.getInstance()
                    newSensor = pmgr.createObject(scene.getSensor().getProperties())

                    #Calculate the rotations
                    yrotation = Transform.rotate(Vector(1, 0, 0), y)
                    xrotation = Transform.rotate(Vector(0, 1, 0), x)
                    rotationCur = xrotation * yrotation

                    #Set the new camera position, applying the rotations
                    new_pos = rotationCur*new_camera
                    new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
                    newSensor.setWorldTransform(new_transform)

                    newFilm = pmgr.createObject(scene.getFilm().getProperties())
                    newFilm.configure()
                    newSensor.addChild(newFilm)
                    newSensor.configure()
                    newScene.addSensor(newSensor)
                    newScene.setSensor(newSensor)
                    newScene.setSampler(scene.getSampler())
                    newScene.setDestinationFile(destination)

                    # Create a render job and insert it into the queue. Note how the resource
                    # ID of the original scene is provided to avoid sending the full scene
                    # contents over the network multiple times.
                    j = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
                    j.start()

                render_count += 1


    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)

    finish = time.time()
    print("Run Time:", finish-start)
Example #14
0
class MitsubaRenderer(AbstractRenderer):
    def __init__(self, scene):
        """Initialize the renderer with the scene it will draw."""
        super(MitsubaRenderer, self).__init__(scene)

    def render(self):
        """Run the full setup pipeline in order, then invoke Mitsuba."""
        pipeline = (
            self.__initialize,
            self.__add_integrator,
            self.__add_lights,
            self.__add_active_camera,
            self.__add_active_view,
            self.__add_active_primitives,
            self.__add_others,
            self.__run_mitsuba,
        )
        for stage in pipeline:
            stage()

    def __initialize(self):
        """Prepare Mitsuba, image and geometry state before scene assembly."""
        self.__initialize_mitsuba_setting()
        self.__initialize_image_setting()
        self.__initialize_geometry_setting()

    def __initialize_mitsuba_setting(self):
        """Fetch the plugin manager, remember the output directory, and
        register the module's asset folders with the file resolver."""
        self.plgr = PluginManager.getInstance()
        self.output_dir = self.scene.output_dir

        module_dir = os.path.dirname(inspect.getfile(MitsubaRenderer))
        self.file_resolver = Thread.getThread().getFileResolver()
        # Bundled assets live in subfolders next to this module.
        for subdir in ("xml_files/", "textures/", "shapes/"):
            self.file_resolver.appendPath(os.path.join(module_dir, subdir))

        self.mitsuba_scene = Scene()

    def __initialize_image_setting(self):
        """Derive the output image name and dimensions from the active view."""
        view = self.scene.active_view
        self.image_name = os.path.join(self.output_dir, view.name)
        self.image_width = view.width
        self.image_height = view.height

    def __initialize_geometry_setting(self):
        """Compute the transformed bounding box and floor height.

        Applies the active view's normalize/rotate/translate transform and
        the scene's global rotation to the view's vertices, then records the
        axis-aligned bbox and the floor height (bottom of the bbox relative
        to its center, along axis 1).
        """
        active_view = self.scene.active_view;
        if len(active_view.vertices) > 0:
            self.global_transform = self.scene.global_transform;
            # Only the rotational part of the global transform is applied
            # to the vertices here.
            global_rotation = self.scene.global_transform[:3, :3];

            # Center, scale, then rotate/translate per the view settings.
            vertices = (active_view.vertices - active_view.center) * active_view.scale;
            vertices = np.dot(active_view.rotation, vertices.T) +\
                    active_view.translation[:, np.newaxis];
            vertices = np.dot(global_rotation, vertices);
            vertices = vertices.T;
            self.transformed_bbox_min = np.amin(vertices, axis=0);
            self.transformed_bbox_max = np.amax(vertices, axis=0);

            # Floor sits at the bottom of the bbox, measured from its center.
            center = 0.5 * (self.transformed_bbox_min + self.transformed_bbox_max);
            self.floor_height = self.transformed_bbox_min[1] - center[1];
        else:
            # No vertices: fall back to a unit bbox and floor at 0.
            dim = active_view.vertices.shape[1];
            self.transformed_bbox_min = np.zeros(dim);
            self.transformed_bbox_max = np.ones(dim);
            self.floor_height = 0;
            self.global_transform = self.scene.global_transform;

    def __add_integrator(self):
        """Attach a direct-lighting integrator with 16 shading samples.

        Alternatives tried previously: "ao", "volpath", "path".
        """
        config = {
            "type": "direct",
            "shadingSamples": 16
        }
        self.mitsuba_scene.addChild(self.plgr.create(config))

    def __add_lights(self):
        """Create the light sources; only the front area light is attached.

        The side and back point lights are instantiated but deliberately
        left out of the scene (kept for quick experimentation).
        TODO: load lights from scene.
        """
        front_light = self.plgr.create({
            "type": "sphere",
            "center": Point(3.0, 6.0, 4.0),
            "radius": 2.5,
            "emitter": {
                "type": "area",
                "radiance": Spectrum(10.0),
                "samplingWeight": 10.0
            }
        })

        side_light = self.plgr.create({
            "type": "point",
            "position": Point(4.0, 4.0, -1.0),
            "intensity": Spectrum(5.0)
        })

        back_light = self.plgr.create({
            "type": "point",
            "position": Point(-0, 5.0, -1),
            "intensity": Spectrum(5.0)
        })

        self.mitsuba_scene.addChild(front_light)

    def __add_active_camera(self):
        """Create the Mitsuba perspective camera from the scene's active
        camera/view and attach it to the Mitsuba scene.

        The film is cropped to the camera's crop_bbox; a bbox whose maximum
        is <= 1.0 is treated as relative and scaled by the image size.
        """
        active_view = self.scene.active_view;
        camera = self.scene.active_camera;
        # Transparent background requires an alpha channel in the film.
        if active_view.transparent_bg:
            pixel_format = "rgba";
        else:
            pixel_format = "rgb";

        crop_bbox = np.array(camera.crop_bbox);
        if np.amax(crop_bbox) <= 1.0:
            # bbox is relative: scale to pixel coordinates.
            crop_bbox[:,0] *= self.image_width;
            crop_bbox[:,1] *= self.image_height;

        # Sanity-check the crop region against the image bounds.
        assert(np.all(crop_bbox >= 0));
        assert(np.all(crop_bbox[:,0] <= self.image_width));
        assert(np.all(crop_bbox[:,1] <= self.image_height));

        mitsuba_camera = self.plgr.create({
            "type": "perspective",
            "fov": float(camera.fovy),
            "fovAxis": "y",
            "toWorld": Transform.lookAt(
                Point(*camera.location),
                Point(*camera.look_at_point),
                Vector(*camera.up_direction)),
            "film": {
                "type": "ldrfilm",
                "width": self.image_width,
                "height": self.image_height,
                # crop_bbox rows are (min, max); columns are (x, y).
                "cropOffsetX": int(crop_bbox[0,0]),
                "cropOffsetY": int(crop_bbox[0,1]),
                "cropWidth": int(crop_bbox[1,0] - crop_bbox[0,0]),
                "cropHeight": int(crop_bbox[1,1] - crop_bbox[0,1]),
                "banner": False,
                "pixelFormat": pixel_format,
                "rfilter": {
                    "type": "gaussian"
                    }
                },
            "sampler": {
                "type": "halton",
                "sampleCount": 8
                }
            });
        self.mitsuba_scene.addChild(mitsuba_camera);

    def __add_active_view(self):
        """Add the mesh geometry of the currently active view."""
        self.__add_view(self.scene.active_view)

    def __add_view(self, active_view):
        """Recursively add a view's mesh as a PLY shape to the Mitsuba scene.

        Views with subviews are expanded recursively; views without faces
        are skipped. While a view is processed, it is temporarily installed
        as scene.active_view so the transform/material helpers read from it;
        the previous active view is restored afterwards.
        """
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                self.__add_view(view);
            return;

        if len(active_view.faces) == 0: return;

        # Temporarily swap in this view; helpers below depend on it.
        old_active_view = self.scene.active_view;
        self.scene.active_view = active_view;
        mesh_file = self.__save_temp_mesh(active_view);
        normalize_transform = self.__get_normalize_transform(active_view);
        view_transform = self.__get_view_transform(active_view);
        glob_transform = self.__get_glob_transform();

        total_transform = glob_transform * normalize_transform * view_transform;
        material_setting = self.__get_material_setting(active_view);
        setting = {
                "type": "ply",
                "filename": mesh_file,
                "faceNormals": True,
                "toWorld": total_transform
                }
        setting.update(material_setting);
        target_shape = self.plgr.create(setting);
        self.mitsuba_scene.addChild(target_shape);
        # Restore the previously active view.
        self.scene.active_view = old_active_view;

    def __add_active_primitives(self):
        """Add the primitive overlays (spheres/cylinders/cones) of the
        currently active view."""
        self.__add_primitives(self.scene.active_view)

    def __add_primitives(self, active_view):
        """Recursively add a view's primitives (Cylinder/Cone/Sphere) to the
        Mitsuba scene as plastic-shaded shapes.

        Primitives with non-positive alpha or radius are skipped. Transforms
        are applied per shape type: cylinders get the view transform directly,
        cones compose it with their own orientation transform, and spheres
        bake it into radius/center (a Mitsuba quirk). The previous active
        view is restored before returning.
        """
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                self.__add_primitives(view);
            return;

        # Temporarily install this view; the transform helpers read from it.
        old_active_view = self.scene.active_view;
        self.scene.active_view = active_view;
        scale = active_view.scale;
        normalize_transform = self.__get_normalize_transform(active_view);
        view_transform = self.__get_view_transform(active_view);
        glob_transform = self.__get_glob_transform();
        total_transform = glob_transform * normalize_transform * view_transform;

        primitives = self.scene.active_view.primitives;
        for shape in primitives:
            # Fully transparent primitives are skipped.
            if shape.color[3] <= 0.0: continue;
            color = {
                    "type": "plastic",
                    "diffuseReflectance": Spectrum(shape.color[:3].tolist())
                    };
            if isinstance(shape, Cylinder):
                if shape.radius <= 0.0: continue;
                setting = self.__add_cylinder(shape);
                setting["bsdf"] = color;
                setting["toWorld"] = total_transform
            elif isinstance(shape, Cone):
                if shape.radius <= 0.0: continue;
                setting = self.__add_cone(shape);
                setting["bsdf"] = color;
                setting["toWorld"] = total_transform * setting["toWorld"];
            elif isinstance(shape, Sphere):
                if shape.radius <= 0.0: continue;
                # Due to weird behavior in Mitsuba, all transformation is
                # applied directly on radius and center variable.
                setting = self.__add_sphere(shape);
                setting["radius"] *= scale;
                setting["center"] = total_transform * setting["center"];
                setting["bsdf"] = color;
            else:
                raise NotImplementedError("Unknown primitive: {}".format(shape));

            mitsuba_primative = self.plgr.create(setting);
            self.mitsuba_scene.addChild(mitsuba_primative);
        # Restore the previously active view.
        self.scene.active_view = old_active_view;

    def __add_sphere(self, shape):
        """Return the Mitsuba plugin settings for a sphere primitive."""
        return {
                "type": "sphere",
                "radius": shape.radius,
                "center": Point(*shape.center)
                }

    def __add_cylinder(self, shape):
        """Return the Mitsuba plugin settings for a cylinder primitive."""
        start, end = shape.end_points[0], shape.end_points[1]
        return {
                "type": "cylinder",
                "p0": Point(*start),
                "p1": Point(*end),
                "radius": shape.radius
                }

    def __add_cone(self, shape):
        """Return plugin settings for a cone: a bundled "cone.ply" mesh
        scaled, rotated and translated to match *shape*'s end points."""
        y_dir = np.array([0.0, 1.0, 0.0])
        v = shape.end_points[1] - shape.end_points[0]
        center = 0.5 * (shape.end_points[0] + shape.end_points[1])
        height = norm(v)
        scale = Transform.scale(
                Vector(shape.radius, height, shape.radius))

        # Rotation taking the +Y axis onto the cone's axis direction.
        axis = np.cross(y_dir, v)
        axis_len = norm(axis)
        angle = degrees(atan2(axis_len, np.dot(y_dir, v)))
        if axis_len > 1e-6:
            axis /= axis_len
        else:
            # v is (anti-)parallel to Y; any perpendicular axis works.
            axis = np.array([1.0, 0.0, 0.0])
        rotate = Transform.rotate(Vector(*axis), angle)
        translate = Transform.translate(Vector(*center))

        cone_file = self.file_resolver.resolve("cone.ply")
        return {
                "type": "ply",
                "filename": cone_file,
                "toWorld": translate * rotate * scale
                }

    def __get_material_setting(self, active_view):
        """Build the bsdf (and optional subsurface) settings for a view.

        The diffuse color depends on the renderer flags: wireframe overlay
        (optionally with a uniform interior color), per-vertex colors, or a
        plain gray. The bsdf is wrapped as two-sided roughplastic, optionally
        masked by the view's alpha; a dipole subsurface scatterer is added
        when neither colors, alpha nor wireframe are requested.
        """
        setting = {};
        if self.with_wire_frame:
            diffuse_color = {
                    "type": "wireframe",
                    "edgeColor": Spectrum(0.0),
                    "lineWidth": active_view.line_width,
                    };
            if self.with_uniform_colors:
                # Use the first vertex color as the uniform interior color.
                diffuse_color["interiorColor"] =\
                        Spectrum(active_view.vertex_colors[0][0].tolist()[0:3]);
            #elif self.with_colors:
            #    diffuse_color["interiorColor"] = { "type": "vertexcolors" }
        else:
            if self.with_colors:
                diffuse_color = { "type": "vertexcolors" }
            else:
                # Plain gray fallback.
                diffuse_color = Spectrum(0.2);

        setting["bsdf"] = {
                "type": "roughplastic",
                "distribution": "beckmann",
                "alpha": 0.2,
                "diffuseReflectance": diffuse_color
                };
        # Render both faces of the surface.
        setting["bsdf"] = {
                "type": "twosided",
                "bsdf": setting["bsdf"]
                };
        if self.with_alpha:
            # Wrap with an opacity mask using the view's alpha.
            setting["bsdf"] = {
                    "type": "mask",
                    "opacity": Spectrum(active_view.alpha),
                    "bsdf": setting["bsdf"]
                    };
        if not self.with_colors and not self.with_alpha and not self.with_wire_frame:
            setting["subsurface"] = {
                    "type": "dipole",
                    "material": "Skimmilk",
                    "scale": 0.5
                    };
        elif not self.with_alpha:
            setting["subsurface"] = {
                    "type": "dipole",
                    "material": "sprite",
                    "scale": 0.5
                    };
        return setting;

    def __add_others(self):
        """Add the optional extras requested by the active view: a quarter
        coin for scale reference, coordinate axes, and a ground floor."""
        view = self.scene.active_view
        if view.with_quarter:
            self.__add_quarter()
        if view.with_axis:
            self.__add_axis()
        if view.background != "n":
            self.__add_floor()

    def __add_quarter(self):
        """Add a US quarter coin (cylinder rim + two textured disks) next to
        the model as a physical size reference.

        The coin is scaled by the active view's scale factor (12.13 / 0.875
        are a quarter's radius / half-thickness in mm), stood on the floor
        and pushed behind the model's bounding box.
        """
        scale = self.scene.active_view.scale;
        radius = 12.13 * scale;
        thickness = 0.875 * scale;
        face_scale = Transform.scale(Vector(radius));
        # Offsets for the two faces; the head side is mirrored in Z.
        tail_offset = Transform.translate(Vector(0, 0, thickness));
        head_offset = Transform.translate(Vector(0, 0, -thickness)) *\
                Transform.scale(Vector(1.0, 1.0, -1.0));

        # Place the coin on the floor, just behind the model's bbox.
        bbox_diag = 0.5 * norm(
                self.transformed_bbox_max - self.transformed_bbox_min);
        custom_transform = Transform.translate(Vector(
            0.5,
            self.floor_height + radius + 0.01,
            -bbox_diag - 0.01));

        head_texture = self.file_resolver.resolve("head.png");
        tail_texture = self.file_resolver.resolve("tail.png");
        side_texture = self.file_resolver.resolve("side.png");

        # Rim: bump-mapped rough metal cylinder.
        quarter_ring = self.plgr.create({
            "type": "cylinder",
            "p0": Point(0.0, 0.0, thickness),
            "p1": Point(0.0, 0.0, -thickness),
            "radius": radius,
            "toWorld": custom_transform,
            "bsdf": {
                "type": "bumpmap",
                "texture": {
                    "type": "scale",
                    "scale": 0.01,
                    "texture": {
                        "type": "bitmap",
                        "filename": side_texture,
                        "gamma": 1.0,
                        "uscale": 100.0,
                        },
                    },
                "bsdf": {
                    "type": "roughconductor",
                    "distribution": "ggx",
                    "alpha": 0.5,
                    "material": "Ni_palik"
                    #"diffuseReflectance": Spectrum(0.5)
                    }
                }
            });
        # Faces: textured disks for head and tail.
        head = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * head_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": head_texture
                    }
                }
            });
        tail = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * tail_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": tail_texture
                    }
                }
            });

        self.mitsuba_scene.addChild(quarter_ring);
        self.mitsuba_scene.addChild(head);
        self.mitsuba_scene.addChild(tail);

    def __add_axis(self):
        """Placeholder: axis rendering is not implemented for this backend."""
        raise NotImplementedError("Adding axis is not supported");

    def __add_floor(self):
        """Insert a large ground rectangle whose brightness matches the
        active view's background setting."""
        # Rotate the XY rectangle to lie flat, blow it up 100x, then lift
        # it to the floor height.
        to_world = (Transform.translate(Vector(0.0, self.floor_height, 0.0))
                * Transform.scale(Vector(100, 100, 100))
                * Transform.rotate(Vector(-1, 0, 0), 90))

        # "d" -> dark floor, "l" -> light floor, anything else -> black.
        background = self.scene.active_view.background
        albedo = {"d": 0.05, "l": 0.5}.get(background, 0.0)
        reflectance = Spectrum(albedo)

        self.mitsuba_scene.addChild(self.plgr.create({
            "type": "rectangle",
            "toWorld": to_world,
            "bsdf": {
                "type": "roughdiffuse",
                "diffuseReflectance": reflectance,
                "alpha": 0.5
                }
            }))

    def __run_mitsuba(self):
        """Configure the assembled scene, render it with one local worker per
        CPU core, and write the result to ``self.image_name``."""
        self.mitsuba_scene.configure();

        # One local worker per core.
        scheduler = Scheduler.getInstance();
        for i in range(multiprocessing.cpu_count()):
            scheduler.registerWorker(LocalWorker(i, "worker_{}".format(i)));
        scheduler.start();

        queue = RenderQueue();
        self.mitsuba_scene.setDestinationFile(self.image_name);

        job = RenderJob("render job: {}".format(
            self.image_name), self.mitsuba_scene, queue);
        job.start();

        # Block until the render job completes.
        queue.waitLeft(0);
        queue.join();

        print(Statistics.getInstance().getStats());
        scheduler.stop();

    def __save_temp_mesh(self, active_view):
        """Dump the active view's mesh with per-corner RGB colors to a
        timestamped temporary .ply file and return its path.

        Vertices are duplicated per face corner so that per-corner colors
        can be stored as flat "red"/"green"/"blue" vertex attributes.
        """
        basename, ext = os.path.splitext(self.image_name);
        path, name = os.path.split(basename);
        stamp = datetime.datetime.now().isoformat();
        tmp_dir = tempfile.gettempdir();
        tmp_mesh_name = os.path.join(tmp_dir, "{}_{}.ply".format(name,
            stamp));

        vertices = active_view.vertices;
        faces = active_view.faces;

        num_faces, vertex_per_face = faces.shape;
        vertices = vertices[faces.ravel(order="C")];
        # BUG FIX: multiply out-of-place.  reshape() may return a *view* of
        # active_view.vertex_colors, so the previous in-place `colors *= 255`
        # silently scaled the view's own color array, corrupting it on any
        # subsequent call.
        colors = active_view.vertex_colors.reshape((-1, 4), order="C") * 255;
        faces = np.arange(len(vertices), dtype=int).reshape(
                (num_faces, vertex_per_face), order="C");

        # Alpha (column 3) is dropped; .ply stores RGB only here.
        mesh = pymesh.form_mesh(vertices, faces);
        mesh.add_attribute("red");
        mesh.set_attribute("red", colors[:,0].ravel());
        mesh.add_attribute("green");
        mesh.set_attribute("green", colors[:,1].ravel());
        mesh.add_attribute("blue");
        mesh.set_attribute("blue", colors[:,2].ravel());

        pymesh.save_mesh(tmp_mesh_name, mesh,
                "red", "green", "blue", ascii=True, use_float=True, anonymous=True);
        return tmp_mesh_name;

    def __get_normalize_transform(self, active_view):
        """Transform that recenters the view on the origin, then applies the
        view's uniform scale."""
        s = active_view.scale
        recenter = Transform.translate(Vector(*(-1 * active_view.center)))
        rescale = Transform.scale(Vector(s, s, s))
        return rescale * recenter

    def __get_view_transform(self, active_view):
        """Promote the view's column-major 3x4 transform to a Mitsuba
        Transform (4x4, row-major)."""
        mat = np.eye(4)
        mat[0:3, :] = active_view.transform.reshape((3, 4), order="F")
        return Transform(Matrix4x4(mat.ravel(order="C").tolist()))

    def __get_glob_transform(self):
        """Mitsuba Transform built from the renderer's 4x4 global transform
        (flattened row-major)."""
        flat = self.global_transform.ravel(order="C").tolist()
        return Transform(Matrix4x4(flat))

    @property
    def with_colors(self):
        """Whether the active view provides per-vertex colors."""
        return self.scene.active_view.with_colors;

    @property
    def with_alpha(self):
        """True when the active view is not fully opaque (alpha != 1.0)."""
        return self.scene.active_view.alpha != 1.0;

    @property
    def with_wire_frame(self):
        """Whether the active view requests wireframe rendering."""
        return self.scene.active_view.with_wire_frame;

    @property
    def with_uniform_colors(self):
        """Whether the active view uses a single uniform color."""
        return self.scene.active_view.with_uniform_colors;
Example #15
0
    def Render(self, sampleCount, i):
        """Render variant *i* of the base scene with *sampleCount* samples.

        A copy of the base scene gets this object's camera and a fresh
        sampler, is rendered through the shared queue, and the developed
        film is returned as a numpy array.
        """
        ## Create a copy of the base scene and add modifications regarding the
        ## variant camera's properties (sensor position, sampler).
        currScene = Scene(self.scene)
        currScene.configure()
        pmgr = PluginManager.getInstance()  # NOTE(review): unused here
        currScene.addSensor(self.cam)
        currScene.setSensor(self.cam)
        self.createSampler(sampleCount)
        currScene.setSampler(self.sampler)
        currScene.setDestinationFile('')

        ## Create a render job and insert it into the queue.  Registering the
        ## copied scene as a resource lets workers share it.
        curSceneResID = self.scheduler.registerResource(currScene)
        job = RenderJob('myRenderJob' + str(i), currScene, self.queue,
                        curSceneResID)
        job.start()

        # Block until the job completes.
        self.queue.waitLeft(0)
        self.queue.join()

        ## Acquire Bitmap format of the rendered image (16-bit float RGBA):
        film = currScene.getFilm()
        size = film.getSize()
        bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
        film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)

        ## End of render - get result.
        result_image = np.array(
            bitmap.buffer()) if sys.platform == 'linux2' else np.array(
                bitmap.getNativeBuffer())
        # TODO: update the Windows Mitsuba build -- bitmap.getNativeBuffer()
        # no longer exists in newer APIs.
        # NOTE(review): getAABB is *not called* -- this returns the bound
        # method object, not the bounding box; likely should be
        # currScene.getAABB().  Left unchanged to preserve behavior.
        currSceneInfo = currScene.getAABB
        return result_image, currSceneInfo
Example #16
0
def renderVPLS(vpls, cam, scene, target):
    '''Render the VPLs (virtual point lights) lying on the target plane.

    The camera looks at the desired *target* ('roof' or 'floor') of the
    scene; the resulting image is used to define the 3D space where the
    camera and object can be placed in the environment.
    '''

    pmgr = PluginManager.getInstance()
    scheduler = Scheduler.getInstance()

    # Height of the plane whose VPLs should be rendered.
    if (target == 'roof'):
        target_height = scene.HeightRoof
    else:
        target_height = scene.HeightFloor

    # Start up the scheduling system with one worker per local core
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    # Create a queue for tracking render jobs
    queue = RenderQueue()

    # vpls[0][1] holds the VPL count; each VPL occupies 4 consecutive rows.
    nVPLS = int(vpls[0][1]) * 4

    # NOTE(review): this rebinds the `scene` parameter -- the input scene is
    # only consulted for HeightRoof/HeightFloor above; everything below
    # renders a freshly built scene of emissive spheres.
    scene = Scene()

    # One emissive unit sphere per VPL on the target plane.
    # NOTE(review): `xrange` and the exact float equality below are
    # Python-2-era code; heights must match bit-for-bit to be kept.
    for i in xrange(1, nVPLS, 4):
        if (float(vpls[i][2]) == target_height):
            scene.addChild(
                pmgr.create({
                    'type':
                    'sphere',
                    'center':
                    Point(float(vpls[i][1]), float(vpls[i][2]),
                          float(vpls[i][3])),
                    'radius':
                    1.0,
                    'emitter':
                    pmgr.create({
                        'type': 'area',
                        'radiance': Spectrum(10.),
                    })
                }))

    # Perspective camera aimed at the target, LDR film, 1 halton sample/px.
    scene.addChild(
        pmgr.create({
            'type':
            'perspective',
            'toWorld':
            Transform.lookAt(
                Point(cam.origin[0], cam.origin[1], cam.origin[2]),
                Point(cam.target[0], cam.target[1], cam.target[2]),
                Vector(cam.up[0], cam.up[1], cam.up[2])),
            'fov':
            cam.fov,
            'film': {
                'type': 'ldrfilm',
                'width': cam.width,
                'height': cam.height,
                'banner': False,
            },
            'sampler': {
                'type': 'halton',
                'sampleCount': 1
            },
        }))

    scene.addChild(pmgr.create({'type': 'direct'}))
    scene.configure()

    if (target == 'roof'):
        filename = 'renderVPLSRoof'
    else:
        filename = 'renderVPLSFloor'

    scene.setDestinationFile(filename)

    # Create a render job and insert it into the queue
    job = RenderJob('myRenderJob', scene, queue)
    job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
    queue.join()

    scheduler.stop()
Example #17
0
        class ApiExportContext(ExportContextBase):
            '''
            Export context backed by the Mitsuba Python API: scene data is
            converted into live Mitsuba objects via the PluginManager.
            '''

            EXPORT_API_TYPE = 'API'

            # Class-level defaults; __init__ rebinds thread/pmgr/scene per
            # instance.  NOTE(review): `scheduler` is never assigned here --
            # presumably set by a subclass or caller; confirm.
            thread = None
            scheduler = None
            pmgr = None
            scene = None

            def __init__(self):
                super().__init__()

                # Register this thread with Mitsuba and attach the shared
                # file resolver and logger.
                self.thread = Thread.registerUnmanagedThread('exporter')
                self.thread.setFileResolver(main_fresolver)
                self.thread.setLogger(main_logger)

                self.pmgr = PluginManager.getInstance()
                self.scene = Scene()

            # Functions binding to Mitsuba extension API

            def spectrum(self, value, mode=''):
                '''
                Coerce *value* into a Mitsuba Spectrum.

                Accepts: a dict ({'type': 'rgb'|'srgb'|'spectrum'|'blackbody',
                ...}), a plain number, a string (path to a spectrum data
                file), a 1/3/4-element numeric sequence, or a list of
                (wavelength, value) pairs.  Falls back to Spectrum(0.0) for
                anything unrecognized.
                '''
                if not mode:
                    mode = self.color_mode

                spec = None

                if isinstance(value, (dict)):
                    if 'type' in value:
                        if value['type'] in {'rgb', 'srgb', 'spectrum'}:
                            # Recurse with the explicit color mode.
                            spec = self.spectrum(value['value'], value['type'])

                        elif value['type'] == 'blackbody':
                            spec = Spectrum()
                            spec.fromContinuousSpectrum(BlackBodySpectrum(value['temperature']))
                            spec.clampNegative()
                            spec = spec * value['scale']

                elif isinstance(value, (float, int)):
                    spec = Spectrum(value)

                elif isinstance(value, (str)):
                    # String values are paths to interpolated spectrum files.
                    contspec = InterpolatedSpectrum(self.get_export_path(value))
                    spec = Spectrum()
                    spec.fromContinuousSpectrum(contspec)
                    spec.clampNegative()

                else:
                    try:
                        items = list(value)

                        for i in items:
                            if not isinstance(i, (float, int, tuple)):
                                raise Exception('Error: spectrum list contains an unknown type')

                    except Exception:
                        # Either `value` is not iterable (TypeError) or it
                        # holds an unsupported element (raised above).  Was a
                        # bare `except:`; narrowed so KeyboardInterrupt /
                        # SystemExit still propagate.
                        items = None

                    if items:
                        totitems = len(items)

                        if isinstance(items[0], (float, int)):
                            if totitems == 3 or totitems == 4:
                                # RGB(A); a 4th component is ignored.
                                spec = Spectrum()

                                if mode == 'srgb':
                                    spec.fromSRGB(items[0], items[1], items[2])

                                else:
                                    spec.fromLinearRGB(items[0], items[1], items[2])

                            elif totitems == 1:
                                spec = Spectrum(items[0])

                            else:
                                MtsLog('Expected spectrum items to be 1, 3 or 4, got %d.' % len(items), type(items), items)

                        else:
                            # List of (wavelength, value) sample pairs.
                            spec = Spectrum()
                            contspec = InterpolatedSpectrum()

                            for spd in items:
                                (wlen, val) = spd
                                contspec.append(wlen, val)

                            spec.fromContinuousSpectrum(contspec)
                            spec.clampNegative()

                    else:
                        MtsLog('Unknown spectrum type.', type(value), value)

                if spec is None:
                    spec = Spectrum(0.0)

                return spec

            def vector(self, x, y, z):
                # Blender is Z up but Mitsuba is Y up, convert the vector
                return Vector(x, z, -y)

            def point(self, x, y, z):
                # Blender is Z up but Mitsuba is Y up, convert the point
                return Point(x, z, -y)

            def transform_lookAt(self, origin, target, up, scale=None):
                '''Build a lookAt Transform, converting Blender's Z-up
                coordinates to Mitsuba's Y-up; optional XY scale.'''
                transform = Transform.lookAt(
                    Point(origin[0], origin[2], -origin[1]),
                    Point(target[0], target[2], -target[1]),
                    Vector(up[0], up[2], -up[1])
                )

                if scale is not None:
                    transform *= Transform.scale(Vector(scale, scale, 1))

                return transform

            def animated_lookAt(self, motion):
                '''Build a (possibly animated) lookAt transform from a list
                of (time, (origin, target, up, scale)) keyframes.'''
                # Two identical keyframes collapse to a static transform.
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, (origin, target, up, scale)) in motion:
                        transform.appendTransform(t, self.transform_lookAt(origin, target, up, scale))

                else:
                    (origin, target, up, scale) = motion[0][1]
                    transform = self.transform_lookAt(origin, target, up, scale)

                return transform

            def transform_matrix(self, matrix):
                # Blender is Z up but Mitsuba is Y up, convert the matrix
                global_matrix = axis_conversion(to_forward="-Z", to_up="Y").to_4x4()
                l = matrix_to_list(global_matrix * matrix)
                mat = Matrix4x4(l)
                transform = Transform(mat)

                return transform

            def animated_transform(self, motion):
                '''Build a (possibly animated) transform from a list of
                (time, matrix) keyframes.'''
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, m) in motion:
                        transform.appendTransform(t, self.transform_matrix(m))

                else:
                    transform = self.transform_matrix(motion[0][1])

                return transform

            def configure(self):
                '''
                Instantiate the accumulated scene_data and call Scene
                configure.
                '''

                self.scene.addChild(self.pmgr.create(self.scene_data))
                self.scene.configure()

                # Reset the volume redundancy check
                ExportedVolumes.reset_vol_list()

            def cleanup(self):
                self.exit()

            def exit(self):
                # Do nothing
                pass
Example #18
0
 def Render(self, sampleCount):
     """Render a copy of the base scene with this object's lights, camera
     and a sampler of *sampleCount* samples; return the developed image
     array plus the scene's AABB accessor."""
     scene_copy = Scene(self.scene)
     for light in self.light:
         scene_copy.addChild(light)
     scene_copy.configure()
     scene_copy.addSensor(self.cam)
     scene_copy.setSensor(self.cam)
     self.__createSampler(sampleCount)
     scene_copy.setSampler(self.sampler)
     scene_copy.setDestinationFile('')

     # Queue a render job and block until it is finished.
     job = RenderJob('myRenderJob', scene_copy, self.queue)
     job.start()
     self.queue.waitLeft(0)
     self.queue.join()

     # Develop the film into a 16-bit float RGBA bitmap.
     film = scene_copy.getFilm()
     size = film.getSize()
     bitmap = Bitmap(Bitmap.ERGBA, Bitmap.EFloat16, size)
     film.develop(Point2i(0, 0), size, Point2i(0, 0), bitmap)
     result_image = np.array(bitmap.getNativeBuffer())
     scene_info = scene_copy.getAABB
     return result_image, scene_info
Example #19
0
def do_simulation_multiangle_seq(seqname):
    """Render the scene once per (azimuth, zenith) view angle in the
    sequence file "<seqname>.conf".

    The sensor is placed on a sphere of radius obs_R around the scene's
    bounding-sphere center for each angle pair; spectral .npy outputs are
    then converted to the configured raster format.
    """
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    # One local worker per CPU core.
    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()

    scene_path = session.get_scenefile_path()
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(str(scene_path))
    scene = SceneHandler.loadScene(fileResolver.resolve(
        str(os.path.join(session.get_scenefile_path(), main_scene_xml_file))))
    scene.configure()
    scene.initialize()
    queue = RenderQueue()
    sceneResID = scheduler.registerResource(scene)

    # Observation sphere: look at the scene bounding-sphere center.
    bsphere = scene.getKDTree().getAABB().getBSphere()
    radius = bsphere.radius
    targetx, targety, targetz = bsphere.center[0], bsphere.center[1], bsphere.center[2]

    # BUG FIX: close the config files instead of leaking the handles.
    with open(seqname + ".conf", 'r') as f:
        params = json.load(f)
    obs_azimuth = params['seq1']['obs_azimuth']
    obs_zenith = params['seq2']['obs_zenith']
    cfgfile = session.get_config_file()
    with open(cfgfile, 'r') as f:
        cfg = json.load(f)
    viewR = cfg["sensor"]["obs_R"]
    mode = cfg["sensor"]["film_type"]

    # BUG FIX: materialize as lists.  Both sequences are iterated several
    # times below (nested render loops + the .npy post-pass); under Python 3
    # a bare map() is a one-shot iterator and the later passes would be
    # empty.  On Python 2 this is behaviorally identical.
    azi_arr = [float(x) for x in obs_azimuth.strip().split(":")[1].split(",")]
    zeni_arr = [float(x) for x in obs_zenith.strip().split(":")[1].split(",")]

    seq_header = multi_file_prefix + "_" + seqname
    for azi in azi_arr:
        for zeni in zeni_arr:
            distFile = os.path.join(session.get_output_dir(),
                                    seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(".", "_"))
            # Shallow scene copy with a repositioned sensor for this angle.
            newScene = Scene(scene)
            pmgr = PluginManager.getInstance()
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            theta = zeni / 180.0 * math.pi
            phi = (azi - 90) / 180.0 * math.pi
            scale_x = radius
            scale_z = radius
            toWorld = Transform.lookAt(
                Point(targetx - viewR * math.sin(theta) * math.cos(phi), targety + viewR * math.cos(theta),
                      targetz - viewR * math.sin(theta) * math.sin(phi)),  # origin
                Point(targetx, targety, targetz),  # target
                Vector(0, 0, 1)  # up
            ) * Transform.scale(
                Vector(scale_x, scale_z, 1)  # footprint (field of view) size
            )
            newSensor.setWorldTransform(toWorld)
            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(str(distFile))
            # sceneResID lets workers share the original scene resource.
            job = RenderJob('Simulation Job' + "VA_" + str(azi) + "_VZ_" + str(zeni), newScene, queue, sceneResID)
            job.start()
        queue.waitLeft(0)
        queue.join()

    # Convert spectral .npy output to the requested raster format.
    if mode == "spectrum" and (output_format not in ("npy", "NPY")):
        for azi in azi_arr:
            for zeni in zeni_arr:
                distFile = os.path.join(session.get_output_dir(),
                                        seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(
                                            ".", "_"))
                data = np.load(distFile + ".npy")
                bandlist = cfg["sensor"]["bands"].split(",")
                RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
                os.remove(distFile + ".npy")
def init_scene():
    """Build a demo scene (multichannel distance-field integrator, dielectric
    sphere, sunsky envmap) and render num_views Y-rotated views of it.

    NOTE(review): relies on module-level globals `scheduler`, `queue`,
    `mesh_file`, `num_samples`, `image_width`, `image_height`.
    """
    pmgr = PluginManager.getInstance()
    scene = Scene()
    scene.setDestinationFile('renderedResult')

    camera_pos = Vector(0, 0.0, -12)
    model_pos = Vector(0.5, -0.5, -2.0)

    # NOTE(review): a, b, integrator, sampler, film and sensor below are
    # constructed but never added to the scene -- scene_config supplies its
    # own integrator/sensor.  Kept in case their constructors have side
    # effects; confirm before removing.
    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=mesh_file)
    b = MitsubaShape(shape_type=MitsubaShape.CUBE, to_world=Transform.translate(model_pos))

    integrator = create_integrator(RenderTargetType.DEPTH, hide_emitters=False)

    print(integrator.config)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.HDR, image_width, image_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    scene_config = {
        'type' : 'scene',
              'integrator' : {
                'type' : 'multichannel',
                # Channel "c": per-pixel distance field.
                'c': {
                    'type' : 'field',
                    'field' : 'distance',
                    'undefined': 0.0
                }
            },
            'sphere' : {
                'type' : 'sphere',
                'bsdf' : {
                    'type' : 'dielectric',
                    'reflectance' : Spectrum(0.4)
                }
            },
            'envmap' : {
                'type' : 'sunsky',
                'albedo' : Spectrum(0.5)
            },
            'sensor' : {
                'type' : 'perspective',
                'toWorld' : Transform.translate(Vector(0, 0, 0)),
                'sampler' : {
                    'type' : 'halton',
                    'sampleCount' : 64

                },
                'film' : {
                    'type' : 'ldrfilm',
                    'width' : 500,
                    'height' : 500,
                    'pixelFormat': "rgb",
                    'channelNames': "normal"
                }
            },

        }

    scene.addChild(pmgr.create(scene_config))
    scene.configure()

    scene.initialize()
    sceneResID = scheduler.registerResource(scene)

    num_views = 1
    step_size = 360/(num_views)
    for i in range(num_views):
        destination = 'results/result_%03i' % i
        # Create a shallow copy of the scene so that the queue can tell apart the two
        # rendering processes. This takes almost no extra memory
        newScene = mitsuba.render.Scene(scene)
        pmgr = PluginManager.getInstance()
        newSensor = pmgr.createObject(scene.getSensor().getProperties())

        # Rotate the camera around the Y axis for this view.
        rotationCur = Transform.rotate(Vector(0, 1, 0), i*step_size)
        new_pos = rotationCur*camera_pos
        new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
        newSensor.setWorldTransform(new_transform)

        newFilm = pmgr.createObject(scene.getFilm().getProperties())
        newFilm.configure()
        newSensor.addChild(newFilm)
        newSensor.configure()
        newScene.addSensor(newSensor)
        newScene.setSensor(newSensor)
        newScene.setSampler(scene.getSampler())
        newScene.setDestinationFile(destination)
        # BUG FIX: render the per-view copy (newScene), not the original
        # `scene` -- previously every job re-rendered the unrotated scene,
        # ignoring newScene's sensor and destination file.
        job = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
        job.start()

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)
def main(input_file, output_dir, output_name, img_width, img_height, num_samples):
    """Render RGB/depth/normal/position passes of *input_file* from several
    angles around the Y axis, writing each pass to its own subdirectory of
    *output_dir*.

    NOTE(review): depends on module-level globals (`queue`, `mitsuba`,
    `register_scene_config`, `frange`, `check_mkdir`, `get_filename`, ...)
    and on `pmgr` being rebound inside the loop.
    """

    pmgr = PluginManager.getInstance()

    # Camera ("bed" position) and model placement.
    camera_pos = Vector(0, 1.0, 2.0)
    model_pos = Vector(0.0, 0, 0.0)

    a = MitsubaShape(shape_type=MitsubaShape.PLY_TYPE, to_world=Transform.translate(model_pos), filename=input_file)

    # One integrator per render pass.
    integrator = create_integrator(RenderTargetType.DIRECT, hide_emitters=False)
    depth_integrator = create_integrator(RenderTargetType.DEPTH, hide_emitters=False)
    position_integrator = create_integrator(RenderTargetType.POSITION, hide_emitters=False)
    normal_integrator = create_integrator(RenderTargetType.NORMAL, hide_emitters=False)

    sampler = Sampler(SamplerType.HALTON, num_samples=num_samples)
    film = Film(FilmType.LDR, img_width, img_height)
    sensor = Sensor(SensorType.PERSPECTIVE, sampler=sampler, film=film, to_world=Transform.translate(camera_pos))

    # Base scene; the 'integrator' key is swapped in per pass below.
    scene_config = {
        'type': 'scene',
        'a': a.config,
        'envmap': {
            'type': 'sunsky',
            'hour': 12.0,
            'albedo': Spectrum(1.0),
            'samplingWeight': 1.0,
        },
        'sensor': sensor.config
    }

    num_views = 6
    xangles = [y for y in frange(0, 360, np.floor(360 / num_views))]
    yangles = [0.0]
    print(yangles)
    step_size = 1

    # List containing the integrators to use in Multi-Pass rendering
    passes = [integrator, depth_integrator, normal_integrator, position_integrator]

    start = time.time()
    render_count = 0
    num_scale = 1
    offset = num_scale / 2  # NOTE(review): unused
    print("Size:", (num_views * num_scale))

    translations = []

    # NOTE(review): with translations == [], this product is 0, so the
    # reported count does not match the images actually rendered below.
    num_images = len(yangles) * len(xangles) * len(translations) * len(translations) * num_scale
    print("Number of images: ", str(num_images))

    j = None

    filename = get_filename(input_file)  # NOTE(review): unused

    original_x = camera_pos[0]
    original_y = camera_pos[1]
    for x in xangles:
        for y in yangles:

            original_Z = camera_pos[2]
            # NOTE(review): alias, not a copy -- the writes below mutate
            # camera_pos itself (to the same values, so it is harmless here).
            new_camera = camera_pos
            new_camera[0] = original_x
            new_camera[2] = original_Z

            for p in passes:

                i = render_count
                scene = Scene()
                # NOTE(review): pass_config aliases scene_config, so the
                # integrator assignment below also mutates scene_config --
                # which is exactly what register_scene_config registers.
                pass_config = scene_config

                destination = output_dir

                # Route each pass type to its own output subdirectory.
                if p.type == RenderTargetType.DIRECT or p.type == RenderTargetType.PATH:
                    destination = os.path.join(destination, 'rgb')
                elif p.type == RenderTargetType.DEPTH:
                    destination = os.path.join(destination, 'depth')
                elif p.type == RenderTargetType.NORMAL:
                    destination = os.path.join(destination, 'normal')
                elif p.type == RenderTargetType.SH_NORMAL:
                    destination = os.path.join(destination, 'sh_normal')
                elif p.type == RenderTargetType.POSITION:
                    destination = os.path.join(destination, 'pos')
                elif p.type == RenderTargetType.UV:
                    destination = os.path.join(destination, 'uv')

                check_mkdir(destination)
                destination = os.path.join(destination, output_name + '_%03i' % (i))

                # Set the pass integrator
                pass_config['integrator'] = p.config
                sceneResID = register_scene_config(scene, pmgr, scene_config)

                # Create a shallow copy of the scene so that the queue can tell apart the two
                # rendering processes. This takes almost no extra memory
                newScene = mitsuba.render.Scene(scene)
                pmgr = PluginManager.getInstance()
                newSensor = pmgr.createObject(scene.getSensor().getProperties())

                # Calculate the rotations
                yrotation = Transform.rotate(Vector(1, 0, 0), y)
                xrotation = Transform.rotate(Vector(0, 1, 0), x)
                rotationCur = xrotation * yrotation

                # Set the new camera position, applying the rotations
                new_pos = rotationCur * new_camera
                print(new_pos)
                new_transform = Transform.lookAt(Point(new_pos), Point(0, 0, 0), Vector(0, 1, 0))
                newSensor.setWorldTransform(new_transform)

                newFilm = pmgr.createObject(scene.getFilm().getProperties())
                newFilm.configure()
                newSensor.addChild(newFilm)
                newSensor.configure()
                newScene.addSensor(newSensor)
                newScene.setSensor(newSensor)
                newScene.setSampler(scene.getSampler())
                newScene.setDestinationFile(destination)

                # Create a render job and insert it into the queue. Note how the resource
                # ID of the original scene is provided to avoid sending the full scene
                # contents over the network multiple times.
                j = RenderJob('myRenderJob' + str(i), newScene, queue, sceneResID)
                j.start()

            queue.waitLeft(0)
            render_count += 1

        print("Full Set")

    # Wait for all jobs to finish and release resources
    queue.waitLeft(0)

    finish = time.time()
    print("Run Time:", finish - start)
Example #22
0
    def render(self, filename):
        """Instantiate integrator, emitter and sensor from self.setup, build
        a scene from self.mesh, render it to *filename*, then stop the
        scheduler."""
        self.scheduler.start()

        cfg = self.setup
        # Global plugins, parameterized by the setup dictionary.
        integrator = self.pmgr.create({'type': cfg['integrator']})
        emitter = self.pmgr.create({'type': cfg['emitter']})
        sensor = self.pmgr.create({
            'type': cfg['sensor'],
            'film': {
                'type': cfg['film'],
                'width': cfg['width'],
                'height': cfg['height'],
                'pixelFormat': cfg['pixelFormat'],
                'exposure': cfg['exposure'],
                'banner': cfg['banner']
            },
            'sampler': {
                'type': cfg['sampler'],
                'sampleCount': cfg['sampleCount']
            },
            'fov': cfg['fov'],
        })

        scene = Scene()
        for child in [integrator, emitter, sensor] + list(self.mesh):
            scene.addChild(child)
        scene.configure()
        scene.initialize()  # needed to force build of kd-tree

        # Aim the sensor, then render.
        sensor.setWorldTransform(Transform.lookAt(
            cfg['eye'], cfg['target'], cfg['camera_up']))
        scene.setDestinationFile(filename)

        job = RenderJob('job', scene, self.queue)
        job.start()

        self.queue.waitLeft(0)
        self.queue.join()
        self.scheduler.stop()
Example #23
0
class MitsubaRenderer(AbstractRenderer):
    """Render a scene through the (pre-1.0) Mitsuba Python bindings.

    Translates the project's scene description (active view hierarchy,
    primitives, camera, optional floor/coin props) into a Mitsuba
    ``Scene`` plugin graph, then runs a blocking render job that writes
    the image to ``scene.output_dir``.
    """

    def __init__(self, scene):
        super(MitsubaRenderer, self).__init__(scene)

    def render(self):
        """Build the full Mitsuba scene from ``self.scene`` and render it."""
        self.__initialize()
        self.__add_integrator()
        self.__add_lights()
        self.__add_active_camera()
        self.__add_active_view()
        self.__add_active_primitives()
        self.__add_others()
        self.__run_mitsuba()

    def __initialize(self):
        # Split into Mitsuba-handle, image and geometry settings.
        self.__initialize_mitsuba_setting()
        self.__initialize_image_setting()
        self.__initialize_geometry_setting()

    def __initialize_mitsuba_setting(self):
        """Create the plugin manager, an empty scene, and register the
        module-local asset directories with Mitsuba's file resolver."""
        self.plgr = PluginManager.getInstance()
        self.output_dir = self.scene.output_dir

        # Resolve asset paths relative to this module's install location.
        mitsuba_module_path = os.path.dirname(inspect.getfile(MitsubaRenderer))
        self.file_resolver = Thread.getThread().getFileResolver()
        self.file_resolver.appendPath(
            os.path.join(mitsuba_module_path, "xml_files/"))
        self.file_resolver.appendPath(
            os.path.join(mitsuba_module_path, "textures/"))
        self.file_resolver.appendPath(
            os.path.join(mitsuba_module_path, "shapes/"))

        self.mitsuba_scene = Scene()

    def __initialize_image_setting(self):
        """Derive output image name and dimensions from the active view."""
        active_view = self.scene.active_view
        self.image_name = os.path.join(self.output_dir, active_view.name)
        self.image_width = active_view.width
        self.image_height = active_view.height

    def __initialize_geometry_setting(self):
        """Initialize the global transform, floor height, and a fallback
        bounding box for views that carry no vertices."""
        active_view = self.scene.active_view
        self.global_transform = self.scene.global_transform
        # NOTE(review): __add_view later tests ``self.floor_height is None``,
        # which can never be true given this initializer — confirm intent.
        self.floor_height = 1e-12
        if len(active_view.vertices) == 0:
            # No geometry: use a unit box so props (floor, coin) still place.
            dim = active_view.vertices.shape[1]
            self.transformed_bbox_min = np.zeros(dim)
            self.transformed_bbox_max = np.ones(dim)

    def __add_integrator(self):
        """Attach the integrator: a volumetric path tracer when alpha
        (mask BSDFs) is in play, otherwise direct illumination."""
        if self.with_alpha:
            integrator = self.plgr.create({
                "type": "volpath",
                "rrDepth": 20
            })
        else:
            integrator = self.plgr.create({
                "type": "direct",
                "shadingSamples": 16
            })
        self.mitsuba_scene.addChild(integrator)

    def __add_lights(self):
        #TODO: load lights from scene
        front_light = self.plgr.create({
            "type": "sphere",
            "center": Point(3.0, 6.0, 4.0),
            "radius": 2.5,
            "emitter": {
                "type": "area",
                "radiance": Spectrum(10.0),
                "samplingWeight": 10.0
            }
        })

        # NOTE(review): side_light and back_light are built but their
        # addChild calls below are commented out — only front_light is used.
        side_light = self.plgr.create({
            "type": "point",
            "position": Point(4.0, 4.0, -1.0),
            "intensity": Spectrum(5.0)
        })

        back_light = self.plgr.create({
            "type": "point",
            "position": Point(-0, 5.0, -1),
            "intensity": Spectrum(5.0)
        })

        self.mitsuba_scene.addChild(front_light)
        #self.mitsuba_scene.addChild(side_light);
        #self.mitsuba_scene.addChild(back_light);

    def __add_active_camera(self):
        """Attach a perspective camera with an LDR film cropped to the
        active camera's crop bbox."""
        active_view = self.scene.active_view
        camera = self.scene.active_camera
        if active_view.transparent_bg:
            pixel_format = "rgba"
        else:
            pixel_format = "rgb"

        crop_bbox = np.array(camera.crop_bbox)
        if np.amax(crop_bbox) <= 1.0:
            # bbox is relative.
            crop_bbox[:, 0] *= self.image_width
            crop_bbox[:, 1] *= self.image_height

        assert (np.all(crop_bbox >= 0))
        assert (np.all(crop_bbox[:, 0] <= self.image_width))
        assert (np.all(crop_bbox[:, 1] <= self.image_height))

        mitsuba_camera = self.plgr.create({
            "type":
            "perspective",
            "fov":
            float(camera.fovy),
            "fovAxis":
            "y",
            "toWorld":
            Transform.lookAt(Point(*camera.location),
                             Point(*camera.look_at_point),
                             Vector(*camera.up_direction)),
            "film": {
                "type": "ldrfilm",
                "width": self.image_width,
                "height": self.image_height,
                "cropOffsetX": int(crop_bbox[0, 0]),
                "cropOffsetY": int(crop_bbox[0, 1]),
                "cropWidth": int(crop_bbox[1, 0] - crop_bbox[0, 0]),
                "cropHeight": int(crop_bbox[1, 1] - crop_bbox[0, 1]),
                "banner": False,
                "pixelFormat": pixel_format,
                "rfilter": {
                    "type": "gaussian"
                }
            },
            "sampler": {
                "type": "halton",
                "sampleCount": 4,
            }
        })
        self.mitsuba_scene.addChild(mitsuba_camera)

    def __add_active_view(self):
        self.__add_view(self.scene.active_view)

    def __add_view(self, active_view, parent_transform=None):
        """Recursively add the mesh of *active_view* (and its subviews) to
        the Mitsuba scene, composing parent/view transforms on the way down.

        Also updates ``transformed_bbox_min/max`` and ``floor_height`` from
        the world-space vertex positions of the leaf view.
        """
        if len(active_view.subviews) > 0:
            # Internal node: recurse, accumulating this view's transform.
            for view in active_view.subviews:
                if parent_transform is None:
                    view_transform = self.__get_view_transform(active_view)
                else:
                    view_transform = parent_transform * self.__get_view_transform(
                        active_view)
                self.__add_view(view, view_transform)
            return

        if len(active_view.faces) == 0: return

        # Temporarily switch the scene's active view so helpers (and the
        # with_* properties) see this leaf view.
        old_active_view = self.scene.active_view
        self.scene.active_view = active_view
        mesh_file, ext = self.__save_temp_mesh(active_view)
        normalize_transform = self.__get_normalize_transform(active_view)
        view_transform = self.__get_view_transform(active_view)
        if parent_transform is not None:
            view_transform = parent_transform * view_transform
        glob_transform = self.__get_glob_transform()

        total_transform = glob_transform * normalize_transform * view_transform
        material_setting = self.__get_material_setting(active_view)
        setting = {
            "type": ext[1:],
            "filename": mesh_file,
            "faceNormals": False,
            "toWorld": total_transform
        }
        setting.update(material_setting)
        target_shape = self.plgr.create(setting)
        self.mitsuba_scene.addChild(target_shape)

        # Transform the vertices to world space to track the scene bbox.
        M = (glob_transform * normalize_transform *
             view_transform).getMatrix()
        M = np.array([
            [M[0, 0], M[0, 1], M[0, 2], M[0, 3]],
            [M[1, 0], M[1, 1], M[1, 2], M[1, 3]],
            [M[2, 0], M[2, 1], M[2, 2], M[2, 3]],
            [M[3, 0], M[3, 1], M[3, 2], M[3, 3]],
        ])
        vertices = active_view.vertices
        vertices = np.hstack((vertices, np.ones((len(vertices), 1))))
        vertices = np.dot(M, vertices.T).T
        # Homogeneous divide.
        vertices = np.divide(vertices[:, 0:3], vertices[:, 3][:, np.newaxis])
        self.transformed_bbox_min = np.amin(vertices, axis=0)
        self.transformed_bbox_max = np.amax(vertices, axis=0)
        center = active_view.center  # NOTE(review): unused local
        # Track the lowest Y so the floor can sit under all geometry.
        floor_height = self.transformed_bbox_min[1]
        if self.floor_height is None or self.floor_height > floor_height:
            self.floor_height = floor_height

        self.scene.active_view = old_active_view

    def __add_active_primitives(self):
        self.__add_primitives(self.scene.active_view)

    def __add_primitives(self, active_view, parent_transform=None):
        """Recursively add the view's primitive shapes (cylinder, cone,
        sphere), wrapping each in a plastic BSDF (masked when alpha < 1)."""
        if len(active_view.subviews) > 0:
            for view in active_view.subviews:
                if parent_transform is None:
                    view_transform = self.__get_view_transform(active_view)
                else:
                    view_transform = parent_transform * self.__get_view_transform(
                        active_view)
                self.__add_primitives(view, view_transform)
            return

        old_active_view = self.scene.active_view
        self.scene.active_view = active_view
        scale = active_view.scale
        normalize_transform = self.__get_normalize_transform(active_view)
        view_transform = self.__get_view_transform(active_view)
        if parent_transform is not None:
            view_transform = parent_transform * view_transform
        glob_transform = self.__get_glob_transform()
        total_transform = glob_transform * view_transform * normalize_transform

        primitives = self.scene.active_view.primitives
        for shape in primitives:
            # Fully transparent shapes contribute nothing; skip them.
            if shape.color[3] <= 0.0: continue
            color = {
                "type": "plastic",
                "diffuseReflectance": Spectrum(shape.color[:3].tolist())
            }
            if shape.color[3] < 1.0:
                # Partially transparent: wrap the BSDF in an opacity mask.
                color = {
                    "type": "mask",
                    "opacity": Spectrum(active_view.alpha),
                    "bsdf": color
                }
            if isinstance(shape, Cylinder):
                if shape.radius <= 0.0: continue
                setting = self.__add_cylinder(shape)
                setting["bsdf"] = color
                setting["toWorld"] = total_transform
            elif isinstance(shape, Cone):
                if shape.radius <= 0.0: continue
                setting = self.__add_cone(shape)
                setting["bsdf"] = color
                setting["toWorld"] = total_transform * setting["toWorld"]
            elif isinstance(shape, Sphere):
                if shape.radius <= 0.0: continue
                # Due to weird behavior in Mitsuba, all transformation is
                # applied directly on radius and center variable.
                setting = self.__add_sphere(shape)
                setting["radius"] *= scale
                setting["center"] = total_transform * setting["center"]
                setting["bsdf"] = color
            else:
                raise NotImplementedError(
                    "Unknown primitive: {}".format(shape))

            mitsuba_primative = self.plgr.create(setting)
            self.mitsuba_scene.addChild(mitsuba_primative)
        self.scene.active_view = old_active_view

    def __add_sphere(self, shape):
        """Return the plugin settings dict for a sphere primitive."""
        setting = {
            "type": "sphere",
            "radius": shape.radius,
            "center": Point(*shape.center)
        }
        return setting

    def __add_cylinder(self, shape):
        """Return the plugin settings dict for a cylinder primitive."""
        setting = {
            "type": "cylinder",
            "p0": Point(*shape.end_points[0]),
            "p1": Point(*shape.end_points[1]),
            "radius": shape.radius
        }
        return setting

    def __add_cone(self, shape):
        """Return the settings dict for a cone primitive.

        A unit cone mesh ("cone.ply", resolved via the file resolver) is
        scaled to the cone's radius/height, rotated from the +Y axis onto
        the segment direction, and translated to the segment midpoint.
        """
        y_dir = np.array([0.0, 1.0, 0.0])
        v = shape.end_points[1] - shape.end_points[0]
        center = 0.5 * (shape.end_points[0] + shape.end_points[1])
        height = norm(v)
        scale = Transform.scale(Vector(shape.radius, height, shape.radius))
        # Rotation axis/angle taking +Y onto v.
        axis = np.cross(y_dir, v)
        axis_len = norm(axis)
        angle = degrees(atan2(axis_len, np.dot(y_dir, v)))

        if (axis_len > 1e-6):
            axis /= axis_len
            rotate = Transform.rotate(Vector(*axis), angle)
        else:
            # v is (anti)parallel to +Y; any perpendicular axis works.
            axis = np.array([1.0, 0.0, 0.0])
            rotate = Transform.rotate(Vector(*axis), angle)
        translate = Transform.translate(Vector(*center))

        cone_file = self.file_resolver.resolve("cone.ply")
        cone_transform = translate * rotate * scale
        setting = {
            "type": "ply",
            "filename": cone_file,
            "toWorld": cone_transform
        }
        return setting

    def __get_material_setting(self, active_view):
        """Build the BSDF (and optional subsurface) settings for the view's
        mesh based on the with_colors / with_alpha / with_texture_coordinates
        flags."""
        setting = {}
        if self.with_texture_coordinates:
            # UV debug material: checkerboard texture.
            diffuse_color = {
                "type": "checkerboard",
                "color0": Spectrum([1.0, 1.0, 1.0]),
                "color1": Spectrum([0.5, 0.5, 0.5]),
                "flipTexCoords": False,
            }
        else:
            if self.with_colors:
                diffuse_color = {"type": "vertexcolors"}
            else:
                diffuse_color = Spectrum(0.2)

        setting["bsdf"] = {
            "type": "roughplastic",
            "distribution": "beckmann",
            "alpha": 0.2,
            "diffuseReflectance": diffuse_color,
        }
        # Shade both sides of the surface.
        setting["bsdf"] = {
            "type": "twosided",
            "bsdf": setting["bsdf"]
        }
        if self.with_alpha:
            setting["bsdf"] = {
                "type": "mask",
                "opacity": Spectrum(active_view.alpha),
                "bsdf": setting["bsdf"]
            }
        if not self.with_colors and \
                not self.with_alpha and \
                not self.with_texture_coordinates:
            setting["subsurface"] = {
                "type": "dipole",
                "material": "Skimmilk",
                "scale": 0.5
            }
        elif self.with_texture_coordinates:
            setting["bsdf"]["bsdf"]["nonlinear"] = True
        elif not self.with_alpha and not self.with_texture_coordinates:
            # Reached when vertex colors are on (first branch failed).
            setting["subsurface"] = {
                "type": "dipole",
                "material": "sprite",
                "scale": 0.5
            }
        return setting

    def __add_others(self):
        """Add optional props: coin for scale reference, axis, floor."""
        active_view = self.scene.active_view
        if active_view.with_quarter:
            self.__add_quarter()
        if active_view.with_axis:
            self.__add_axis()
        if active_view.background != "n":
            self.__add_floor()

    def __add_quarter(self):
        """Add a coin-shaped prop (cylinder body + textured head/tail disks)
        next to the geometry, presumably for scale reference."""
        scale = self.scene.active_view.scale
        # Coin dimensions, scaled to the view (in the view's units).
        radius = 12.13 * scale
        thickness = 0.875 * scale
        face_scale = Transform.scale(Vector(radius))
        tail_offset = Transform.translate(Vector(0, 0, thickness))
        # Head disk is mirrored (negative Z scale) to face outward.
        head_offset = Transform.translate(Vector(0, 0, -thickness)) *\
                Transform.scale(Vector(1.0, 1.0, -1.0))

        bbox_diag = 0.5 * norm(self.transformed_bbox_max -
                               self.transformed_bbox_min)
        # Rest the coin on the floor, just in front of the geometry bbox.
        custom_transform = Transform.translate(
            Vector(0.5, self.floor_height + radius + 0.01, -bbox_diag - 0.01))

        head_texture = self.file_resolver.resolve("head.png")
        tail_texture = self.file_resolver.resolve("tail.png")
        side_texture = self.file_resolver.resolve("side.png")

        # Ridged coin edge: bump-mapped rough metal cylinder.
        quarter_ring = self.plgr.create({
            "type": "cylinder",
            "p0": Point(0.0, 0.0, thickness),
            "p1": Point(0.0, 0.0, -thickness),
            "radius": radius,
            "toWorld": custom_transform,
            "bsdf": {
                "type": "bumpmap",
                "texture": {
                    "type": "scale",
                    "scale": 0.01,
                    "texture": {
                        "type": "bitmap",
                        "filename": side_texture,
                        "gamma": 1.0,
                        "uscale": 100.0,
                    },
                },
                "bsdf": {
                    "type": "roughconductor",
                    "distribution": "ggx",
                    "alpha": 0.5,
                    "material": "Ni_palik"
                    #"diffuseReflectance": Spectrum(0.5)
                }
            }
        })
        head = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * head_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": head_texture
                }
            }
        })
        tail = self.plgr.create({
            "type": "disk",
            "toWorld": custom_transform * tail_offset * face_scale,
            "bsdf": {
                "type": "diffuse",
                "reflectance": {
                    "type": "bitmap",
                    "filename": tail_texture
                }
            }
        })

        self.mitsuba_scene.addChild(quarter_ring)
        self.mitsuba_scene.addChild(head)
        self.mitsuba_scene.addChild(tail)

    def __add_axis(self):
        raise NotImplementedError("Adding axis is not supported")

    def __add_floor(self):
        """Add a large rough-diffuse ground plane at ``floor_height``.

        Background codes: "d" and "l" select darker/lighter reflectance
        (0.05 vs 0.5); anything else renders a black floor.
        """
        # Rectangle lies in the XY plane; rotate it to be horizontal.
        rotate_transform = Transform.rotate(Vector(-1, 0, 0), 90)
        scale_transform = Transform.scale(Vector(100, 100, 100))
        translate_transform = Transform.translate(
            Vector(0.0, self.floor_height, 0.0))
        total_transform = translate_transform * scale_transform\
                * rotate_transform

        if self.scene.active_view.background == "d":
            reflectance = Spectrum(0.05)
        elif self.scene.active_view.background == "l":
            reflectance = Spectrum(0.5)
        else:
            reflectance = Spectrum(0.0)

        floor = self.plgr.create({
            "type": "rectangle",
            "toWorld": total_transform,
            "bsdf": {
                "type": "roughdiffuse",
                "diffuseReflectance": reflectance,
                "alpha": 0.5
            }
        })
        self.mitsuba_scene.addChild(floor)

    def __run_mitsuba(self):
        """Configure the scene, spawn one local worker per CPU core, render
        synchronously, print statistics, and stop the scheduler."""
        self.mitsuba_scene.configure()

        scheduler = Scheduler.getInstance()
        for i in range(multiprocessing.cpu_count()):
            scheduler.registerWorker(LocalWorker(i, "worker_{}".format(i)))
        scheduler.start()

        queue = RenderQueue()
        self.mitsuba_scene.setDestinationFile(self.image_name)

        job = RenderJob("render job: {}".format(self.image_name),
                        self.mitsuba_scene, queue)
        job.start()

        # Block until the render finishes.
        queue.waitLeft(0)
        queue.join()

        print(Statistics.getInstance().getStats())
        scheduler.stop()

    def __save_temp_mesh(self, active_view):
        """Serialize the active view's mesh to a timestamped temp file in
        Mitsuba's ``.serialized`` format; return (path, extension).

        Quads are split into two triangles, and per-corner colors/UVs are
        duplicated so every face indexes its own unique vertices.
        """
        basename, ext = os.path.splitext(self.image_name)
        path, name = os.path.split(basename)
        now = datetime.datetime.now()
        stamp = now.isoformat()
        tmp_dir = tempfile.gettempdir()
        # Always write Mitsuba's native format, whatever the image ext was.
        ext = ".serialized"

        tmp_mesh_name = os.path.join(tmp_dir,
                                     "{}_{}{}".format(name, stamp, ext))

        vertices = active_view.vertices
        faces = active_view.faces
        voxels = active_view.voxels
        colors = active_view.vertex_colors.reshape((-1, 4), order="C")
        if self.with_texture_coordinates:
            uvs = active_view.texture_coordinates
        else:
            uvs = None

        dim = vertices.shape[1]
        num_faces, vertex_per_face = faces.shape
        if vertex_per_face == 4:
            # Split each quad (0,1,2,3) into triangles (0,1,2) and (0,2,3).
            faces = np.vstack([faces[:, [0, 1, 2]], faces[:, [0, 2, 3]]])
            vertex_per_face = 3
            num_faces *= 2
            colors = colors.reshape((-1, 4, 4), order="C")
            colors = np.vstack([
                colors[:, [0, 1, 2], :].reshape((-1, 4), order="C"),
                colors[:, [0, 2, 3], :].reshape((-1, 4), order="C")
            ])
            if uvs is not None:
                uvs = uvs.reshape((-1, 4, 2), order="C")
                uvs = np.vstack([
                    uvs[:, [0, 1, 2], :].reshape((-1, 2), order="C"),
                    uvs[:, [0, 2, 3], :].reshape((-1, 2), order="C")
                ])
        # Unshare vertices: one vertex per face corner, faces renumbered.
        vertices = vertices[faces.ravel(order="C")]
        assert (len(colors) == len(vertices))
        faces = np.arange(len(vertices), dtype=int).reshape(
            (num_faces, vertex_per_face), order="C")

        mesh = pymesh.form_mesh(vertices, faces)

        if active_view.use_smooth_normal:
            normals = active_view.vertex_normals
        else:
            normals = None
        data = serialize_mesh(mesh, normals, colors, uvs)
        with open(tmp_mesh_name, 'wb') as fout:
            fout.write(data)
        return tmp_mesh_name, ext

    def __get_normalize_transform(self, active_view):
        """Transform that recenters the view on its centroid, then scales."""
        centroid = active_view.center
        scale = active_view.scale

        normalize_transform = Transform.scale(Vector(scale, scale, scale)) *\
                Transform.translate(Vector(*(-1 * centroid)))
        return normalize_transform

    def __get_view_transform(self, active_view):
        """Convert the view's flat 3x4 transform (column-major, order="F")
        into a Mitsuba Transform."""
        transform = np.eye(4)
        transform[0:3, :] = active_view.transform.reshape((3, 4), order="F")
        view_transform = Transform(
            Matrix4x4(transform.ravel(order="C").tolist()))
        return view_transform

    def __get_glob_transform(self):
        """Convert the scene-wide 4x4 transform into a Mitsuba Transform."""
        glob_transform = Transform(
            Matrix4x4(self.global_transform.ravel(order="C").tolist()))
        return glob_transform

    @property
    def with_colors(self):
        """Whether the active view carries per-vertex colors."""
        return self.scene.active_view.with_colors

    @property
    def with_alpha(self):
        """Whether the active view requests transparency (mask BSDFs)."""
        return self.scene.active_view.with_alpha

    @property
    def with_wire_frame(self):
        """Whether the active view requests wireframe rendering."""
        return self.scene.active_view.with_wire_frame

    @property
    def with_uniform_colors(self):
        """Whether the active view uses a single uniform color."""
        return self.scene.active_view.with_uniform_colors

    @property
    def with_texture_coordinates(self):
        """Whether the active view carries UV texture coordinates."""
        return self.scene.active_view.with_texture_coordinates
Example #24
0
 def addSceneLights(self):
     """Attach every pre-configured light to the scene.

     Wraps the current scene in a new Scene object, adds each light from
     ``self.light`` as a child, and replaces ``self.scene`` with the result.
     """
     rebuilt = Scene(self.scene)
     for lamp in self.light:
         rebuilt.addChild(lamp)
     self.scene = rebuilt
Example #25
0
def construct_simple_scene(scene_objects, sensor) -> Scene:
    """
    Construct a simple scene containing given objects and using the given sensor. Uses the path integrator and constant
    emitter
    :param scene_objects: All scene child objects to add
    :param sensor: The mitsuba sensor definition to use for this scene
    :return: The scene created, already configured and initialized
    """
    plugin_mgr = PluginManager.getInstance()

    result = Scene()
    # Path tracer + constant environment emitter, then the caller's sensor.
    for child in (plugin_mgr.create({'type': 'path'}),
                  plugin_mgr.create({'type': 'constant'}),
                  sensor):
        result.addChild(child)
    for child in scene_objects:
        result.addChild(child)

    result.configure()
    result.initialize()

    return result
Example #26
0
        class ApiExportContext(ExportContextBase):
            '''
            Export context that drives the Mitsuba Python API directly
            (rather than writing an XML scene file). Converts Blender's
            Z-up coordinate conventions to Mitsuba's Y-up on the way in.
            '''

            EXPORT_API_TYPE = 'API'

            # Mitsuba handles; thread/pmgr/scene are assigned in __init__.
            thread = None
            scheduler = None
            pmgr = None
            scene = None

            def __init__(self):
                super().__init__()

                # Register this thread with Mitsuba and give it the shared
                # file resolver and logger.
                self.thread = Thread.registerUnmanagedThread('exporter')
                self.thread.setFileResolver(main_fresolver)
                self.thread.setLogger(main_logger)

                self.pmgr = PluginManager.getInstance()
                self.scene = Scene()

            # Functions binding to Mitsuba extension API

            def spectrum(self, value, mode=''):
                '''
                Convert *value* into a Mitsuba Spectrum.

                Accepts a dict ({'type': 'rgb'|'srgb'|'spectrum'|'blackbody', ...}),
                a plain number, a file path to an interpolated spectrum, or a
                sequence (RGB triple/quad, single value, or (wavelength, value)
                pairs). Falls back to Spectrum(0.0) when conversion fails.
                '''
                if not mode:
                    mode = self.color_mode

                spec = None

                if isinstance(value, (dict)):
                    if 'type' in value:
                        if value['type'] in {'rgb', 'srgb', 'spectrum'}:
                            spec = self.spectrum(value['value'], value['type'])

                        elif value['type'] == 'blackbody':
                            spec = Spectrum()
                            spec.fromContinuousSpectrum(
                                BlackBodySpectrum(value['temperature']))
                            spec.clampNegative()
                            spec = spec * value['scale']

                elif isinstance(value, (float, int)):
                    spec = Spectrum(value)

                elif isinstance(value, (str)):
                    contspec = InterpolatedSpectrum(
                        self.get_export_path(value))
                    spec = Spectrum()
                    spec.fromContinuousSpectrum(contspec)
                    spec.clampNegative()

                else:
                    try:
                        items = list(value)

                        for i in items:
                            if not isinstance(i, (float, int, tuple)):
                                raise Exception(
                                    'Error: spectrum list contains an unknown type'
                                )

                    # BUGFIX: was a bare ``except:``, which also swallowed
                    # SystemExit/KeyboardInterrupt. ``Exception`` still
                    # catches both non-iterable TypeErrors and the sentinel
                    # raised above for unknown item types.
                    except Exception:
                        items = None

                    if items:
                        totitems = len(items)

                        if isinstance(items[0], (float, int)):
                            # RGB(A) triple/quad; the alpha component is ignored.
                            if totitems == 3 or totitems == 4:
                                spec = Spectrum()

                                if mode == 'srgb':
                                    spec.fromSRGB(items[0], items[1], items[2])

                                else:
                                    spec.fromLinearRGB(items[0], items[1],
                                                       items[2])

                            elif totitems == 1:
                                spec = Spectrum(items[0])

                            else:
                                MtsLog(
                                    'Expected spectrum items to be 1, 3 or 4, got %d.'
                                    % len(items), type(items), items)

                        else:
                            # Sequence of (wavelength, value) sample pairs.
                            spec = Spectrum()
                            contspec = InterpolatedSpectrum()

                            for spd in items:
                                (wlen, val) = spd
                                contspec.append(wlen, val)

                            spec.fromContinuousSpectrum(contspec)
                            spec.clampNegative()

                    else:
                        MtsLog('Unknown spectrum type.', type(value), value)

                if spec is None:
                    spec = Spectrum(0.0)

                return spec

            def vector(self, x, y, z):
                # Blender is Z up but Mitsuba is Y up, convert the vector
                return Vector(x, z, -y)

            def point(self, x, y, z):
                # Blender is Z up but Mitsuba is Y up, convert the point
                return Point(x, z, -y)

            def transform_lookAt(self, origin, target, up, scale=None):
                '''Build a Mitsuba look-at transform from Blender-space
                origin/target/up, with an optional XY scale applied last.'''
                # Blender is Z up but Mitsuba is Y up, convert the lookAt
                transform = Transform.lookAt(
                    Point(origin[0], origin[2], -origin[1]),
                    Point(target[0], target[2], -target[1]),
                    Vector(up[0], up[2], -up[1]))

                if scale is not None:
                    transform *= Transform.scale(Vector(scale, scale, 1))

                return transform

            def animated_lookAt(self, motion):
                '''Build a (possibly animated) look-at transform from a list
                of (time, (origin, target, up, scale)) keyframes.'''
                # Collapse a two-key motion whose keys are identical.
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, (origin, target, up, scale)) in motion:
                        transform.appendTransform(
                            t, self.transform_lookAt(origin, target, up,
                                                     scale))

                else:
                    (origin, target, up, scale) = motion[0][1]
                    transform = self.transform_lookAt(origin, target, up,
                                                      scale)

                return transform

            def transform_matrix(self, matrix):
                '''Convert a Blender 4x4 matrix to a Mitsuba Transform.'''
                # Blender is Z up but Mitsuba is Y up, convert the matrix
                global_matrix = axis_conversion(to_forward="-Z",
                                                to_up="Y").to_4x4()
                l = matrix_to_list(global_matrix * matrix)
                mat = Matrix4x4(l)
                transform = Transform(mat)

                return transform

            def animated_transform(self, motion):
                '''Build a (possibly animated) transform from a list of
                (time, matrix) keyframes.'''
                # Collapse a two-key motion whose keys are identical.
                if len(motion) == 2 and motion[0][1] == motion[1][1]:
                    del motion[1]

                if len(motion) > 1:
                    transform = AnimatedTransform()

                    for (t, m) in motion:
                        transform.appendTransform(t, self.transform_matrix(m))

                else:
                    transform = self.transform_matrix(motion[0][1])

                return transform

            def configure(self):
                '''
                Call Scene configure
                '''

                self.scene.addChild(self.pmgr.create(self.scene_data))
                self.scene.configure()

                # Reset the volume redundancy check
                ExportedVolumes.reset_vol_list()

            def cleanup(self):
                self.exit()

            def exit(self):
                # Do nothing
                pass