Example 1
def create_transform_on_bbsphere(bbox,
                                 radius_multiplier: float,
                                 positioning_vector: Vector3,
                                 tilt=None) -> Transform:
    """
    Create a camera with origin on the bounding sphere around an object and looking towards the center of that sphere
    Camera center is calculated as: bbsphere_center + radius_multiplier * bbsphere_radius * normalized_positioning_vector
    :param bbox: The bounding box of the object from which to calculate the bounding sphere
    :param radius_multiplier: The value to multiply the bounding sphere's radius with
    (= distance of the camera origin to the center of the bounding sphere)
    :param positioning_vector: The vector pointing towards the camera center
    :param tilt: Transform to apply to camera origin. Usually a small rotation to tilt the camera perspective a little
    :return: A transform on the bbsphere
    """
    bsphere = bbox.getBSphere()
    camera_target = bsphere.center

    camera_offset = radius_multiplier * bsphere.radius * normalize(
        positioning_vector)
    camera_origin = camera_target + camera_offset

    # Look from the (optionally tilted) origin towards the sphere center, +Z up
    camera_transform = Transform.lookAt(
        tilt * camera_origin if tilt is not None else camera_origin,
        camera_target, Vector3(0., 0., 1.))

    return camera_transform
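
A minimal usage sketch, assuming a Mitsuba 0.x scene and sensor like those in Example 5 below; the multiplier and view direction are invented values:

bbox = scene.getKDTree().getAABB()  # axis-aligned bounding box of the scene
to_world = create_transform_on_bbsphere(
    bbox,
    radius_multiplier=1.2,                    # 20% beyond the sphere radius
    positioning_vector=Vector3(1., 1., 0.5))  # view from the +X/+Y diagonal
sensor.setWorldTransform(to_world)            # assign to an existing sensor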
Example 2
    def transform_lookAt(self, origin, target, up, scale=None):
        # Blender is Z-up but Mitsuba is Y-up: remap (x, y, z) -> (x, z, -y)
        # when converting the lookAt parameters
        transform = Transform.lookAt(
            Point(origin[0], origin[2], -origin[1]),
            Point(target[0], target[2], -target[1]),
            Vector(up[0], up[2], -up[1]))

        if scale is not None:
            transform *= Transform.scale(Vector(scale, scale, 1))

        return transform
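
For illustration, a hedged sketch of this conversion on a concrete pose (the exporter instance is hypothetical; only the method above is assumed):

# A Blender camera at (0, -5, 2) in Z-up coordinates, looking at the origin,
# ends up at (0, 2, 5) in Mitsuba's Y-up convention: x is unchanged,
# Blender z becomes Mitsuba y, and Blender y flips into -z.
to_world = exporter.transform_lookAt(
    origin=(0.0, -5.0, 2.0),
    target=(0.0, 0.0, 0.0),
    up=(0.0, 0.0, 1.0))  # Blender's +Z up maps to Mitsuba's +Y up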
Example 3
    def __add_active_camera(self):
        active_view = self.scene.active_view
        camera = self.scene.active_camera
        if active_view.transparent_bg:
            pixel_format = "rgba"
        else:
            pixel_format = "rgb"

        crop_bbox = np.array(camera.crop_bbox)
        if np.amax(crop_bbox) <= 1.0:
            # bbox is given in relative [0, 1] coordinates; scale to pixels
            crop_bbox[:, 0] *= self.image_width
            crop_bbox[:, 1] *= self.image_height

        assert np.all(crop_bbox >= 0)
        assert np.all(crop_bbox[:, 0] <= self.image_width)
        assert np.all(crop_bbox[:, 1] <= self.image_height)

        mitsuba_camera = self.plgr.create({
            "type": "perspective",
            "fov": float(camera.fovy),
            "fovAxis": "y",
            "toWorld": Transform.lookAt(Point(*camera.location),
                                        Point(*camera.look_at_point),
                                        Vector(*camera.up_direction)),
            "film": {
                "type": "ldrfilm",
                "width": self.image_width,
                "height": self.image_height,
                "cropOffsetX": int(crop_bbox[0, 0]),
                "cropOffsetY": int(crop_bbox[0, 1]),
                "cropWidth": int(crop_bbox[1, 0] - crop_bbox[0, 0]),
                "cropHeight": int(crop_bbox[1, 1] - crop_bbox[0, 1]),
                "banner": False,
                "pixelFormat": pixel_format,
                "rfilter": {
                    "type": "gaussian"
                }
            },
            "sampler": {
                "type": "halton",
                "sampleCount": 4,
            }
        })
        self.mitsuba_scene.addChild(mitsuba_camera)
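
To make the crop arithmetic concrete, a hedged sketch with invented values (render at 640x480, crop the right half of the middle rows):

import numpy as np

image_width, image_height = 640, 480
crop_bbox = np.array([[0.5, 0.25],   # (x_min, y_min), relative
                      [1.0, 0.75]])  # (x_max, y_max), relative
crop_bbox[:, 0] *= image_width       # x in pixels: 320 .. 640
crop_bbox[:, 1] *= image_height      # y in pixels: 120 .. 360
# Resulting film parameters as computed above:
#   cropOffsetX=320, cropOffsetY=120, cropWidth=320, cropHeight=240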
Example 4
def makeScene():

    scene = Scene()

    pmgr = PluginManager.getInstance()

    # make shapes
    for i in range(100):
        shapeProps = Properties("sphere")
        shapeProps["center"] = Point(i, i, i)
        shapeProps["radius"] = 0.1
        shape = pmgr.createObject(shapeProps)
        shape.configure()

        scene.addChild(shape)

    # make perspective sensor
    sensorProps = Properties("perspective")
    sensorProps["toWorld"] = Transform.lookAt(Point(0, 0, 10), Point(0, 0, 0),
                                              Vector(0, 1, 0))
    sensorProps["fov"] = 45.0

    sensor = pmgr.createObject(sensorProps)

    # make film
    filmProps = Properties("ldrfilm")
    filmProps["width"] = 640
    filmProps["height"] = 480

    film = pmgr.createObject(filmProps)
    film.configure()

    sensor.addChild("film", film)
    sensor.configure()

    scene.addChild(sensor)
    scene.configure()

    return scene
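
A hedged sketch of actually rendering the returned scene, using the same scheduler/queue pattern that appears in Example 5 below; the destination file name is invented, and the minimal scene may still rely on Mitsuba's defaults for the integrator and sampler:

import multiprocessing
from mitsuba.core import Scheduler, LocalWorker
from mitsuba.render import RenderQueue, RenderJob

scene = makeScene()
scene.setDestinationFile('renderedResult')  # hypothetical output name

# Register one local worker per CPU core, then start the scheduler.
scheduler = Scheduler.getInstance()
for i in range(multiprocessing.cpu_count()):
    scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
scheduler.start()

queue = RenderQueue()
job = RenderJob('render', scene, queue)
job.start()
queue.waitLeft(0)  # block until the job has finished
queue.join()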
Example 5
import os
import sys
import math
import json

import numpy as np

# session, RasterHelper, current_rt_program, main_scene_xml_file,
# multi_file_prefix and output_format are provided by the surrounding project.


def do_simulation_multiangle_seq(seqname):
    currdir = os.path.split(os.path.realpath(__file__))[0]
    sys.path.append(currdir + '/bin/rt/' + current_rt_program + '/python/2.7/')
    os.environ['PATH'] = currdir + '/bin/rt/' + current_rt_program + os.pathsep + os.environ['PATH']
    import mitsuba
    from mitsuba.core import Vector, Point, Ray, Thread, Scheduler, LocalWorker, PluginManager, Transform
    from mitsuba.render import SceneHandler
    from mitsuba.render import RenderQueue, RenderJob
    from mitsuba.render import Scene
    import multiprocessing

    scheduler = Scheduler.getInstance()
    for i in range(0, multiprocessing.cpu_count()):
        scheduler.registerWorker(LocalWorker(i, 'wrk%i' % i))
    scheduler.start()


    scene_path = session.get_scenefile_path()
    fileResolver = Thread.getThread().getFileResolver()
    fileResolver.appendPath(str(scene_path))
    scene = SceneHandler.loadScene(fileResolver.resolve(
        str(os.path.join(session.get_scenefile_path(), main_scene_xml_file))))
    scene.configure()
    scene.initialize()
    queue = RenderQueue()
    sceneResID = scheduler.registerResource(scene)
    bsphere = scene.getKDTree().getAABB().getBSphere()
    radius = bsphere.radius
    targetx, targety, targetz = bsphere.center[0], bsphere.center[1], bsphere.center[2]
    f = open(seqname + ".conf", 'r')
    params = json.load(f)
    obs_azimuth = params['seq1']['obs_azimuth']
    obs_zenith = params['seq2']['obs_zenith']
    cfgfile = session.get_config_file()
    f = open(cfgfile, 'r')
    cfg = json.load(f)
    viewR = cfg["sensor"]["obs_R"]
    mode = cfg["sensor"]["film_type"]
    # Python 2 map() returns a list, so these can be iterated more than once
    azi_arr = map(lambda x: float(x), obs_azimuth.strip().split(":")[1].split(","))
    zeni_arr = map(lambda x: float(x), obs_zenith.strip().split(":")[1].split(","))
    seq_header = multi_file_prefix + "_" + seqname
    index = 0
    for azi in azi_arr:
        for zeni in zeni_arr:
            distFile = os.path.join(session.get_output_dir(),
                                    seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(".", "_"))
            newScene = Scene(scene)
            pmgr = PluginManager.getInstance()
            newSensor = pmgr.createObject(scene.getSensor().getProperties())
            theta = zeni / 180.0 * math.pi
            phi = (azi - 90) / 180.0 * math.pi
            scale_x = radius
            scale_z = radius
            toWorld = Transform.lookAt(
                Point(targetx - viewR * math.sin(theta) * math.cos(phi),
                      targety + viewR * math.cos(theta),
                      targetz - viewR * math.sin(theta) * math.sin(phi)),  # origin
                Point(targetx, targety, targetz),  # target
                Vector(0, 0, 1)  # up
            ) * Transform.scale(
                Vector(scale_x, scale_z, 1)  # field-of-view extent
            )
            newSensor.setWorldTransform(toWorld)
            newFilm = pmgr.createObject(scene.getFilm().getProperties())
            newFilm.configure()
            newSensor.addChild(newFilm)
            newSensor.configure()
            newScene.addSensor(newSensor)
            newScene.setSensor(newSensor)
            newScene.setSampler(scene.getSampler())
            newScene.setDestinationFile(str(distFile))
            job = RenderJob('Simulation Job' + "VA_"+str(azi)+"_VZ_"+str(zeni), newScene, queue, sceneResID)
            job.start()
        queue.waitLeft(0)
        queue.join()
    # handle npy
    if mode == "spectrum" and (output_format not in ("npy", "NPY")):
        for azi in azi_arr:
            for zeni in zeni_arr:
                distFile = os.path.join(session.get_output_dir(),
                                        seq_header + ("_VA_%.2f" % azi).replace(".", "_") + ("_VZ_%.2f" % zeni).replace(
                                            ".", "_"))
                data = np.load(distFile + ".npy")
                bandlist = cfg["sensor"]["bands"].split(",")
                RasterHelper.saveToHdr_no_transform(data, distFile, bandlist, output_format)
                os.remove(distFile + ".npy")
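
The camera placement above is plain spherical-coordinate arithmetic; a standalone hedged sketch with invented values (target at the origin, viewed from 10 units away at zenith 30° and azimuth 45°):

import math

targetx = targety = targetz = 0.0
viewR = 10.0
theta = 30.0 / 180.0 * math.pi           # zenith angle
phi = (45.0 - 90.0) / 180.0 * math.pi    # azimuth, shifted as in the loop above

# Same formula as the lookAt origin above: the camera sits on a sphere of
# radius viewR around the target; theta = 0 places it on the +Y axis.
origin = (targetx - viewR * math.sin(theta) * math.cos(phi),
          targety + viewR * math.cos(theta),
          targetz - viewR * math.sin(theta) * math.sin(phi))
print(origin)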
Example 6
def create_interpolated_trajectory(
        cameras: List[Transform],
        method: str,
        num_cameras_per_m: int = 75,
        num_cameras_per_rad: int = 225,
        num_total_cameras: int = 1000) -> List[Transform]:
    """
    Creates an interpolated camera trajectory using supplied key camera transforms
    :param cameras: Camera transformations to use as control points for interpolation
    :param method: How to interpolate: *bezier* (used for both camera centers and rotations):
    Smooth trajectory, but does not necessarily pass through the control points (except the first and last) or
    *catmullrom* (used for camera centers, using quaternion slerp for rotations):
    Passes through all control points, but needs more tuning to prevent weird behaviour
    :param num_cameras_per_m: catmullrom parameter: Camera centers per meter
    :param num_cameras_per_rad: catmullrom parameter: Camera rotations per radiant
    :param num_total_cameras: bezier parameter: Number of interpolated cameras between first and last key camera pose
    :return: A list of interpolated transforms
    """
    all_interpolated = []

    camera_pose_matrices = [
        util.convert_transform2numpy(camera) for camera in cameras
    ]
    if method == 'bezier':
        interpolated_cameras = bezier_curve(camera_pose_matrices,
                                            num_steps=num_total_cameras)
        for elem in interpolated_cameras:
            position = Point3(*elem[:3, 3].tolist())
            look = Vector3(*elem[:3, :3].dot(np.array([0, 0, 1])).tolist())
            up = Vector3(*(elem[:3, :3].dot(np.array([0, 1, 0]))).tolist())
            all_interpolated.append(
                Transform.lookAt(position, position + look, up))

    elif method == 'catmullrom':
        assert len(cameras) >= 4
        camera_groups = [
            camera_pose_matrices[idx:idx + 4]
            for idx in range(len(cameras) - 3)
        ]
        for camera_group in camera_groups:
            key_positions = (camera_group[1][:3, 3], camera_group[2][:3, 3])
            # Rotate the canonical look direction (0, 0, 1) into world space
            # with a matrix-vector product, as in the bezier branch above
            key_looks = (camera_group[1][:3, :3].dot(np.array([0, 0, 1])),
                         camera_group[2][:3, :3].dot(np.array([0, 0, 1])))
            dist = np.linalg.norm(key_positions[1] - key_positions[0])
            angle = math.acos(
                np.clip(np.dot(key_looks[1], key_looks[0]), -1., 1.))

            # dist is divided by 100 before applying the per-meter density,
            # which suggests positions are expressed in centimeters
            num_t = int(np.round(dist / 100. * num_cameras_per_m))
            num_r = int(np.round(angle * num_cameras_per_rad))
            num = max(40, num_t, num_r)

            interpolated_cameras = catmull_rom_spline(camera_group, num)

            for elem in interpolated_cameras:
                position = Point3(*elem[:3, 3].tolist())
                look = Vector3(*elem[:3, :3].dot(np.array([0, 0, 1])).tolist())
                up = Vector3(*(elem[:3, :3].dot(np.array([0, 1, 0]))).tolist())
                all_interpolated.append(
                    Transform.lookAt(position, position + look, up))

    else:
        raise NotImplementedError(
            f'The method you chose ({method}) is not implemented')

    return all_interpolated
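
A hedged usage sketch, reusing create_transform_on_bbsphere from Example 1 to produce the key poses (the bounding box and view directions are invented; catmullrom needs at least four control points):

key_cameras = [
    create_transform_on_bbsphere(bbox, 1.5, Vector3(1., 0., 0.3)),
    create_transform_on_bbsphere(bbox, 1.5, Vector3(0., 1., 0.3)),
    create_transform_on_bbsphere(bbox, 1.5, Vector3(-1., 0., 0.3)),
    create_transform_on_bbsphere(bbox, 1.5, Vector3(0., -1., 0.3)),
]
trajectory = create_interpolated_trajectory(key_cameras, method='catmullrom')
# Each returned Transform can serve as a sensor's toWorld, one render per pose.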