Example #1
0
    def _get_positions_vectors(self, camera, disparity):
        """Compute ray directions and ray origins for one eye of a stereo lens.

        Parameters
        ----------
        camera : Camera
            Supplies ``unit_vectors``, ``width``, ``resolution``,
            ``position`` and the domain extents used to size the rays.
        disparity : unyt_quantity
            Signed eye offset along the camera's east direction; the sign
            selects the left or right eye.

        Returns
        -------
        vectors, positions
            Per-pixel ray direction vectors and ray origins, each of shape
            (resolution[0] // 2, resolution[1], 3).
        """
        # Each eye renders half of the horizontal resolution.
        single_resolution_x = int(np.floor(camera.resolution[0]) / 2)

        east_vec = camera.unit_vectors[0]
        north_vec = camera.unit_vectors[1]
        normal_vec = camera.unit_vectors[2]

        # Toe-in angle for this eye: rotate the camera frame about the
        # north vector so the two eyes converge at the focal plane.
        angle_disparity = - np.arctan2(disparity.in_units(camera.width.units),
                                       camera.width[2])
        R = get_rotation_matrix(angle_disparity, north_vec)

        east_vec_rot = np.dot(R, east_vec)
        normal_vec_rot = np.dot(R, normal_vec)

        # Normalized pixel coordinates in [-0.5, 0.5] along each image axis.
        px = np.linspace(-.5, .5, single_resolution_x)
        py = np.linspace(-.5, .5, camera.resolution[1])

        # Per-pixel offsets along the (rotated) east and the north
        # directions; np.outer replaces the deprecated np.mat matrix
        # product and directly yields shape (npix, 3).
        sample_x = camera.width[0] * np.outer(px, east_vec_rot)
        sample_y = camera.width[1] * np.outer(py, north_vec)

        # Broadcast the two 1-D offset grids onto the full image plane.
        sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
                             camera.resolution[1], axis=1)
        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                             single_resolution_x, axis=0)

        normal_vecs = np.tile(
            normal_vec_rot, single_resolution_x * camera.resolution[1])
        normal_vecs = normal_vecs.reshape(
            single_resolution_x, camera.resolution[1], 3)
        east_vecs = np.tile(
            east_vec_rot, single_resolution_x * camera.resolution[1])
        east_vecs = east_vecs.reshape(
            single_resolution_x, camera.resolution[1], 3)

        # The maximum possible length of ray
        max_length = (unorm(camera.position - camera._domain_center)
                      + 0.5 * unorm(camera._domain_width)
                      + np.abs(self.disparity))
        # Rescale the ray to be long enough to cover the entire domain
        vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
            (max_length / camera.width[2])

        positions = np.tile(
            camera.position, single_resolution_x * camera.resolution[1])
        positions = positions.reshape(
            single_resolution_x, camera.resolution[1], 3)

        # Shift the ray origins sideways by the eye separation.
        # NOTE(review): the original comment claimed east_vecs is the
        # non-rotated east vector, but it is built from east_vec_rot
        # above -- confirm which is intended.
        positions = positions + east_vecs * disparity

        mylog.debug(positions)
        mylog.debug(vectors)

        return vectors, positions
Example #2
0
File: lens.py  Project: cphyc/yt
    def _get_sampler_params(self, camera, render_source):
        """Build the sampler parameter dict for a spherical (360-degree) lens.

        Rays leave ``camera.position`` in every direction, parameterized by
        longitude (image x) and latitude (image y).

        Parameters
        ----------
        camera : Camera
            Supplies position, resolution, unit vectors and domain extents.
        render_source : RenderSource
            If it carries a zbuffer, its rgba image is reused; otherwise a
            fresh image is allocated.

        Returns
        -------
        dict
            Keyword arguments consumed by the volume-rendering sampler.
        """
        # Longitude in [-pi, pi] across image x, latitude in
        # [-pi/2, pi/2] across image y.
        px = np.linspace(-np.pi, np.pi, camera.resolution[0],
                         endpoint=True)[:, None]
        py = np.linspace(-np.pi / 2.0,
                         np.pi / 2.0,
                         camera.resolution[1],
                         endpoint=True)[None, :]

        # Unit direction on the sphere for every pixel.
        vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
                           dtype="float64",
                           order="C")
        vectors[:, :, 0] = np.cos(px) * np.cos(py)
        vectors[:, :, 1] = np.sin(px) * np.cos(py)
        vectors[:, :, 2] = np.sin(py)

        # The maximum possible length of ray
        max_length = unorm(camera.position - camera._domain_center
                           ) + 0.5 * unorm(camera._domain_width)
        # Rescale the ray to be long enough to cover the entire domain
        vectors = vectors * max_length

        positions = np.tile(camera.position, camera.resolution[0] *
                            camera.resolution[1]).reshape(
                                camera.resolution[0], camera.resolution[1], 3)

        # Rotate the sphere directions into the camera frame.
        R1 = get_rotation_matrix(0.5 * np.pi, [1, 0, 0])
        R2 = get_rotation_matrix(0.5 * np.pi, [0, 0, 1])
        uv = np.dot(R1, camera.unit_vectors)
        uv = np.dot(R2, uv)
        # np.dot contracts the trailing (length-3) axis of ``vectors`` with
        # the rows of ``uv``; the reshape round-trip in the original was a
        # no-op (ndarray.reshape returns a new array that was discarded).
        vectors = np.dot(vectors, uv)

        if render_source.zbuffer is not None:
            image = render_source.zbuffer.rgba
        else:
            image = self.new_image(camera)

        # x_vec/y_vec are unused by the spherical sampler; pass placeholders.
        dummy = np.ones(3, dtype="float64")
        image.shape = (camera.resolution[0], camera.resolution[1], 4)
        vectors.shape = (camera.resolution[0], camera.resolution[1], 3)
        positions.shape = (camera.resolution[0], camera.resolution[1], 3)

        sampler_params = dict(
            vp_pos=positions,
            vp_dir=vectors,
            center=self.back_center,
            bounds=(0.0, 1.0, 0.0, 1.0),
            x_vec=dummy,
            y_vec=dummy,
            width=np.zeros(3, dtype="float64"),
            image=image,
            lens_type="spherical",
        )
        return sampler_params
Example #3
0
    def _generate_container_field_sph(self, field):
        """Generate the "dts" or "t" container field for an SPH-particle ray.

        Raises
        ------
        KeyError
            If *field* is neither "dts" nor "t".
        """
        if field not in ("dts", "t"):
            raise KeyError(field)

        ptype = self.ds._sph_ptypes[0]
        ray_length = unorm(self.vec)

        # Displacement of each particle from the ray origin, and its
        # scalar projection onto the (unit) ray direction.
        displacement = self[ptype, "particle_position"] - self.start_point
        projection = udot(displacement, self.vec / ray_length)

        if field == "t":
            return projection / ray_length

        hsml = self[ptype, "smoothing_length"]
        mass = self[ptype, "particle_mass"]
        dens = self[ptype, "density"]
        # Impact parameter from each particle to the ray.
        impact = np.sqrt(np.sum(displacement ** 2, axis=1) - projection ** 2)

        # Use an interpolation table to evaluate the integrated 2D
        # kernel from the dimensionless impact parameter b/hsml.
        itab = SPHKernelInterpolationTable(self.ds.kernel_name)
        dl = itab.interpolate_array(impact / hsml) * mass / dens / hsml ** 2
        return dl / ray_length
Example #4
0
    def _get_sampler_params(self, camera, render_source):
        """Build sampler parameters for a stereo spherical (360-deg) lens.

        The image is split vertically: one half holds the left-eye rays and
        the other the right-eye rays, with ray origins offset by
        +/- ``self.disparity`` along the local azimuthal direction.

        Parameters
        ----------
        camera : Camera
            Supplies position, resolution, unit vectors and domain extents.
        render_source : RenderSource
            If it carries a zbuffer, its rgba image is reused; otherwise a
            fresh image is allocated.

        Returns
        -------
        dict
            Keyword arguments consumed by the volume-rendering sampler.
        """
        if self.disparity is None:
            # Default eye separation: a small fraction of the camera width.
            self.disparity = camera.width[0] / 1000.

        # Each eye gets half of the vertical resolution.
        single_resolution_y = int(np.floor(camera.resolution[1]) / 2)
        # Longitude across image x, latitude across the (half) image y.
        px = np.linspace(-np.pi, np.pi, camera.resolution[0],
                         endpoint=True)[:, None]
        py = np.linspace(-np.pi / 2.,
                         np.pi / 2.,
                         single_resolution_y,
                         endpoint=True)[None, :]

        # Unit direction on the sphere for every pixel.
        vectors = np.zeros((camera.resolution[0], single_resolution_y, 3),
                           dtype='float64',
                           order='C')
        vectors[:, :, 0] = np.cos(px) * np.cos(py)
        vectors[:, :, 1] = np.sin(px) * np.cos(py)
        vectors[:, :, 2] = np.sin(py)

        # The maximum possible length of ray
        max_length = (unorm(camera.position - camera._domain_center) +
                      0.5 * unorm(camera._domain_width) +
                      np.abs(self.disparity))
        # Rescale the ray to be long enough to cover the entire domain
        vectors = vectors * max_length

        # Rotate sphere directions into the camera frame.
        R1 = get_rotation_matrix(0.5 * np.pi, [1, 0, 0])
        R2 = get_rotation_matrix(0.5 * np.pi, [0, 0, 1])
        uv = np.dot(R1, camera.unit_vectors)
        uv = np.dot(R2, uv)

        # np.dot contracts the trailing (length-3) axis with the rows of
        # ``uv``; the reshape round-trips in the original were no-ops
        # (their results were discarded), so they are removed here.
        vectors = np.dot(vectors, uv)

        # Azimuthal (tangential) unit vectors, used to offset each eye
        # sideways; the z component is left at its zero initialization.
        vectors2 = np.zeros((camera.resolution[0], single_resolution_y, 3),
                            dtype='float64',
                            order='C')
        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, single_resolution_y))
        vectors2[:, :, 1] = np.cos(px) * np.ones((1, single_resolution_y))

        vectors2 = np.dot(vectors2, uv)

        positions = np.tile(camera.position,
                            camera.resolution[0] * single_resolution_y)
        positions = positions.reshape(camera.resolution[0],
                                      single_resolution_y, 3)

        # The left and right are switched here since VR is in LHS.
        positions_left = positions + vectors2 * self.disparity
        positions_right = positions + vectors2 * (-self.disparity)

        if render_source.zbuffer is not None:
            image = render_source.zbuffer.rgba
        else:
            image = self.new_image(camera)

        # x_vec/y_vec are unused by the spherical sampler; pass placeholders.
        dummy = np.ones(3, dtype='float64')

        # Stack the two eyes so together they fill the full resolution.
        vectors_comb = uhstack([vectors, vectors])
        positions_comb = uhstack([positions_left, positions_right])

        image.shape = (camera.resolution[0], camera.resolution[1], 4)
        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)

        sampler_params = dict(vp_pos=positions_comb,
                              vp_dir=vectors_comb,
                              center=self.back_center,
                              bounds=(0.0, 1.0, 0.0, 1.0),
                              x_vec=dummy,
                              y_vec=dummy,
                              width=np.zeros(3, dtype="float64"),
                              image=image,
                              lens_type="stereo-spherical")
        return sampler_params
Example #5
0
    def _get_sampler_params(self, camera, render_source):
        """Build sampler parameters for a perspective lens.

        Rays fan out from ``camera.position`` through a rectangular image
        plane one ``camera.width[2]`` away along the normal vector.

        Parameters
        ----------
        camera : Camera
            Supplies position, width, resolution, unit vectors and domain
            extents. ``camera.width[1]`` is mutated to keep the pixel
            aspect ratio square.
        render_source : RenderSource
            If it carries a zbuffer, its rgba image is reused; otherwise a
            fresh image is allocated.

        Returns
        -------
        dict
            Keyword arguments consumed by the volume-rendering sampler.
        """
        # Enforce width[1] / width[0] = resolution[1] / resolution[0]
        camera.width[1] = camera.width[0] \
            * (camera.resolution[1] / camera.resolution[0])

        # Reuse the zbuffer image when one exists; otherwise allocate.
        # (The original re-created the image unconditionally further down,
        # which silently discarded the zbuffer branch.)
        if render_source.zbuffer is not None:
            image = render_source.zbuffer.rgba
        else:
            image = self.new_image(camera)

        east_vec = camera.unit_vectors[0]
        north_vec = camera.unit_vectors[1]
        normal_vec = camera.unit_vectors[2]

        # Normalized pixel coordinates in [-0.5, 0.5] along each image axis.
        px = np.linspace(-.5, .5, camera.resolution[0])
        py = np.linspace(-.5, .5, camera.resolution[1])

        # Per-pixel offsets along east/north; np.outer replaces the
        # deprecated np.mat matrix product and yields shape (npix, 3).
        sample_x = camera.width[0] * np.outer(px, east_vec)
        sample_y = camera.width[1] * np.outer(py, north_vec)

        # Broadcast the two 1-D offset grids onto the full image plane.
        sample_x = np.repeat(sample_x.reshape(camera.resolution[0], 1, 3),
                             camera.resolution[1],
                             axis=1)
        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                             camera.resolution[0],
                             axis=0)

        normal_vecs = np.tile(normal_vec,
                              camera.resolution[0] * camera.resolution[1])
        normal_vecs = normal_vecs.reshape(camera.resolution[0],
                                          camera.resolution[1], 3)

        # The maximum possible length of ray
        max_length = (unorm(camera.position - camera._domain_center) +
                      0.5 * unorm(camera._domain_width))

        # Rescale the ray to be long enough to cover the entire domain
        vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
            (max_length / camera.width[2])

        positions = np.tile(camera.position,
                            camera.resolution[0] * camera.resolution[1])
        positions = positions.reshape(camera.resolution[0],
                                      camera.resolution[1], 3)

        # x_vec/y_vec are unused by this sampler; pass placeholders.
        uv = np.ones(3, dtype='float64')

        sampler_params =\
            dict(vp_pos=positions,
                 vp_dir=vectors,
                 center=self.back_center,
                 bounds=(0.0, 1.0, 0.0, 1.0),
                 x_vec=uv,
                 y_vec=uv,
                 width=np.zeros(3, dtype='float64'),
                 image=image,
                 lens_type="perspective")

        mylog.debug(positions)
        mylog.debug(vectors)

        return sampler_params