Example #1
    def __init__(self,
                 center,
                 A,
                 B,
                 C,
                 e0,
                 tilt,
                 fields=None,
                 ds=None,
                 field_parameters=None,
                 data_source=None):
        YTSelectionContainer3D.__init__(self, center, ds, field_parameters,
                                        data_source)
        # make sure the magnitudes of semi-major axes are in order
        if A < B or B < C:
            raise YTEllipsoidOrdering(ds, A, B, C)
        # make sure the smallest side is not smaller than dx
        self._A = self.ds.quan(A, 'code_length')
        self._B = self.ds.quan(B, 'code_length')
        self._C = self.ds.quan(C, 'code_length')
        if self._C < self.index.get_smallest_dx():
            raise YTSphereTooSmall(self.ds, self._C,
                                   self.index.get_smallest_dx())
        self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
        self._tilt = tilt

        # find the t1 angle needed to rotate about z axis to align e0 to x
        t1 = np.arctan(e0[1] / e0[0])
        # rotate e0 by -t1
        RZ = get_rotation_matrix(t1, (0, 0, 1)).transpose()
        r1 = (e0 * RZ).sum(axis=1)
        # find the t2 angle needed to rotate about y axis to align e0 to x
        t2 = np.arctan(-r1[2] / r1[0])
        """
        calculate the original e1
        given the tilt about the x axis when e0 was aligned 
        to x after t1, t2 rotations about z, y
        """
        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
        RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
        RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
        e1 = ((0, 1, 0) * RX).sum(axis=1)
        e1 = (e1 * RY).sum(axis=1)
        e1 = (e1 * RZ).sum(axis=1)
        e2 = np.cross(e0, e1)

        self._e1 = e1
        self._e2 = e2

        self.set_field_parameter('A', A)
        self.set_field_parameter('B', B)
        self.set_field_parameter('C', C)
        self.set_field_parameter('e0', e0)
        self.set_field_parameter('e1', e1)
        self.set_field_parameter('e2', e2)
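
In practice this constructor is reached through the dataset's ellipsoid factory rather than called directly. A minimal usage sketch, assuming the standard IsolatedGalaxy sample dataset and illustrative axis lengths:

import numpy as np
import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
c = ds.domain_center
# illustrative semi-axes with A >= B >= C, in code_length (assumed values)
A, B, C = 0.2, 0.1, 0.05
e0 = np.array([1.0, 0.0, 0.0])   # direction of the semi-major axis
tilt = 0.3                       # rotation of e1 about the x axis, in radians
ell = ds.ellipsoid(c, A, B, C, e0, tilt)
print(ell["gas", "density"].shape)
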
Example #2
File: lens.py Project: cphyc/yt
    def _get_sampler_params(self, camera, render_source):
        px = np.linspace(-np.pi, np.pi, camera.resolution[0],
                         endpoint=True)[:, None]
        py = np.linspace(-np.pi / 2.0,
                         np.pi / 2.0,
                         camera.resolution[1],
                         endpoint=True)[None, :]

        vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
                           dtype="float64",
                           order="C")
        vectors[:, :, 0] = np.cos(px) * np.cos(py)
        vectors[:, :, 1] = np.sin(px) * np.cos(py)
        vectors[:, :, 2] = np.sin(py)

        # The maximum possible length of ray
        max_length = (unorm(camera.position - camera._domain_center) +
                      0.5 * unorm(camera._domain_width))
        # Rescale the ray to be long enough to cover the entire domain
        vectors = vectors * max_length

        positions = np.tile(camera.position, camera.resolution[0] *
                            camera.resolution[1]).reshape(
                                camera.resolution[0], camera.resolution[1], 3)

        R1 = get_rotation_matrix(0.5 * np.pi, [1, 0, 0])
        R2 = get_rotation_matrix(0.5 * np.pi, [0, 0, 1])
        uv = np.dot(R1, camera.unit_vectors)
        uv = np.dot(R2, uv)
        vectors = vectors.reshape(
            (camera.resolution[0] * camera.resolution[1], 3))
        vectors = np.dot(vectors, uv)
        vectors = vectors.reshape(
            (camera.resolution[0], camera.resolution[1], 3))

        if render_source.zbuffer is not None:
            image = render_source.zbuffer.rgba
        else:
            image = self.new_image(camera)

        dummy = np.ones(3, dtype="float64")
        image.shape = (camera.resolution[0], camera.resolution[1], 4)
        vectors.shape = (camera.resolution[0], camera.resolution[1], 3)
        positions.shape = (camera.resolution[0], camera.resolution[1], 3)

        sampler_params = dict(
            vp_pos=positions,
            vp_dir=vectors,
            center=self.back_center,
            bounds=(0.0, 1.0, 0.0, 1.0),
            x_vec=dummy,
            y_vec=dummy,
            width=np.zeros(3, dtype="float64"),
            image=image,
            lens_type="spherical",
        )
        return sampler_params
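
_get_sampler_params is called internally when a scene is rendered; from the user's side the spherical lens is simply selected by name. A minimal rendering sketch, assuming the IsolatedGalaxy sample dataset:

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
sc = yt.create_scene(ds, lens_type="spherical")
cam = sc.camera
cam.resolution = (800, 400)        # 2:1 equirectangular output
cam.position = ds.domain_center    # render the full sky from the domain center
sc.save("spherical_rendering.png", sigma_clip=4)
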
Example #3
    def rotate(self, theta, rot_vector=None, rot_center=None):
        r"""Rotate by a given angle

        Rotate the view.  If `rot_vector` is None, rotation will occur
        around the `north_vector`.

        Parameters
        ----------
        theta : float, in radians
             Angle (in radians) by which to rotate the view.
        rot_vector  : array_like, optional
            Specify the rotation vector around which rotation will
            occur.  Defaults to None, which sets rotation around
            `north_vector`.
        rot_center  : array_like, optional
            Specify the center around which rotation will occur. Defaults
            to None, which sets rotation around the original camera position
            (i.e. the camera position does not change)

        Examples
        --------

        >>> import yt
        >>> import numpy as np
        >>> from yt.visualization.volume_rendering.api import Scene
        >>> sc = Scene()
        >>> cam = sc.add_camera()
        >>> # rotate the camera by pi / 4 radians:
        >>> cam.rotate(np.pi/4.0)
        >>> # rotate the camera about the y-axis instead of cam.north_vector:
        >>> cam.rotate(np.pi/4.0, np.array([0.0, 1.0, 0.0]))
        >>> # rotate the camera about the origin instead of its own position:
        >>> cam.rotate(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))

        """
        rotate_all = rot_vector is not None
        if rot_vector is None:
            rot_vector = self.north_vector
        if rot_center is None:
            rot_center = self._position
        rot_vector = ensure_numpy_array(rot_vector)
        rot_vector = rot_vector/np.linalg.norm(rot_vector)

        new_position = self._position - rot_center
        R = get_rotation_matrix(theta, rot_vector)
        new_position = np.dot(R, new_position) + rot_center

        if (new_position == self._position).all():
            normal_vector = self.unit_vectors[2]
        else:
            normal_vector = rot_center - new_position
        normal_vector = normal_vector/np.sqrt((normal_vector**2).sum())

        if rotate_all:
            self.switch_view(
                normal_vector=np.dot(R, normal_vector),
                north_vector=np.dot(R, self.unit_vectors[1]))
        else:
            self.switch_view(normal_vector=np.dot(R, normal_vector))
        if (new_position != self._position).any():
            self.set_position(new_position)
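
All of these examples lean on get_rotation_matrix(theta, rot_vector), which returns the 3x3 matrix for a rotation by theta about rot_vector. A self-contained sketch of the underlying math (Rodrigues' rotation formula; illustrative, not yt's own implementation):

import numpy as np

def rotation_matrix(theta, axis):
    """Return the 3x3 matrix rotating vectors by theta (radians) about axis."""
    axis = np.asarray(axis, dtype="float64")
    axis = axis / np.linalg.norm(axis)
    # K is the skew-symmetric cross-product matrix of the axis
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

# rotating x-hat by pi/2 about z-hat gives y-hat
print(rotation_matrix(np.pi / 2, [0, 0, 1]) @ np.array([1.0, 0.0, 0.0]))
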
Example #4
    def _get_positions_vectors(self, camera, disparity):

        single_resolution_x = int(np.floor(camera.resolution[0]) / 2)

        east_vec = camera.unit_vectors[0]
        north_vec = camera.unit_vectors[1]
        normal_vec = camera.unit_vectors[2]

        angle_disparity = - np.arctan2(disparity.in_units(camera.width.units),
                                       camera.width[2])
        R = get_rotation_matrix(angle_disparity, north_vec)

        east_vec_rot = np.dot(R, east_vec)
        normal_vec_rot = np.dot(R, normal_vec)

        # fractional pixel offsets across the image plane
        px = np.linspace(-0.5, 0.5, single_resolution_x)
        py = np.linspace(-0.5, 0.5, camera.resolution[1])

        # outer products: one world-space offset vector per pixel column / row
        sample_x = camera.width[0] * np.outer(px, east_vec_rot)
        sample_y = camera.width[1] * np.outer(py, north_vec)

        vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                           dtype='float64', order='C')

        sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
                             camera.resolution[1], axis=1)
        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                             single_resolution_x, axis=0)

        normal_vecs = np.tile(
            normal_vec_rot, single_resolution_x * camera.resolution[1])
        normal_vecs = normal_vecs.reshape(
            single_resolution_x, camera.resolution[1], 3)
        east_vecs = np.tile(
            east_vec_rot, single_resolution_x * camera.resolution[1])
        east_vecs = east_vecs.reshape(
            single_resolution_x, camera.resolution[1], 3)

        # The maximum possible length of ray
        max_length = (unorm(camera.position - camera._domain_center)
                      + 0.5 * unorm(camera._domain_width)
                      + np.abs(self.disparity))
        # Rescale the ray to be long enough to cover the entire domain
        vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
            (max_length / camera.width[2])

        positions = np.tile(
            camera.position, single_resolution_x * camera.resolution[1])
        positions = positions.reshape(
            single_resolution_x, camera.resolution[1], 3)

        # Here the east_vecs is non-rotated one
        positions = positions + east_vecs * disparity

        mylog.debug(positions)
        mylog.debug(vectors)

        return vectors, positions
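
This method also runs inside the renderer; for a user the stereo-perspective lens is just another lens_type, with disparity setting the eye separation. A minimal sketch, assuming the IsolatedGalaxy sample dataset and an arbitrary disparity:

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
sc = yt.create_scene(ds, lens_type="stereo-perspective")
cam = sc.camera
cam.resolution = (1024, 512)                       # left/right eyes share one image
cam.lens.disparity = ds.domain_width[0] * 1.0e-3   # eye separation (assumed value)
sc.save("stereo_perspective.png", sigma_clip=4)
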
Example #6
    def _get_px_py_dz(self, camera, pos, res, disparity):

        res0_h = np.floor(res[0]) / 2

        east_vec = camera.unit_vectors[0]
        north_vec = camera.unit_vectors[1]
        normal_vec = camera.unit_vectors[2]

        angle_disparity = -np.arctan2(disparity.d, camera.width[2].d)
        R = get_rotation_matrix(angle_disparity, north_vec)

        east_vec_rot = np.dot(R, east_vec)
        normal_vec_rot = np.dot(R, normal_vec)

        camera_position_shift = camera.position + east_vec * disparity
        camera_position_shift = camera_position_shift.in_units('code_length').d
        width = camera.width.in_units('code_length').d
        sight_vector = pos - camera_position_shift
        pos1 = sight_vector

        for i in range(0, sight_vector.shape[0]):
            sight_vector_norm = np.sqrt(
                np.dot(sight_vector[i], sight_vector[i]))
            sight_vector[i] = sight_vector[i] / sight_vector_norm
        sight_center = camera_position_shift + camera.width[2] * normal_vec_rot

        for i in range(0, sight_vector.shape[0]):
            sight_angle_cos = np.dot(sight_vector[i], normal_vec_rot)
            # clip sight_angle_cos since floating point noise might
            # cause it go outside the domain of arccos
            sight_angle_cos = np.clip(sight_angle_cos, -1.0, 1.0)
            if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                sight_length = width[2] / sight_angle_cos
            else:
                # If the corner is behind the camera, place it outside of the
                # image. It cannot simply be dropped, because it may connect
                # to another corner inside the image, which would produce a
                # visible domain-boundary line.
                sight_length = np.sqrt(width[0]**2 + width[1]**2)
                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
            pos1[i] = camera_position_shift + sight_length * sight_vector[i]

        dx = np.dot(pos1 - sight_center, east_vec_rot)
        dy = np.dot(pos1 - sight_center, north_vec)
        dz = np.dot(pos - camera_position_shift, normal_vec_rot)

        # Transpose into image coords.
        if disparity > 0:
            px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
        else:
            px = (res0_h * 1.5 + res0_h / camera.width[0].d * dx).astype('int')
        py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')

        return px, py, dz
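
The heart of this routine is decomposing each point's displacement from the image center onto the camera's east and north unit vectors, then scaling by resolution over width to obtain pixel indices. A stripped-down numpy sketch of that last step, with illustrative axis-aligned inputs (not yt API):

import numpy as np

east_vec = np.array([1.0, 0.0, 0.0])
north_vec = np.array([0.0, 1.0, 0.0])
sight_center = np.array([0.0, 0.0, 1.0])    # center of the image plane
point = np.array([0.2, -0.1, 1.0])          # a point already pushed onto that plane

width = np.array([1.0, 1.0, 1.0])           # image-plane extent in world units
res = np.array([800, 800])                  # image resolution in pixels

dx = np.dot(point - sight_center, east_vec)
dy = np.dot(point - sight_center, north_vec)
px = int(res[0] * 0.5 + res[0] / width[0] * dx)   # column index -> 560
py = int(res[1] * 0.5 + res[1] / width[1] * dy)   # row index    -> 320
print(px, py)
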
Example #7
    def _get_sampler_params(self, camera, render_source):
        if self.disparity is None:
            self.disparity = camera.width[0] / 1000.

        single_resolution_y = int(np.floor(camera.resolution[1]) / 2)
        px = np.linspace(-np.pi, np.pi, camera.resolution[0],
                         endpoint=True)[:, None]
        py = np.linspace(-np.pi / 2.,
                         np.pi / 2.,
                         single_resolution_y,
                         endpoint=True)[None, :]

        vectors = np.zeros((camera.resolution[0], single_resolution_y, 3),
                           dtype='float64',
                           order='C')
        vectors[:, :, 0] = np.cos(px) * np.cos(py)
        vectors[:, :, 1] = np.sin(px) * np.cos(py)
        vectors[:, :, 2] = np.sin(py)

        # The maximum possible length of ray
        max_length = (unorm(camera.position - camera._domain_center) +
                      0.5 * unorm(camera._domain_width) +
                      np.abs(self.disparity))
        # Rescale the ray to be long enough to cover the entire domain
        vectors = vectors * max_length

        R1 = get_rotation_matrix(0.5 * np.pi, [1, 0, 0])
        R2 = get_rotation_matrix(0.5 * np.pi, [0, 0, 1])
        uv = np.dot(R1, camera.unit_vectors)
        uv = np.dot(R2, uv)

        vectors = vectors.reshape(
            (camera.resolution[0] * single_resolution_y, 3))
        vectors = np.dot(vectors, uv)
        vectors = vectors.reshape(
            (camera.resolution[0], single_resolution_y, 3))

        vectors2 = np.zeros((camera.resolution[0], single_resolution_y, 3),
                            dtype='float64',
                            order='C')
        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, single_resolution_y))
        vectors2[:, :, 1] = np.cos(px) * np.ones((1, single_resolution_y))
        vectors2[:, :, 2] = 0

        vectors2 = vectors2.reshape(
            (camera.resolution[0] * single_resolution_y, 3))
        vectors2 = np.dot(vectors2, uv)
        vectors2 = vectors2.reshape(
            (camera.resolution[0], single_resolution_y, 3))

        positions = np.tile(camera.position,
                            camera.resolution[0] * single_resolution_y)
        positions = positions.reshape(camera.resolution[0],
                                      single_resolution_y, 3)

        # The left and right eyes are swapped here because the volume
        # rendering uses a left-handed coordinate system.
        positions_left = positions + vectors2 * self.disparity
        positions_right = positions + vectors2 * (-self.disparity)

        if render_source.zbuffer is not None:
            image = render_source.zbuffer.rgba
        else:
            image = self.new_image(camera)

        dummy = np.ones(3, dtype='float64')

        vectors_comb = uhstack([vectors, vectors])
        positions_comb = uhstack([positions_left, positions_right])

        image.shape = (camera.resolution[0], camera.resolution[1], 4)
        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)

        sampler_params = dict(vp_pos=positions_comb,
                              vp_dir=vectors_comb,
                              center=self.back_center,
                              bounds=(0.0, 1.0, 0.0, 1.0),
                              x_vec=dummy,
                              y_vec=dummy,
                              width=np.zeros(3, dtype="float64"),
                              image=image,
                              lens_type="stereo-spherical")
        return sampler_params
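
From the user's side the stereo-spherical lens mirrors the spherical case, with the two eyes stacked along the vertical axis of the image. A minimal sketch, assuming the IsolatedGalaxy sample dataset:

import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
sc = yt.create_scene(ds, lens_type="stereo-spherical")
cam = sc.camera
cam.resolution = (512, 512)                        # each eye gets a 512x256 half
cam.lens.disparity = ds.domain_width[0] * 1.0e-3   # eye separation (assumed value)
cam.position = ds.domain_center
sc.save("stereo_spherical.png", sigma_clip=4)
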
Example #8
 def _get_ellipsoid_parameters_basic(self):
     np.seterr(all="ignore")
     # check that there are at least 4 particles to form an ellipsoid;
     # we neglect to check whether the 4 particles are coplanar, since that
     # is almost certain never to occur and can be dealt with if it ever
     # comes up
     if np.size(self["particle_position_x"]) < 4:
         mylog.warning("Too few particles for ellipsoid parameters.")
         return (0, 0, 0, 0, 0, 0, 0)
     # Calculate the parameters that describe the ellipsoid of
     # the particles that constitute the halo. This function returns
     # all the parameters except for the center of mass.
     com = self.center_of_mass()
     position = [
         self["particle_position_x"],
         self["particle_position_y"],
         self["particle_position_z"],
     ]
     # Locate the furthest particle from com, its vector length and index
     DW = np.array([self.gridsize[0], self.gridsize[1], self.gridsize[2]])
     position = [position[0] - com[0], position[1] - com[1], position[2] - com[2]]
     # different cases of particles being on other side of boundary
     for axis in range(np.size(DW)):
         cases = np.array(
             [position[axis], position[axis] + DW[axis], position[axis] - DW[axis]]
         )
         # pick out the smallest absolute distance from com
         position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
     # find the furthest particle's index
     r = np.sqrt(position[0] ** 2 + position[1] ** 2 + position[2] ** 2)
     A_index = r.argmax()
     mag_A = r.max()
     # designate the A vector
     A_vector = (position[0][A_index], position[1][A_index], position[2][A_index])
     # designate the e0 unit vector
     e0_vector = A_vector / mag_A
     # locate the tB particle position by finding the max B
     e0_vector_copy = np.empty((np.size(position[0]), 3), dtype="float64")
     for i in range(3):
         e0_vector_copy[:, i] = e0_vector[i]
     rr = np.array(
         [position[0], position[1], position[2]]
     ).T  # Similar to tB_vector in old code.
     tC_vector = np.cross(e0_vector_copy, rr)
     te2 = tC_vector.copy()
     for dim in range(3):
         te2[:, dim] *= np.sum(tC_vector**2.0, axis=1) ** (-0.5)
     te1 = np.cross(te2, e0_vector_copy)
     length = np.abs(
         -np.sum(rr * te1, axis=1)
         * (1.0 - np.sum(rr * e0_vector_copy, axis=1) ** 2.0 * mag_A**-2.0)
         ** (-0.5)
     )
     # Apparently it sometimes happens that NaNs are turned into infs,
     # which messes up the nanargmax below.
     length[length == np.inf] = 0.0
     tB_index = np.nanargmax(length)  # ignores NaNs created above.
     mag_B = length[tB_index]
     e1_vector = te1[tB_index]
     e2_vector = te2[tB_index]
     temp_e0 = rr.copy()
     temp_e1 = rr.copy()
     temp_e2 = rr.copy()
     for dim in range(3):
         temp_e0[:, dim] = e0_vector[dim]
         temp_e1[:, dim] = e1_vector[dim]
         temp_e2[:, dim] = e2_vector[dim]
     length = np.abs(
         np.sum(rr * temp_e2, axis=1)
         * (
             1
             - np.sum(rr * temp_e0, axis=1) ** 2.0 * mag_A**-2.0
             - np.sum(rr * temp_e1, axis=1) ** 2.0 * mag_B**-2.0
         )
         ** (-0.5)
     )
     length[length == np.inf] = 0.0
     tC_index = np.nanargmax(length)
     mag_C = length[tC_index]
     # tilt is the rotation about the x axis needed to align the e1 vector
     # with the y axis, after e0 has been aligned with the x axis
     # find the t1 angle needed to rotate about the z axis to bring e0 into the x-z plane
     t1 = np.arctan(-e0_vector[1] / e0_vector[0])
     RZ = get_rotation_matrix(t1, (0, 0, 1))
     r1 = np.dot(RZ, e0_vector)
     # find the t2 angle needed to rotate about y axis to align e0 to x
     t2 = np.arctan(r1[2] / r1[0])
     RY = get_rotation_matrix(t2, (0, 1, 0))
     r2 = np.dot(RY, np.dot(RZ, e1_vector))
     # find the tilt angle needed to rotate about x axis to align e1 to y and e2 to z
     tilt = np.arctan(-r2[2] / r2[1])
     return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1], e0_vector[2], tilt)
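
The t1, t2 and tilt angles computed at the end are the same ones consumed by the ellipsoid constructor in Example #1: rotating by t1 about z and then by t2 about y carries e0 onto the x axis, and tilt is the leftover rotation about x that carries e1 onto y. A small numpy check of that alignment, assuming get_rotation_matrix is importable from yt.utilities.math_utils:

import numpy as np
from yt.utilities.math_utils import get_rotation_matrix  # assumed import path

# an arbitrary direction for the longest axis, normalized as in the examples
e0 = np.array([0.3, 0.4, 0.5])
e0 = e0 / np.linalg.norm(e0)

# the same angle construction as above
t1 = np.arctan(-e0[1] / e0[0])
RZ = get_rotation_matrix(t1, (0, 0, 1))
r1 = np.dot(RZ, e0)
t2 = np.arctan(r1[2] / r1[0])
RY = get_rotation_matrix(t2, (0, 1, 0))

# after both rotations e0 should lie along +x
print(np.dot(RY, r1))   # ~ [1, 0, 0]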