Example #1
    def _check_barrel_surface(self, lens, azimuths, barrel_z):
        """
        Checks the barrel surface of a lens by calculating the ray-lens intersection. The hit point
        position and angle of incidence are compared to the predicted values.

        :param lens: Spherical lens object whose barrel surface is tested.
        :param azimuths: Azimuth angles to test the lens surface at.
        :param barrel_z: Z coordinates along the lens barrel at which to test the surface.
        """

        lens_radius = lens.diameter / 2

        for z in barrel_z:
            for ta in azimuths:
                # get x-y coordinates of the surface point from azimuth
                x = lens_radius * cos(ta)
                y = lens_radius * sin(ta)

                # get origin by surface point offset and calculate ray direction
                surface_point = Point3D(x, y, z)
                direction = Vector3D(-x, -y, 0)
                origin = Point3D(1.1 * x, 1.1 * y, z)

                # calculate ray-lens intersection
                intersection = lens.hit(CoreRay(origin, direction))
                hit_point = intersection.hit_point.transform(
                    intersection.primitive_to_world)

                # distance between the expected surface point and the ray hit point
                distance = hit_point.vector_to(surface_point).length
                self.assertAlmostEqual(
                    distance,
                    0,
                    self.tolerance_distance,
                    msg=
                    "Ray-curved surface hit point and predicted surface point difference"
                    " is larger than tolerance.")

                # the ray should hit the barrel surface at normal (perpendicular) incidence
                cos_angle_incidence = intersection.normal.dot(
                    intersection.ray.direction.normalise())
                self.assertAlmostEqual(
                    fabs(cos_angle_incidence),
                    1,
                    self.tolerance_angle,
                    msg="Angle of incidence differs from perpendicular.")
Example #2
def find_wall_intersection(world, centre_point, sightline_vec, delta=1E-3):
    """
    Finds the first solid surface along a sight-line by repeatedly intersecting the world,
    stepping the ray origin forward by delta whenever a NullMaterial primitive is hit.

    :param world: Scene-graph root to intersect against.
    :param centre_point: Point3D origin of the sight-line.
    :param sightline_vec: Vector3D direction of the sight-line.
    :param delta: Step used to advance past NullMaterial primitives.
    :return: Tuple of (hit point in world coordinates, intersected primitive).
    """

    while True:

        # Find the next intersection point of the ray with the world
        intersection = world.hit(CoreRay(centre_point, sightline_vec))

        if intersection is None:
            raise ValueError('No intersection with solid material found.')

        elif isinstance(intersection.primitive.material, NullMaterial):
            centre_point += sightline_vec * delta
            continue

        else:
            hit_point = intersection.hit_point.transform(
                intersection.primitive_to_world)
            return hit_point, intersection.primitive
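
A hedged usage sketch for find_wall_intersection. The scene contents and sight-line values are placeholders; a stand-in wall is built from raysect's Sphere and AbsorbingSurface so that a solid hit is returned.

from raysect.core import Point3D, Vector3D
from raysect.optical import World
from raysect.optical.material.absorber import AbsorbingSurface
from raysect.primitive import Sphere

world = World()
Sphere(2.0, parent=world, material=AbsorbingSurface())  # stand-in solid 'wall' enclosing the origin

centre_point = Point3D(0.5, 0.0, 0.0)                   # hypothetical observation point inside the wall
sightline_vec = Vector3D(1.0, 0.0, 0.0).normalise()

hit_point, primitive = find_wall_intersection(world, centre_point, sightline_vec)
print(hit_point, primitive)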
Example #3
                   acceptance_angle=1,
                   radius=0.001,
                   spectral_bins=8000,
                   spectral_rays=1,
                   pixel_samples=5,
                   transform=translate(*start_point) *
                   rotate_basis(forward_vector, up_vector),
                   parent=world)

fibre.min_wavelength = 350.0
fibre.max_wavelength = 700.0

fibre.observe()

# Find the next intersection point of the ray with the world
intersection = world.hit(CoreRay(start_point, forward_vector))
if intersection is not None:
    hit_point = intersection.hit_point.transform(
        intersection.primitive_to_world)
else:
    raise RuntimeError("No intersection with the vessel was found.")

# Traverse the ray using the parametric line equation,
# i.e. t = 0 -> 1 traverses the full ray path.
parametric_vector = start_point.vector_to(hit_point)
t_samples = np.arange(0, 1, 0.01)

# Set up some containers for useful parameters along the ray trajectory
ray_r_points = []
ray_z_points = []
distance = []
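
A plausible continuation of the traversal the comments above describe; this loop is assumed rather than taken from the original excerpt, and simply samples the parametric line to fill the containers.

for t in t_samples:
    sample_point = start_point + parametric_vector * t              # point along the sight-line at parameter t
    ray_r_points.append(np.hypot(sample_point.x, sample_point.y))   # major radius R of the sample point
    ray_z_points.append(sample_point.z)
    distance.append(t * parametric_vector.length)                   # path length from start_point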
Example #4
x_points = []
y_points = []
z_points = []
z_show = np.zeros((num_pixels, num_pixels))
# camera transform mapping the virtual image plane into world space
camera_transform = translate(0, 0.16, -0.7) * rotate(0, -12, 0)

# the ray origin is the camera pupil position, fixed for all pixels
origin = Point3D().transform(camera_transform)

for ix in range(num_pixels):
    for iy in range(num_pixels):

        # pixel coordinates in the virtual image plane
        pixel_x = image_start_x - image_delta * ix
        pixel_y = image_start_y - image_delta * iy

        # calculate the ray direction through this pixel of the virtual image plane
        direction = Vector3D(pixel_x, pixel_y, 1).normalise().transform(camera_transform)

        intersection = world.hit(CoreRay(origin, direction))

        if intersection is not None:
            hit_point = intersection.hit_point.transform(intersection.primitive_to_world)
            x_points.append(hit_point.z)
            y_points.append(hit_point.x)
            z_points.append(hit_point.y)
            z_show[iy, ix] = hit_point.z
        else:
            # add small offset so background is black
            z_show[iy, ix] = 0.1

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_points, y_points, z_points, c='k', marker='.')
ax.set_xlabel('X Label')
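
Continuing the snippet, a possible way to finish the 3D scatter plot and view the depth image accumulated in z_show; the labels and colormap below are assumptions.

ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')

plt.figure()
plt.imshow(z_show, origin='lower', cmap='viridis')
plt.colorbar(label='hit point z (assumed metres)')
plt.show()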
Example #5
    def make_cherab_image(self):
        """
        run cherab to generate the synthetic spectral cube
        :return:
        """
        if self.radiance is not NotImplemented:
            self.radiance.close()
        if self.spectral_radiance is not NotImplemented:
            self.spectral_radiance.close()

        import_mastu_mesh(self.world, )

        # first, define camera, calculate view vectors and calculate ray lengths
        pipeline_spectral = SpectralPowerPipeline2D()
        pipeline_spectral_rad = SpectralRadiancePipeline2D()
        pipelines = [pipeline_spectral, pipeline_spectral_rad, ]
        camera = PinholeCamera(self.sensor_format_ds, fov=self.fov, pipelines=pipelines, parent=self.world)

        # orient and position the camera
        init_view_vector, init_up_vector = Vector3D(0, 0, 1), Vector3D(0, 1, 0)
        axle_1 = init_view_vector.cross(self.view_vector)
        angle = init_view_vector.angle(self.view_vector)
        t_1 = rotate_vector(angle, axle_1)

        final_up_vector = rotate_vector(-90, axle_1) * self.view_vector
        intermediate_up_vector = t_1 * init_up_vector
        angle_between = intermediate_up_vector.angle(final_up_vector)
        t_2 = rotate_vector(-angle_between, self.view_vector)
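        # t_1 rotates the initial view vector onto self.view_vector; t_2 then rolls the camera
        # about the view axis so that its 'up' direction coincides with final_up_vector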

        camera.transform = translate(self.pupil_point[0],
                                     self.pupil_point[1],
                                     self.pupil_point[2], ) * t_2 * t_1

        vector_xyz = np.arange(3)
        vector_xyz = xr.DataArray(vector_xyz, coords=(vector_xyz, ), dims=('vector_xyz',), name='vector_xyz', )

        # calculating the pixel view directions
        view_vectors = xr.combine_nested(
            [xr.zeros_like(self.x_pixel_ds + self.y_pixel_ds) + self.view_vector[i] for i in [0, 1, 2, ]],
            concat_dim=(vector_xyz,), )
        view_vectors = view_vectors.rename('view_vectors')

        def v3d2da(v3d):
            """
            raysect Vector3D to xarray DataArray

            :param v3d:
            :return:
            """
            da = np.array([v3d.x, v3d.y, v3d.z, ])
            da = xr.DataArray(da, coords=(np.arange(3),), dims=('vector_xyz',), )
            return da

        # basis unit vectors defining camera view -- v_z is forward and v_y is up
        v_y = final_up_vector.normalise()
        v_x = self.view_vector.cross(v_y).normalise()
        v_z = self.view_vector.normalise()
        v_x, v_y, v_z = [v3d2da(i) for i in [v_x, v_y, v_z, ]]

        # FOV defines the widest view, with pixels defined as square.
        sensor_aspect = self.sensor_format[1] / self.sensor_format[0]
        if sensor_aspect > 1:
            fov_v = self.fov
            fov_h = self.fov / sensor_aspect
        elif sensor_aspect == 1:
            fov_v = fov_h = self.fov
        elif sensor_aspect < 1:
            fov_h = self.fov
            fov_v = self.fov * sensor_aspect
        else:
            raise ValueError('Invalid sensor aspect ratio: {}'.format(sensor_aspect))

        pixel_projection = 2 * np.tan(fov_h * np.pi / 360) / self.sensor_format[0]
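        # pixel_projection converts a pixel offset from the sensor centre into a lateral offset in a
        # virtual image plane at unit distance: the horizontal FOV spans 2*tan(fov_h/2) there, shared
        # evenly across sensor_format[0] pixels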
        view_vectors = view_vectors + (v_x * (self.x_pixel_ds - self.sensor_format[0] / 2 + 0.5) * pixel_projection) + \
                       (v_y * (self.y_pixel_ds - self.sensor_format[1] / 2 + 0.5) * pixel_projection)

        if self.verbose:
            print('--status: calculating ray lengths')
        # TODO there has to be a better way of doing this?!
        ray_lengths = xr.DataArray(np.zeros(self.sensor_format_ds), dims=('x', 'y', ), coords=(self.x_ds, self.y_ds, ))
        for idx_x, x_pixel in enumerate(self.x_pixel_ds.values):
            if self.verbose and idx_x % 10 == 0:
                print('x =', str(x_pixel))
            for idx_y, y_pixel in enumerate(self.y_pixel_ds.values):
                direction = Vector3D(*list(view_vectors.isel(x=idx_x, y=idx_y, ).values))

                intersections = []
                for p in self.world.primitives:
                    intersection = p.hit(CoreRay(self.pupil_point, direction, ))
                    if intersection is not None:
                        intersections.append(intersection)

                # find the intersection corresponding to the shortest ray length
                no_intersections = len(intersections)
                if no_intersections == 0:
                    ray_lengths.values[idx_x, idx_y] = 3
                else:
                    ray_lengths.values[idx_x, idx_y] = min([i.ray_distance for i in intersections if i.primitive.name != 'Plasma Geometry'])

        camera.spectral_bins = 40
        camera.pixel_samples = 10
        camera.min_wavelength = self.wl_min_nm
        camera.max_wavelength = self.wl_max_nm
        camera.quiet = not self.verbose
        camera.observe()

        # output to netCDF via xarray
        wl = pipeline_spectral.wavelengths
        wl = xr.DataArray(wl, coords=(wl, ), dims=('wavelength', )) * 1e-9  # ( m )
        spec_power_ds = pipeline_spectral.frame.mean * 1e9  # converting units from (W/nm) --> (W/m)
        spec_radiance_ds = pipeline_spectral_rad.frame.mean * 1e9
        coords = (self.x_ds, self.y_ds, wl, )
        dims = ('x', 'y', 'wavelength', )
        name = 'spec_power'
        attrs = {'units': 'W/m^2/sr/m'}
        spec_power_ds = xr.DataArray(np.flip(spec_power_ds, axis=1), coords=coords, dims=dims, name=name, attrs=attrs, )
        spec_radiance_ds = xr.DataArray(np.flip(spec_radiance_ds, axis=1, ), coords=coords, dims=dims, name=name, attrs=attrs, )

        # integrate the spectral power density over wavelength to obtain radiance
        radiance_ds = spec_power_ds.integrate(dim='wavelength').assign_attrs({'units': 'W/m^2/sr', })

        ds_ds = xr.Dataset({'spectral_radiance_ds': spec_radiance_ds,
                            'radiance_ds': radiance_ds,
                            'view_vectors_ds': view_vectors,
                            'ray_lengths_ds': ray_lengths
                            })

        x_p_y = self.x + self.y
        spec_power = spec_power_ds.interp_like(x_p_y) / self.cherab_down_sample  # to conserve power
        ds = xr.Dataset({'spectral_radiance': spec_power, })
        ds_ds.to_netcdf(self.fpath_ds, mode='w', )
        ds.to_netcdf(self.fpath, mode='w', )
Example #6
    def _check_spherical_surface(self, curvature, lens, center_curvature,
                                 is_inside, positive_curvature, azimuths,
                                 radii):
        """
        Checks the spherical surface of a lens by calculating the ray-lens intersection. The hit point
        position and angle of incidence are compared to the predicted values.

        :param curvature: Curvature radius of the surface.
        :param lens: Spherical lens object whose spherical surface is tested.
        :param center_curvature: Point3D with the center of curvature coordinates.
        :param is_inside: If True, the lens body is within the curvature sphere.
        :param positive_curvature: Orientation of the lens surface with respect to the center of
            curvature. If True, the surface lies on the positive-z side of the center of curvature.
        :param azimuths: Azimuth angles to test the lens surface at.
        :param radii: Radii to test the lens surface at.
        """

        # set the direction of the test ray: +1 approaches from inside the curvature sphere, -1 from outside
        ray_direction = 1 if is_inside else -1

        # set the hemisphere (sign of z) the tested surface lies on
        hemisphere = 1 if positive_curvature else -1

        curvature2 = curvature**2

        for radius in radii:
            z = sqrt(curvature2 - radius**2)
            for ta in azimuths:
                # calculate position vector pointing from the curvature center to the surface point
                x = radius * cos(ta)
                y = radius * sin(ta)
                position_vector = Vector3D(x, y, hemisphere * z)

                # construct origin by surface point offset and calculate ray direction
                surface_point = center_curvature + position_vector
                origin = center_curvature + position_vector * (
                    1 - 0.1 * ray_direction)
                direction = ray_direction * position_vector

                # calculate ray-lens intersection
                intersection = lens.hit(CoreRay(origin, direction))
                hit_point = intersection.hit_point.transform(
                    intersection.primitive_to_world)

                # distance between the expected surface point and the ray hit point
                distance = hit_point.vector_to(surface_point).length
                self.assertAlmostEqual(
                    distance,
                    0,
                    self.tolerance_distance,
                    msg=
                    "Ray-curved surface hit point and predicted surface point difference"
                    " is larger than tolerance.")

                # the ray should hit the spherical surface at normal (perpendicular) incidence
                cos_angle_incidence = intersection.normal.dot(
                    intersection.ray.direction.normalise())

                self.assertAlmostEqual(
                    fabs(cos_angle_incidence),
                    1,
                    self.tolerance_angle,
                    msg="Angle of incidence differs from perpendicular.")