# import cProfile
#
# def profile_test(n=25000):
#     # note: uses the current raysect API (Point3D and bins,
#     # formerly Point and num_samples)
#     r = Ray(origin=Point3D(0.0, 0, 0), min_wavelength=526, max_wavelength=532, bins=100)
#     for i in range(n):
#         r.trace(world)
#
# cProfile.run("profile_test()", sort="tottime")

ion()

# trace the same line of sight twice, doubling the spectral resolution
r = Ray(origin=Point3D(0.5, 0, -2.5), min_wavelength=440, max_wavelength=740, bins=800)
s = r.trace(world)
plot(s.wavelengths, s.samples)

r = Ray(origin=Point3D(0.5, 0, -2.5), min_wavelength=440, max_wavelength=740, bins=1600)
s = r.trace(world)
plot(s.wavelengths, s.samples)

show()

# render the scene with a pinhole camera
camera = PinholeCamera((128, 128), parent=world, transform=translate(0, 0, -2.5))
camera.spectral_rays = 1
camera.spectral_bins = 21
camera.pixel_samples = 50

ion()
camera.observe()
ioff()
camera.pipelines[0].display()
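# A quick consistency check (sketch, not part of the original demo): changing
# the bin count changes spectral resolution, not total power, so integrating
# either spectrum over wavelength with Spectrum.total() should agree to
# within Monte Carlo noise.
s_coarse = Ray(origin=Point3D(0.5, 0, -2.5), min_wavelength=440, max_wavelength=740, bins=800).trace(world)
s_fine = Ray(origin=Point3D(0.5, 0, -2.5), min_wavelength=440, max_wavelength=740, bins=1600).trace(world)
print(s_coarse.total(), s_fine.total())  # both in W/m^2/sr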
# 2. Add Observer
# ---------------

# Process the ray-traced spectra with the RGB pipeline.
rgb = RGBPipeline2D()

# camera
camera = PinholeCamera((512, 512), pipelines=[rgb], transform=translate(0, 10, -10) * rotate(0, -45, 0))

# camera - pixel sampling settings
camera.fov = 45
camera.pixel_samples = 250

# camera - ray sampling settings
camera.spectral_rays = 1
camera.spectral_bins = 20
camera.ray_max_depth = 100
camera.ray_extinction_prob = 0.1
camera.min_wavelength = 375.0
camera.max_wavelength = 740.0

# 3. Build Scenegraph
# -------------------

world = World()

sphere.parent = world
ground.parent = world
emitter.parent = world
camera.parent = world
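# 4. Observe (sketch)
# -------------------

# The excerpt above stops after assembling the scenegraph. A minimal render
# loop in the style used elsewhere in these demos; camera.observe() and
# rgb.display() are the standard raysect calls, while the pass count here is
# an arbitrary choice, not taken from the original script.
for p in range(1, 6):
    print("Rendering pass {}...".format(p))
    camera.observe()
rgb.display()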
def check_scene(self, max_iter=200):

    self.vessel.material = Lambert(blue)
    self.camera_outer.material = Lambert(yellow)
    self.camera_top.material = Lambert(yellow)
    self.source.material = Lambert(green)
    self.top_pinhole.material = Lambert(green)
    self.out_pinhole.material = Lambert(green)

    # cube walls
    bottom = Box(lower=Point3D(-0.99, -1.02, -0.99), upper=Point3D(0.99, -1.01, 0.99),
                 parent=self.world, material=Lambert(red))
    # top = Box(lower=Point3D(-0.99, 1.01, -0.99), upper=Point3D(0.99, 1.02, 0.99),
    #           parent=self.world, material=Lambert(red))
    left = Box(lower=Point3D(1.01, -0.99, -0.99), upper=Point3D(1.02, 0.99, 0.99),
               parent=self.world, material=Lambert(yellow))
    # right = Box(lower=Point3D(-1.02, -0.99, -0.99), upper=Point3D(-1.01, 0.99, 0.99),
    #             parent=self.world, material=Lambert(purple))
    back = Box(lower=Point3D(-0.99, -0.99, 1.01), upper=Point3D(0.99, 0.99, 1.02),
               parent=self.world, material=Lambert(orange))

    # various wall light sources
    light_front = Box(lower=Point3D(-1.5, -1.5, -10.1), upper=Point3D(1.5, 1.5, -10),
                      parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0))
    light_top = Box(lower=Point3D(-0.99, 1.01, -0.99), upper=Point3D(0.99, 1.02, 0.99),
                    parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0),
                    transform=translate(0, 1.0, 0))
    light_bottom = Box(lower=Point3D(-0.99, -3.02, -0.99), upper=Point3D(0.99, -3.01, 0.99),
                       parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0),
                       transform=translate(0, 1.0, 0))
    light_right = Box(lower=Point3D(-1.92, -0.99, -0.99), upper=Point3D(-1.91, 0.99, 0.99),
                      parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0))
    light_left = Box(lower=Point3D(1.91, -0.99, -0.99), upper=Point3D(1.92, 0.99, 0.99),
                     parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0))

    # Process the ray-traced spectra with the RGB pipeline.
    rgb = RGBPipeline2D()

    # camera
    pix = 1000
    camera = PinholeCamera(
        (pix, pix), pipelines=[rgb], transform=translate(-0.01, 0.0, -0.25) * rotate(0, 0, 0))
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(0.0, 0.0, 0.4) * rotate(180, 0, 0))
    # top view
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(0.0, self.vessel_out_rad+0.15, self.vessel_width/2)*rotate(0, -90, 0))
    # prof
    # note: this second assignment overrides the default view above
    camera = PinholeCamera(
        (pix, pix), pipelines=[rgb], transform=translate(-0.13, 0.13, -0.2) * rotate(-25, -25.0, 0))
    # camera top side
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.x_shift_top, self.top_px_first_y+0.0004, self.top_px_z-self.cam_in_radius+0.005)*rotate(0, 0, 0))
    # camera top down-up
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.x_shift_top, self.top_px_first_y-0.01, self.vessel_width/2)*rotate(0, 90, 0))
    # camera top up-down
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.x_shift_top-0.004, self.top_px_first_y+self.lid_top+self.tube_height-0.01, self.vessel_width/2)*rotate(0, -90, 0))
    # camera out side
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(-self.vessel_out_rad-0.015, 0.000, self.vessel_width/2-self.cam_in_radius/2+0.0001))
    # camera out down-up
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.out_px_first_x+0.005+0.005, 0.0, self.vessel_width/2)*rotate(90, 0, 0))
    # camera out up-down
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(-self.vessel_out_rad-self.tube_height-0.01, 0.0, self.vessel_width/2-0.005)*rotate(-90, 0, 0))

    # camera - pixel sampling settings
    camera.fov = 60  # 45
    camera.pixel_samples = 10

    # camera - ray sampling settings
    camera.spectral_rays = 1
    camera.spectral_bins = 25
    camera.parent = self.world

    plt.ion()
    p = 1
    while not camera.render_complete:
        print("Rendering pass {}...".format(p))
        camera.observe()
        print()
        p += 1
        if p > max_iter:
            break
    plt.ioff()
    rgb.display()
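    # To keep the checked scene for later inspection, the rendered frame can be
    # written to disk (sketch: RGBPipeline2D provides a save() method in
    # raysect; the filename here is a hypothetical example):
    rgb.save('scene_check.png')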
# mapping multiple grid cells to a single light source
for i in range(50):
    voxel_map[(rad < i + 1.) * (rad > i)] = i
rtc.voxel_map = voxel_map  # applying a voxel map
# now we have only 50 light sources

# creating ray transfer pipeline
pipeline = RayTransferPipeline2D()

# setting up the camera
camera = PinholeCamera((256, 256), pipelines=[pipeline], frame_sampler=FullFrameSampler2D(),
                       transform=translate(219., 0, 0) * rotate(90., 0., -90.), parent=world)
camera.fov = 90
camera.pixel_samples = 500
camera.min_wavelength = 500.
camera.max_wavelength = camera.min_wavelength + 1.
camera.spectral_bins = rtc.bins

# starting ray tracing
camera.observe()

# uncomment this to save ray transfer matrix to file
# np.save('ray_transfer_map.npy', pipeline.matrix)

# let's collapse the ray transfer matrix with some emission profiles
# obtaining 30 images for 30 emission profiles
images = []
vmax = 0
rad_inside = np.linspace(0.5, 49.5, 50)  # we have only 50 light sources now
shifts = np.linspace(0, 2. / 3., 30, endpoint=False)
for shift in shifts:
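    # (the loop body is truncated in this excerpt) A minimal sketch of the
    # collapse step, assuming pipeline.matrix has shape (256, 256, 50) so a
    # dot product with a 50-element profile yields one image; the cosine
    # profile below is a hypothetical stand-in, not the original:
    profile = np.cos(2. * np.pi * (rad_inside / 50. + shift)) ** 2
    image = np.dot(pipeline.matrix, profile)  # (256, 256, 50) . (50,) -> (256, 256)
    images.append(image)
    vmax = max(vmax, image.max())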
# setting up the camera
camera = PinholeCamera((256, 256), pipelines=[pipeline], frame_sampler=FullFrameSampler2D(),
                       transform=rotate(-15., -35, 0) * translate(-10, 50, -250), parent=world)
camera.fov = 45
camera.pixel_samples = 1000
camera.min_wavelength = 500.
camera.max_wavelength = camera.min_wavelength + 1.
# Ray transfer matrices are calculated for a single wavelength, but the number
# of spectral bins must equal the number of active voxels in the grid, because
# the spectral array is used to store the values of the ray transfer matrix.
camera.spectral_bins = rtb.bins

# starting ray tracing
camera.observe()

# let's collapse the ray transfer matrix with some emission profiles
# defining emission profiles on a 12 x 8 x 1 grid
profiles = [
    np.array([[[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0],
               [1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0],
               [1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
               [0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0],
               [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
               [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
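# (the profiles list is truncated in this excerpt) A minimal sketch of the
# collapse step, assuming every voxel of the 12 x 8 x 1 grid is active, so
# rtb.bins == 96 and pipeline.matrix has shape (256, 256, 96); the flattening
# order must match the grid's voxel indexing:
profile = np.ones((12, 8, 1))  # hypothetical uniform emission profile
image = np.dot(pipeline.matrix, profile.flatten())  # -> (256, 256) image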
def make_cherab_image(self):
    """
    Run cherab to generate the synthetic spectral cube.
    """
    if self.radiance is not NotImplemented:
        self.radiance.close()
    if self.spectral_radiance is not NotImplemented:
        self.spectral_radiance.close()

    import_mastu_mesh(self.world, )

    # first, define camera, calculate view vectors and calculate ray lengths
    pipeline_spectral = SpectralPowerPipeline2D()
    pipeline_spectral_rad = SpectralRadiancePipeline2D()
    pipelines = [pipeline_spectral, pipeline_spectral_rad, ]
    camera = PinholeCamera(self.sensor_format_ds, fov=self.fov, pipelines=pipelines, parent=self.world)

    # orient and position the camera
    init_view_vector, init_up_vector = Vector3D(0, 0, 1), Vector3D(0, 1, 0)
    axle_1 = init_view_vector.cross(self.view_vector)
    angle = init_view_vector.angle(self.view_vector)
    t_1 = rotate_vector(angle, axle_1)

    final_up_vector = rotate_vector(-90, axle_1) * self.view_vector
    intermediate_up_vector = t_1 * init_up_vector
    angle_between = intermediate_up_vector.angle(final_up_vector)
    t_2 = rotate_vector(-angle_between, self.view_vector)
    camera.transform = translate(self.pupil_point[0],
                                 self.pupil_point[1],
                                 self.pupil_point[2], ) * t_2 * t_1

    vector_xyz = np.arange(3)
    vector_xyz = xr.DataArray(vector_xyz, coords=(vector_xyz, ), dims=('vector_xyz', ), name='vector_xyz', )

    # calculating the pixel view directions
    view_vectors = xr.combine_nested(
        [xr.zeros_like(self.x_pixel_ds + self.y_pixel_ds) + self.view_vector[i] for i in [0, 1, 2, ]],
        concat_dim=(vector_xyz, ),
    )
    view_vectors = view_vectors.rename('view_vectors')

    def v3d2da(v3d):
        """
        Convert a raysect Vector3D to an xarray DataArray.
        """
        da = np.array([v3d.x, v3d.y, v3d.z, ])
        da = xr.DataArray(da, coords=(np.arange(3), ), dims=('vector_xyz', ), )
        return da

    # basis unit vectors defining camera view -- v_z is forward and v_y is up
    v_y = final_up_vector.normalise()
    v_x = self.view_vector.cross(v_y).normalise()
    v_z = self.view_vector.normalise()
    v_x, v_y, v_z = [v3d2da(i) for i in [v_x, v_y, v_z, ]]

    # FOV defines the widest view, with pixels defined as square.
    sensor_aspect = self.sensor_format[1] / self.sensor_format[0]
    if sensor_aspect > 1:
        fov_v = self.fov
        fov_h = self.fov / sensor_aspect
    elif sensor_aspect == 1:
        fov_v = fov_h = self.fov
    else:
        fov_h = self.fov
        fov_v = self.fov * sensor_aspect

    # angular size of one pixel projected onto the unit image plane:
    # pixel_projection = 2 * tan(fov_h / 2) / n_pixels_horizontal
    pixel_projection = 2 * np.tan(fov_h * np.pi / 360) / self.sensor_format[0]
    view_vectors = view_vectors + \
        (v_x * (self.x_pixel_ds - self.sensor_format[0] / 2 + 0.5) * pixel_projection) + \
        (v_y * (self.y_pixel_ds - self.sensor_format[1] / 2 + 0.5) * pixel_projection)

    if self.verbose:
        print('--status: calculating ray lengths')
    # TODO there has to be a better way of doing this?!
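    # (note: raysect's World.hit() would return only the nearest intersection;
    # the manual loop over primitives below is needed so that hits on the
    # 'Plasma Geometry' primitive can be excluded when taking the minimum
    # ray length.)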
    ray_lengths = xr.DataArray(np.zeros(self.sensor_format_ds), dims=('x', 'y', ), coords=(self.x_ds, self.y_ds, ))
    for idx_x, x_pixel in enumerate(self.x_pixel_ds.values):
        if self.verbose and idx_x % 10 == 0:
            print('x =', str(x_pixel))
        for idx_y, y_pixel in enumerate(self.y_pixel_ds.values):
            direction = Vector3D(*list(view_vectors.isel(x=idx_x, y=idx_y, ).values))
            intersections = []
            for p in self.world.primitives:
                intersection = p.hit(CoreRay(self.pupil_point, direction, ))
                if intersection is not None:
                    intersections.append(intersection)

            # find the intersection corresponding to the shortest ray length
            no_intersections = len(intersections)
            if no_intersections == 0:
                # no hit: fall back to a fixed ray length
                ray_lengths.values[idx_x, idx_y] = 3
            else:
                ray_lengths.values[idx_x, idx_y] = min([i.ray_distance for i in intersections
                                                        if i.primitive.name != 'Plasma Geometry'])

    camera.spectral_bins = 40
    camera.pixel_samples = 10
    camera.min_wavelength = self.wl_min_nm
    camera.max_wavelength = self.wl_max_nm
    camera.quiet = not self.verbose
    camera.observe()

    # output to netCDF via xarray
    wl = pipeline_spectral.wavelengths
    wl = xr.DataArray(wl, coords=(wl, ), dims=('wavelength', )) * 1e-9  # (m)
    spec_power_ds = pipeline_spectral.frame.mean * 1e9  # converting units from (W/nm) --> (W/m)
    spec_radiance_ds = pipeline_spectral_rad.frame.mean * 1e9

    coords = (self.x_ds, self.y_ds, wl, )
    dims = ('x', 'y', 'wavelength', )
    attrs = {'units': 'W/m^2/sr/m'}
    spec_power_ds = xr.DataArray(np.flip(spec_power_ds, axis=1), coords=coords, dims=dims,
                                 name='spec_power', attrs=attrs, )
    spec_radiance_ds = xr.DataArray(np.flip(spec_radiance_ds, axis=1, ), coords=coords, dims=dims,
                                    name='spec_radiance', attrs=attrs, )

    # integrate over wavelength to get the total radiance
    radiance_ds = spec_power_ds.integrate(dim='wavelength').assign_attrs({'units': 'W/m^2/sr', })
    ds_ds = xr.Dataset({'spectral_radiance_ds': spec_radiance_ds,
                        'radiance_ds': radiance_ds,
                        'view_vectors_ds': view_vectors,
                        'ray_lengths_ds': ray_lengths,
                        })

    x_p_y = self.x + self.y
    spec_power = spec_power_ds.interp_like(x_p_y) / self.cherab_down_sample  # to conserve power
    ds = xr.Dataset({'spectral_radiance': spec_power, })

    ds_ds.to_netcdf(self.fpath_ds, mode='w', )
    ds.to_netcdf(self.fpath, mode='w', )
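    # Usage sketch (hypothetical check, not part of the original method):
    # re-opening the output file and integrating over wavelength should
    # reproduce the down-sample-corrected radiance.
    #
    #     ds = xr.open_dataset(self.fpath)
    #     radiance = ds['spectral_radiance'].integrate(dim='wavelength')  # W/m^2/sr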