1.0, world, transform=translate(0.5, 5, 2) * rotate(90, 0, 0), material=UniformSurfaceEmitter(d65_white, 1.0)) rgb = RGBPipeline2D(name="sRGB") sampler = RGBAdaptiveSampler2D(rgb, ratio=10, fraction=0.2, min_samples=500, cutoff=0.05) # observer camera = PinholeCamera((512, 256), fov=42, parent=world, transform=translate(0, 3.3, 0) * rotate(0, -47, 0), pipelines=[rgb], frame_sampler=sampler) camera.spectral_bins = 25 camera.pixel_samples = 250 # start ray tracing ion() p = 1 while not camera.render_complete: print("Rendering pass {}...".format(p)) camera.observe() print() p += 1 ioff()
Point3D(0.10 - glass_thickness - padding, 0.0 - glass_thickness - padding, 0.10 - glass_thickness - padding), material=UniformVolumeEmitter(d65_white, 50), parent=light_box) # CAMERA rgb = RGBPipeline2D(display_unsaturated_fraction=0.96, name="sRGB") sampler = RGBAdaptiveSampler2D(rgb, ratio=10, fraction=0.2, min_samples=2000, cutoff=0.01) camera = PinholeCamera((1024, 1024), parent=world, transform=translate(0, 0.16, -0.4) * rotate(0, -12, 0), pipelines=[rgb], frame_sampler=sampler) camera.spectral_rays = 15 camera.spectral_bins = 15 camera.pixel_samples = 250 camera.ray_max_depth = 500 camera.ray_extinction_min_depth = 3 camera.ray_extinction_prob = 0.01 # RAY TRACE ion() name = 'standford_bunny' timestamp = time.strftime("%Y-%m-%d_%H-%M-%S") render_pass = 1 while not camera.render_complete:
# Parabola(radius=0.25, height=0.5, parent=world, material=Gold(), transform=translate(0, 0, 0)*rotate(0, -90, 0)) Parabola(radius=0.1, height=0.2, parent=world, material=schott("N-BK7"), transform=translate(0, 0, 0) * rotate(0, 100, 0)) # Sphere(radius=0.1, parent=world, material=Dielectric(ConstantSF(1.0), ConstantSF(0.02)), transform=translate(0, 0, 0) * rotate(0, 0, 0)) # Parabola(radius=1000, height=0.1, parent=world, material=schott("N-BK7"), transform=translate(0, 0, 0) * rotate(0, 0, 0)) # Parabola(radius=0.1, height=0.05, parent=world, material=Lambert(), transform=translate(0, 0, 0) * rotate(270, 0, 0)) # Background Checkerboard Box(Point3D(-50.0, -50.0, 50), Point3D(50.0, 50.0, 50.1), world, material=Checkerboard(10, d65_white, d65_white, 0.4, 0.8)) # Instantiate camera object, and configure its settings. plt.ion() camera = PinholeCamera((256, 256), fov=45, parent=world, transform=translate(0.5, 0, -0.5) * rotate(45, 0, 0)) camera.pixel_samples = 50 camera.spectral_rays = 1 camera.spectral_bins = 20 # Start ray tracing camera.observe() plt.ioff() plt.show()
# create 50 ring-shaped light sources using the voxel map
#
# The 100x100 Cartesian grid is collapsed into 50 concentric annular sources:
# each grid cell is assigned the index of the ring it falls into, and cells
# outside the circle of radius 50 are excluded (-1).
xsqr = np.linspace(-49.5, 49.5, 100) ** 2   # squared cell-centre coordinates
rad = np.sqrt(xsqr[:, None] + xsqr[None, :])  # radial distance of every cell centre

# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24 — use the
# builtin `int` (identical dtype behaviour: platform default integer).
voxel_map = np.zeros((100, 100), dtype=int)
voxel_map[rad > 50.] = -1  # removing the area outside the circle
for i in range(50):
    # mapping multiple grid cells to a single light source (ring i covers radii (i, i+1));
    # use elementwise `&` rather than arithmetic `*` on the boolean masks
    voxel_map[(rad < i + 1.) & (rad > i)] = i
rtc.voxel_map = voxel_map  # applying a voxel map
# now we have only 50 light sources

# creating ray transfer pipeline
pipeline = RayTransferPipeline2D()

# setting up the camera
camera = PinholeCamera((256, 256), pipelines=[pipeline], frame_sampler=FullFrameSampler2D(),
                       transform=translate(219., 0, 0) * rotate(90., 0., -90.), parent=world)
camera.fov = 90
camera.pixel_samples = 500
# ray transfer matrices are monochromatic: use a 1 nm wide spectral window
camera.min_wavelength = 500.
camera.max_wavelength = camera.min_wavelength + 1.
# one spectral bin per light source — the spectrum stores the transfer-matrix row
camera.spectral_bins = rtc.bins

# starting ray tracing
camera.observe()

# uncomment this to save ray transfer matrix to file
# np.save('ray_transfer_map.npy', pipeline.matrix)

# let's collapse the ray transfer matrix with some emission profiles
# obtaining 30 images for 30 emission profiles
wvl_range = spectrum.min_wavelength - spectrum.max_wavelength shift = 2 * (spectrum.wavelengths - wvl_centre) / wvl_range radius = sqrt(point.x**2 + point.y**2) spectrum.samples += cos((shift + 5) * radius)**4 return spectrum # scene world = World() emitter = Box(Point3D(-1, -1, -0.25), Point3D(1, 1, 0.25), material=CosGlow(), parent=world, transform=translate(0, 1, 0) * rotate(30, 0, 0)) floor = Box(Point3D(-100, -0.1, -100), Point3D(100, 0, 100), world, material=RoughTitanium(0.1)) # camera rgb_pipeline = RGBPipeline2D(display_update_time=5) sampler = RGBAdaptiveSampler2D(rgb_pipeline, min_samples=100, fraction=0.2) camera = PinholeCamera((512, 512), parent=world, transform=translate(0, 4, -3.5) * rotate(0, -45, 0), pipelines=[rgb_pipeline], frame_sampler=sampler) camera.spectral_bins = 15 camera.spectral_rays = 1 camera.pixel_samples = 200 camera.render_engine = MulticoreEngine(4) # integration resolution emitter.material.integrator.step = 0.05 # start ray tracing os.nice(15) ion() timestamp = time.strftime("%Y-%m-%d_%H-%M-%S") for p in range(1, 1000): print("Rendering pass {}...".format(p)) camera.observe()
emitter = Box(Point3D(-1, -1, -0.25), Point3D(1, 1, 0.25), material=CosGlow(), parent=world, transform=translate(0, 1, 0) * rotate(30, 0, 0)) floor = Box(Point3D(-100, -0.1, -100), Point3D(100, 0, 100), world, material=RoughTitanium(0.1)) # camera rgb_pipeline = RGBPipeline2D(display_update_time=5) sampler = RGBAdaptiveSampler2D(rgb_pipeline, min_samples=100, fraction=0.2) camera = PinholeCamera((512, 512), parent=world, transform=translate(0, 4, -3.5) * rotate(0, -45, 0), pipelines=[rgb_pipeline], frame_sampler=sampler) camera.spectral_bins = 15 camera.spectral_rays = 1 camera.pixel_samples = 200 camera.render_engine = MulticoreEngine(4) # integration resolution emitter.material.integrator.step = 0.05 # start ray tracing os.nice(15) ion() timestamp = time.strftime("%Y-%m-%d_%H-%M-%S") for p in range(1, 1000):
from raysect.optical.library import Gold, Silver, Copper, Titanium, Aluminium
from raysect.optical.material import Lambert, UniformSurfaceEmitter

# Demo scene: five metal spheres sitting just above a white Lambertian floor,
# lit by a cylindrical surface emitter overhead.
world = World()

# (x, z, material class) for each sphere; y = 0.5001 rests each sphere on the floor.
sphere_layout = (
    (1.2, 0.6, Gold),
    (0.6, -0.6, Silver),
    (0, 0.6, Copper),
    (-0.6, -0.6, Titanium),
    (-1.2, 0.6, Aluminium),
)
for x_pos, z_pos, metal in sphere_layout:
    Sphere(0.5, world, transform=translate(x_pos, 0.5001, z_pos), material=metal())

# diffuse floor and cylindrical lamp
Box(Point3D(-100, -0.1, -100), Point3D(100, 0, 100), world, material=Lambert(ConstantSF(1.0)))
Cylinder(3.0, 8.0, world, transform=translate(4, 8, 0) * rotate(90, 0, 0), material=UniformSurfaceEmitter(d65_white, 1.0))

# observer looking down at the spheres
camera = PinholeCamera((512, 512), parent=world, transform=translate(0, 4, -3.5) * rotate(0, -48, 0))
camera.spectral_bins = 15
camera.pixel_samples = 100

# start ray tracing: accumulate passes, saving a snapshot after each one
ion()
for p in range(1, 1000):
    print("Rendering pass {}...".format(p))
    camera.observe()
    camera.pipelines[0].save("demo_metal_{}_samples.png".format(p))
    print()

# display final result
ioff()
camera.display()
show()
Point3D(1, 1, 0.25), material=VolumeTransform(CosGlow(), rotate(0, 90, 0)), parent=world, transform=translate(-3.2, 1, 0) * rotate(30, 0, 0)) floor = Box(Point3D(-100, -0.1, -100), Point3D(100, 0, 100), world, material=RoughTitanium(0.1)) # camera rgb_pipeline = RGBPipeline2D(display_update_time=5) sampler = RGBAdaptiveSampler2D(rgb_pipeline, min_samples=100, fraction=0.2) camera = PinholeCamera((512, 256), parent=world, transform=translate(0, 6.5, -10) * rotate(0, -30, 0), pipelines=[rgb_pipeline], frame_sampler=sampler) camera.spectral_bins = 15 camera.spectral_rays = 1 camera.pixel_samples = 100 # integration resolution box_unshifted.material.integrator.step = 0.05 box_down_shifted.material.material.integrator.step = 0.05 box_up_shifted.material.material.integrator.step = 0.05 box_rotated.material.material.integrator.step = 0.05 # start ray tracing ion() timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
# Box defining the floor ground = Box(lower=Point3D(-100, -14.2, -100), upper=Point3D(100, -14.1, 100), material=RoughAluminium(0.05)) # Box defining the isotropic emitter emitterIsotropic = Box(lower=Point3D(-25, -10, -10), upper=Point3D(-5, 10, 10), material=UniformSurfaceEmitter(d65_white), transform=rotate(0, 0, 0)) # Box defining the anisotropic emitter emitterAnisotropic = Box(lower=Point3D(5, -10, -10), upper=Point3D(25, 10, 10), material=BlueYellowEmitter(), transform=rotate(0, 0, 0)) # 2. Add Observer # --------------- # camera camera = PinholeCamera((384, 384), transform=translate(0, 0, -80)) camera.fov = 45 camera.pixel_samples = 250 camera.pipelines[0].accumulate = False # 3. Build Scenegraph # ------------------- world = World() ground.parent = world emitterIsotropic.parent = world emitterAnisotropic.parent = world camera.parent = world
from cherab.openadas import OpenADAS
from cherab.compass import load_core_plasma_from_files

# Input data files for the COMPASS core-plasma reconstruction (relative to this script).
psirz_file = os.path.join(os.path.dirname(__file__), "data/psi_RZ.txt")
psin_file = os.path.join(os.path.dirname(__file__), "data/psi.txt")
electron_file = os.path.join(os.path.dirname(__file__), "data/electrons.txt")
carbon_file = os.path.join(os.path.dirname(__file__), "data/abunC.txt")

world = World()
adas = OpenADAS(permit_extrapolation=True)  # allow rate extrapolation outside tabulated ranges
plasma = load_core_plasma_from_files(world, adas, psirz_file, psin_file, electron_file, carbon_file)

# C III line, transition (11, 10) of charge state 2+  # wavelength=465.01
# NOTE(review): `carbon` is not defined in this chunk — presumably imported from
# cherab.core.atomic elsewhere in the file; verify.
ciii_465 = Line(carbon, 2, (11, 10))  # wavelength=465.01
plasma.models = [ExcitationLine(ciii_465)]

# Two single-sample views of the plasma from different vantage points.
cam = PinholeCamera((512, 512), parent=world,
                    transform=translate(-2, 0, 0) * rotate_basis(Vector3D(1, 0, 0), Vector3D(0, 0, 1)))
cam.pixel_samples = 1
cam.observe()

cam = PinholeCamera((512, 512), parent=world,
                    transform=translate(-1.2, 0.4, 0) * rotate_basis(Vector3D(1, 0, 0), Vector3D(0, 0, 1)))
cam.pixel_samples = 1
cam.observe()
Box(Point3D(-10, -10, 4.0), Point3D(10, 10, 4.1), world, material=Checkerboard(1, d65_white, d65_white, 0.2, 0.8)) # Build a CSG primitive from a number of basic underlying primitives cyl_x = Cylinder(1, 4.2, transform=rotate(90, 0, 0)*translate(0, 0, -2.1)) cyl_y = Cylinder(1, 4.2, transform=rotate(0, 90, 0)*translate(0, 0, -2.1)) cyl_z = Cylinder(1, 4.2, transform=rotate(0, 0, 0)*translate(0, 0, -2.1)) cube = Box(Point3D(-1.5, -1.5, -1.5), Point3D(1.5, 1.5, 1.5)) sphere = Sphere(2.0) target = Intersect(sphere, cube, parent=world, transform=translate(0, 0, 0)*rotate(0, 0, 0), material=schott("N-BK7")) # create and setup the camera camera = PinholeCamera((256, 256), fov=45, parent=world, transform=translate(0, 0, -6) * rotate(0, 0, 0)) camera.spectral_rays = 9 camera.spectral_bins = 30 rgb = camera.pipelines[0] # for each frame rotate the CSG primitive and re-render num_frames = 25*20 full_rotation = 360 for frame in range(num_frames): print("Rendering frame {}:".format(frame)) rotation = full_rotation / num_frames * frame target.transform = rotate(rotation, 25, 5) camera.observe()
plt.ylabel('z axis (beam coords)')
plt.title("Beam full energy density profile in r-z plane")

# Sample the beam density along its axis (x = y = 0) from 0 to 3 m.
z = np.linspace(0, 3, 200)
beam_full_densities = [beam.density(0, 0, zz) for zz in z]
plt.figure()
plt.plot(z, beam_full_densities, label="full energy")
plt.xlabel('z axis (beam coords)')
plt.ylabel('beam component density [m^-3]')
plt.title("Beam attenuation by energy component")
plt.legend()

# OBSERVATIONS ----------------------------------------------------------------

# Low-resolution camera looking along the beam from the side.
camera = PinholeCamera((128, 128), parent=world,
                       transform=translate(0.25, -1.25, 0) * rotate_basis(Vector3D(0, 1, 0), Vector3D(0, 0, 1)))
camera.spectral_rays = 1
camera.spectral_bins = 15
camera.pixel_samples = 5

# turning off parallelisation because this causes issues with the way RENATE currently loads atomic data
from raysect.core.workflow import SerialEngine
camera.render_engine = SerialEngine()

plt.ion()
camera.observe()

# Single line of sight collecting total and spectral power over 582-586 nm.
power = PowerPipeline0D(accumulate=False)
spectral_power = SpectralPowerPipeline0D()
los = SightLine(pipelines=[power, spectral_power], min_wavelength=582, max_wavelength=586,
                parent=world, transform=translate(*los_start) * rotate_basis(los_direction, Vector3D(0, 0, 1)))
# Raysect logo: six tinted glass "petals" arranged every 60 degrees around a
# common node, rendered with a deep-recursion pinhole camera.
node = Node(parent=world, transform=rotate(0, 0, 90))

# One petal per glass tint, rotated in 60-degree steps about the node's z axis.
petal_glasses = (red_glass, yellow_glass, green_glass, cyan_glass, blue_glass, purple_glass)
for index, petal_glass in enumerate(petal_glasses):
    Box(Point3D(-0.5, 0, -2.5), Point3D(0.5, 0.25, 0.5), node,
        rotate(0, 0, 60 * index) * translate(0, 1, -0.500001), petal_glass)

camera = PinholeCamera((256, 256), fov=45, parent=world, transform=translate(0, 0, -6.5) * rotate(0, 0, 0))

ion()

# Deep ray recursion with Russian-roulette extinction for the layered glass.
camera.ray_max_depth = 500
camera.ray_extinction_prob = 0.01
camera.pixel_samples = 100
camera.spectral_rays = 1
camera.spectral_bins = 21
camera.observe()

ioff()
camera.pipelines[0].save("raysect_logo.png")
camera.pipelines[0].display()
show()
from matplotlib.pyplot import *
from numpy import array

from raysect.primitive import Sphere, Box, Cylinder, Union, Intersect, Subtract
from raysect.optical import World, translate, rotate, Point3D, d65_white, InterpolatedSF
from raysect.optical.material.emitter import UniformSurfaceEmitter, Checkerboard
from raysect.optical.material.dielectric import Dielectric, Sellmeier
from raysect.optical.library import schott
from raysect.optical.observer import PinholeCamera, RGBPipeline2D, SpectralPowerPipeline2D

# CSG demo: the intersection of three mutually orthogonal cylinders
# (a "tricylinder"), inspected with the VTK scenegraph viewer.
world = World()

# Identical unit-radius cylinders aligned with the x, y and z axes; each is
# shifted back by half its 4.2 length so it is centred on the origin.
axis_rotations = (rotate(90, 0, 0), rotate(0, 90, 0), rotate(0, 0, 0))
cyl_x, cyl_y, cyl_z = (Cylinder(1, 4.2, transform=axis * translate(0, 0, -2.1)) for axis in axis_rotations)

Intersect(Intersect(cyl_x, cyl_y), cyl_z, world)

camera = PinholeCamera((256, 256), parent=world, transform=translate(0, 0, -4) * rotate(0, 0, 0))

from raysect.vtk import visualise_scenegraph

visualise_scenegraph(camera)
plt.colorbar()
plt.axis('equal')
plt.xlabel('r axis')
plt.ylabel('z axis')
plt.title("Neutral Density profile in r-z plane")

# Scan the D-alpha excitation emission model over a 200x200 grid in the
# r-z plane (x in [0, 4], y in [-2, 2]), evaluating total emission per point.
plt.figure()
xrange = np.linspace(0, 4, 200)
yrange = np.linspace(-2, 2, 200)
d_alpha_rz_intensity = np.zeros((200, 200))
direction = Vector3D(0, 1, 0)
for i, x in enumerate(xrange):
    for j, y in enumerate(yrange):
        # single-bin spectrum spanning 650-660 nm captures the line's power
        emission = d_alpha_excit.emission(Point3D(x, 0.0, y), direction, Spectrum(650, 660, 1))
        d_alpha_rz_intensity[j, i] = emission.total()
plt.imshow(d_alpha_rz_intensity, extent=[0, 4, -2, 2], origin='lower')
plt.colorbar()
plt.xlabel('r axis')
plt.ylabel('z axis')
plt.title("D-alpha emission in R-Z")

# Quick single-sample power render of the scene from the side.
camera = PinholeCamera((256, 256), pipelines=[PowerPipeline2D()], parent=world)
camera.transform = translate(2.5, -4.5, 0)*rotate_basis(Vector3D(0, 1, 0), Vector3D(0, 0, 1))
camera.pixel_samples = 1

plt.ion()
camera.observe()
plt.ioff()
plt.show()
# Three tilted rough-aluminium slabs of increasing roughness, used to compare
# light sampling vs BRDF sampling strategies with otherwise identical cameras.
Box(lower=Point3D(-3, -0.1, -0.5), upper=Point3D(3, 0.1, 0.5), parent=world,
    transform=translate(0, 0.7, 1)*rotate(0, 32, 0), material=RoughAluminium(0.005))
Box(lower=Point3D(-3, -0.1, -0.5), upper=Point3D(3, 0.1, 0.5), parent=world,
    transform=translate(0, 0.05, 0)*rotate(0, 24.5, 0), material=RoughAluminium(0.03))
Box(lower=Point3D(-3, -0.1, -0.5), upper=Point3D(3, 0.1, 0.5), parent=world,
    transform=translate(0, -0.5, -1)*rotate(0, 19, 0), material=RoughAluminium(0.1))

ion()

# Light sampling: all importance weight on direct light paths.
light_sampling = RGBPipeline2D(name="Light Sampling")
light_sampling.display_sensitivity = 200
light_sampling.accumulate = False

camera = PinholeCamera(pixels, fov=45, parent=world,
                       transform=translate(0, 1, -10) * rotate(0, 0, 0), pipelines=[light_sampling])
camera.pixel_samples = samples
camera.ray_importance_sampling = True
camera.ray_important_path_weight = 1.0
camera.observe()

# BRDF sampling: importance sampling disabled, paths follow the material BRDF.
brdf_sampling = RGBPipeline2D(name="BRDF Sampling")
brdf_sampling.display_sensitivity = 200
brdf_sampling.accumulate = False

camera = PinholeCamera(pixels, fov=45, parent=world,
                       transform=translate(0, 1, -10) * rotate(0, 0, 0), pipelines=[brdf_sampling])
camera.pixel_samples = samples
camera.ray_importance_sampling = False
camera.observe()

# MIS sampling
# ############################### OBSERVATION ############################### # print('Observation') duct_pos = Point3D(0.539, -1.926, 0) #Position of beam south_pos.vector_to(duct_pos) beam_axis = south_pos.vector_to(duct_pos).normalise() los = Point3D(-1.742, 1.564, 0.179) #camera position direction = Vector3D(0.919, -0.389, -0.057).normalise() #optical axis los = los + direction * 0.9 up = Vector3D(1, 1, 1) translate(los.x, los.y, los.z) * rotate_basis(direction, up) camera = PinholeCamera((256, 256), fov=52.9, parent=world, transform=translate(0, 0, 3) * rotate_basis(Vector3D(0, 0, -1), Vector3D(1, 0, 0))) camera.pixel_samples = 50 camera.spectral_bins = 15 camera.render_engine = SerialEngine() camera.observe() plt.figure() plt.plot(length, d_gas_density, label='d') plt.plot(length, c_gas_density, label='c') plt.plot(length, e_gas_density, label='e') plt.legend() plt.xlabel('Distance along beam line (m)') plt.ylabel('density')
Cylinder(0.5, 1.0, world, transform=translate(0.5, 5, 2) * rotate(90, 0, 0), material=UniformSurfaceEmitter(d65_white, 0.5)) # observer rgb = RGBPipeline2D(display_unsaturated_fraction=0.85) sampler = RGBAdaptiveSampler2D(rgb, ratio=10, fraction=0.2, min_samples=500, cutoff=0.01) camera = PinholeCamera((1024, 512), pipelines=[rgb], frame_sampler=sampler, transform=translate(0, 3.3, 0) * rotate(0, -47, 0), fov=42, parent=world) camera.spectral_rays = 1 camera.spectral_bins = 15 camera.pixel_samples = 250 # start ray tracing ion() name = 'modifier_add' timestamp = time.strftime("%Y-%m-%d_%H-%M-%S") render_pass = 1 while not camera.render_complete: print("Rendering pass {}...".format(render_pass)) camera.observe()
def make_cherab_image(self): """ run cherab to generate the synthetic spectral cube :return: """ if self.radiance is not NotImplemented: self.radiance.close() if self.spectral_radiance is not NotImplemented: self.spectral_radiance.close() import_mastu_mesh(self.world, ) # first, define camera, calculate view vectors and calculate ray lengths pipeline_spectral = SpectralPowerPipeline2D() pipeline_spectral_rad = SpectralRadiancePipeline2D() pipelines = [pipeline_spectral, pipeline_spectral_rad, ] camera = PinholeCamera(self.sensor_format_ds, fov=self.fov, pipelines=pipelines, parent=self.world) # orient and position the camera init_view_vector, init_up_vector = Vector3D(0, 0, 1), Vector3D(0, 1, 0) axle_1 = init_view_vector.cross(self.view_vector) angle = init_view_vector.angle(self.view_vector) t_1 = rotate_vector(angle, axle_1) final_up_vector = rotate_vector(-90, axle_1) * self.view_vector intermediate_up_vector = t_1 * init_up_vector angle_between = intermediate_up_vector.angle(final_up_vector) t_2 = rotate_vector(-angle_between, self.view_vector) camera.transform = translate(self.pupil_point[0], self.pupil_point[1], self.pupil_point[2], ) * t_2 * t_1 vector_xyz = np.arange(3) vector_xyz = xr.DataArray(vector_xyz, coords=(vector_xyz, ), dims=('vector_xyz',), name='vector_xyz', ) # calculating the pixel view directions view_vectors = xr.combine_nested( [xr.zeros_like(self.x_pixel_ds + self.y_pixel_ds) + self.view_vector[i] for i in [0, 1, 2, ]], concat_dim=(vector_xyz,), ) view_vectors = view_vectors.rename('view_vectors') def v3d2da(v3d): """ raysect Vector3D to xarray DataArray :param v3d: :return: """ da = np.array([v3d.x, v3d.y, v3d.z, ]) da = xr.DataArray(da, coords=(np.arange(3),), dims=('vector_xyz',), ) return da # basis unit vectors defining camera view -- v_z is forward and v_y is up v_y = final_up_vector.normalise() v_x = self.view_vector.cross(v_y).normalise() v_z = self.view_vector.normalise() v_x, v_y, v_z = [v3d2da(i) for i in [v_x, v_y, v_z, ]] # 
FOV defines the widest view, with pixels defined as square. sensor_aspect = self.sensor_format[1] / self.sensor_format[0] if sensor_aspect > 1: fov_v = self.fov fov_h = self.fov / sensor_aspect elif sensor_aspect == 1: fov_v = fov_h = self.fov elif sensor_aspect < 1: fov_h = self.fov fov_v = self.fov * sensor_aspect else: raise Exception() pixel_projection = 2 * np.tan(fov_h * np.pi / 360) / self.sensor_format[0] view_vectors = view_vectors + (v_x * (self.x_pixel_ds - self.sensor_format[0] / 2 + 0.5) * pixel_projection) + \ (v_y * (self.y_pixel_ds - self.sensor_format[1] / 2 + 0.5) * pixel_projection) if self.verbose: print('--status: calculating ray lengths') # TODO there has to be a better way of doing this?! ray_lengths = xr.DataArray(np.zeros(self.sensor_format_ds), dims=('x', 'y', ), coords=(self.x_ds, self.y_ds, )) for idx_x, x_pixel in enumerate(self.x_pixel_ds.values): if self.verbose and idx_x % 10 == 0: print('x =', str(x_pixel)) for idx_y, y_pixel in enumerate(self.y_pixel_ds.values): direction = Vector3D(*list(view_vectors.isel(x=idx_x, y=idx_y, ).values)) intersections = [] for p in self.world.primitives: intersection = p.hit(CoreRay(self.pupil_point, direction, )) if intersection is not None: intersections.append(intersection) # find the intersection corresponding to the shortest ray length no_intersections = len(intersections) if no_intersections == 0: ray_lengths.values[idx_x, idx_y] = 3 else: ray_lengths.values[idx_x, idx_y] = min([i.ray_distance for i in intersections if i.primitive.name != 'Plasma Geometry']) camera.spectral_bins = 40 camera.pixel_samples = 10 camera.min_wavelength = self.wl_min_nm camera.max_wavelength = self.wl_max_nm camera.quiet = not self.verbose camera.observe() # output to netCDF via xarray wl = pipeline_spectral.wavelengths wl = xr.DataArray(wl, coords=(wl, ), dims=('wavelength', )) * 1e-9 # ( m ) spec_power_ds = pipeline_spectral.frame.mean * 1e9 # converting units from (W/nm) --> (W/m) spec_radiance_ds = 
pipeline_spectral_rad.frame.mean * 1e9 coords = (self.x_ds, self.y_ds, wl, ) dims = ('x', 'y', 'wavelength', ) name = 'spec_power' attrs = {'units': 'W/m^2/str/m'} spec_power_ds = xr.DataArray(np.flip(spec_power_ds, axis=1), coords=coords, dims=dims, name=name, attrs=attrs, ) spec_radiance_ds = xr.DataArray(np.flip(spec_radiance_ds, axis=1, ), coords=coords, dims=dims, name=name, attrs=attrs, ) # calculate the centre-of-mass wavelength radiance_ds = spec_power_ds.integrate(dim='wavelength').assign_attrs({'units': 'W/m^2/str', }) ds_ds = xr.Dataset({'spectral_radiance_ds': spec_radiance_ds, 'radiance_ds': radiance_ds, 'view_vectors_ds': view_vectors, 'ray_lengths_ds': ray_lengths }) x_p_y = self.x + self.y spec_power = spec_power_ds.interp_like(x_p_y) / self.cherab_down_sample # to conserve power ds = xr.Dataset({'spectral_radiance': spec_power, }) ds_ds.to_netcdf(self.fpath_ds, mode='w', ) ds.to_netcdf(self.fpath, mode='w', )
# Load the four octant-8 PINI neutral beam injectors for this pulse from PPF data.
pini_8_1 = load_pini_from_ppf(PULSE, '8.1', plasma, adas, attenuation_instructions,
                              beam_emission_instructions, world)
pini_8_2 = load_pini_from_ppf(PULSE, '8.2', plasma, adas, attenuation_instructions,
                              beam_emission_instructions, world)
pini_8_5 = load_pini_from_ppf(PULSE, '8.5', plasma, adas, attenuation_instructions,
                              beam_emission_instructions, world)
pini_8_6 = load_pini_from_ppf(PULSE, '8.6', plasma, adas, attenuation_instructions,
                              beam_emission_instructions, world)

# ############################### OBSERVATION ############################### #
print('Observation')

# Camera pupil position and optical axis; the pupil is advanced 0.9 m along the axis.
los = Point3D(4.22950, -0.791368, 0.269430)
direction = Vector3D(-0.760612, -0.648906, -0.0197396).normalise()
los = los + direction * 0.9
up = Vector3D(0, 0, 1)

camera = PinholeCamera(
    (512, 512), fov=45, parent=world,
    transform=translate(los.x, los.y, los.z) * rotate_basis(direction, up))
camera.pixel_samples = 50
camera.spectral_bins = 15

camera.observe()
# Glass shell: outer box minus inner box, leaving a thin N-BK7 wall.
glass = Subtract(glass_outer, glass_inner, material=schott("N-BK7"), parent=light_box)

# Volume emitter filling the glass cavity (inset by `padding` to avoid
# coincident surfaces with the glass shell).
emitter = Box(Point3D(-0.10 + glass_thickness + padding, -0.02 + glass_thickness + padding, -0.10 + glass_thickness + padding),
              Point3D(0.10 - glass_thickness - padding, 0.0 - glass_thickness - padding, 0.10 - glass_thickness - padding),
              material=UniformVolumeEmitter(d65_white, 50), parent=light_box)

# CAMERA

ion()
camera = PinholeCamera((256, 256), fov=40, parent=world,
                       transform=translate(0, 0.16, -0.4) * rotate(0, -12, 0))
camera.pixel_samples = 50
camera.spectral_rays = 15  # spectral splitting for dispersion through the glass
camera.spectral_bins = 15
camera.observe()
ioff()

# Save the rendered frame with a timestamped filename, then display it.
rgb = camera.pipelines[0]
rgb.save("stanford_bunny_{}.png".format(time.strftime("%Y-%m-%d_%H-%M-%S")))
show()
# Second sphere overlapping the first; their intersection forms a glass lens shape.
s2 = Sphere(0.5, transform=translate(0, 0, -0.5+0.01))
Intersect(s1, s2, world, translate(0,0,-3.6)*rotate(50,50,0), schott("N-BK7"))

# Checkerboard backdrop and a dim enclosing emitter for ambient illumination.
Box(Point3D(-50, -50, 50), Point3D(50, 50, 50.1), world,
    material=Checkerboard(4, d65_white, d65_white, 0.4, 0.8))
Box(Point3D(-100, -100, -100), Point3D(100, 100, 100), world,
    material=UniformSurfaceEmitter(d65_white, 0.1))

ion()

# create and setup the camera
rgb = RGBPipeline2D()
rgb.accumulate = True
# NOTE(review): the spectral pipeline is built but not included in `pipelines`
# below, so it is never populated — intentional toggle or leftover? Confirm.
spectral = SpectralPowerPipeline2D()
spectral.accumulate = True
pipelines = [rgb]
camera = PinholeCamera((256, 256), parent=world,
                       transform=translate(0, 0, -4) * rotate(0, 0, 0), pipelines=pipelines)
camera.pixel_samples = 250
camera.fov = 75
camera.spectral_bins = 15
camera.spectral_rays = 1

camera.observe()

ioff()
# camera.pipelines[0].save("render.png")
camera.pipelines[0].display()
show()
prism_light = light_box(parent=world, transform=rotate(-35.5, 0, 0) * translate(0.10, 0, 0) * rotate(90, 0, 0)) # background light source top_light = Sphere(0.5, parent=world, transform=translate(0, 2, -1), material=UniformSurfaceEmitter(d65_white, scale=2)) # Give the prism a high importance to ensure adequate sampling prism.material.importance = 9 rgb = RGBPipeline2D() # create and setup the camera camera = PinholeCamera((512, 256), fov=45, parent=world, pipelines=[rgb]) camera.transform = translate(0, 0.05, -0.05) * rotate(180, -65, 0) * translate(0, 0, -0.75) camera.ray_importance_sampling = True camera.ray_important_path_weight = 0.75 camera.ray_max_depth = 500 camera.ray_extinction_prob = 0.01 camera.spectral_bins = 32 camera.spectral_rays = 32 camera.pixel_samples = 100 # start ray tracing plt.ion() for p in range(0, 1000): print("Rendering pass {}".format(p+1)) camera.observe()
1.0, world, transform=translate(0.5, 5, 4) * rotate(90, 0, 0), material=UniformSurfaceEmitter(d65_white, 1.0)) Cylinder(0.5, 1.0, world, transform=translate(0.5, 5, 2) * rotate(90, 0, 0), material=UniformSurfaceEmitter(d65_white, 1.0)) rgb = RGBPipeline2D(display_unsaturated_fraction=0.96, name="sRGB") # observer camera = PinholeCamera((1024, 512), fov=42, pipelines=[rgb], transform=translate(0, 3.3, 0) * rotate(0, -47, 0), parent=world) camera.ray_max_depth = 500 camera.ray_extinction_prob = 0.01 camera.spectral_rays = 1 camera.spectral_bins = 15 camera.pixel_samples = 50 # start ray tracing ion() for p in range(1, 1000): print("Rendering pass {}".format(p)) camera.observe() camera.pipelines[0].save("demo_roughen_{}_samples.png".format(p)) print()
# Filtered power pipelines for the Cornell-box render, plus sRGB and Bayer views.
power_green = PowerPipeline2D(filter=filter_green, display_unsaturated_fraction=0.96, name="Green Filter")
power_green.display_update_time = 15

power_red = PowerPipeline2D(filter=filter_red, display_unsaturated_fraction=0.96, name="Red Filter")
power_red.display_update_time = 15

rgb = RGBPipeline2D(display_unsaturated_fraction=0.96, name="sRGB")

# Simulated Bayer-mosaic sensor response.
bayer = BayerPipeline2D(filter_red, filter_green, filter_blue, display_unsaturated_fraction=0.96, name="Bayer Filter")
bayer.display_update_time = 15

pipelines = [rgb, power_unfiltered, power_green, power_red, bayer]

# Adaptive sampling driven by noise in the sRGB frame.
sampler = RGBAdaptiveSampler2D(rgb, ratio=10, fraction=0.2, min_samples=500, cutoff=0.01)

camera = PinholeCamera((1024, 1024), parent=world,
                       transform=translate(0, 0, -3.3) * rotate(0, 0, 0), pipelines=pipelines)
camera.frame_sampler = sampler
camera.spectral_rays = 1
camera.spectral_bins = 15
camera.pixel_samples = 250
camera.ray_importance_sampling = True
camera.ray_important_path_weight = 0.25
# Deep recursion with Russian-roulette extinction after a minimum depth of 3.
camera.ray_max_depth = 500
camera.ray_extinction_min_depth = 3
camera.ray_extinction_prob = 0.01

# start ray tracing
ion()
name = 'cornell_box'
timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
render_pass = 1
parent=world, transform=translate(-1, 2, 1), material=UniformSurfaceEmitter(d65_white, scale=5)) # Give the prism a high importance to ensure adequate sampling prism.material.importance = 9 rgb = RGBPipeline2D() rgb.display_sensitivity = 2.0 sampler = RGBAdaptiveSampler2D(rgb, min_samples=500) # create and setup the camera camera = PinholeCamera((1920, 1080), fov=45, parent=world, pipelines=[rgb], frame_sampler=sampler) camera.transform = translate(0, 0.075, -0.05) * rotate( 180, -45, 0) * translate(0, 0, -0.75) camera.ray_importance_sampling = True camera.ray_important_path_weight = 0.75 camera.ray_max_depth = 500 camera.ray_extinction_prob = 0.01 camera.spectral_bins = 32 camera.spectral_rays = 32 camera.pixel_samples = 250 # start ray tracing plt.ion() for p in range(0, 1000):
# like any primitive, the box can be connected to scenegraph or transformed at any moment, # not only at initialisation # rtb.parent = world # rtb.transform = translate(-60., 0, 0) # setting the integration step (can be done at initialisation) rtb.step = 0.2 # creating ray transfer pipeline pipeline = RayTransferPipeline2D() # setting up the camera camera = PinholeCamera( (256, 256), pipelines=[pipeline], frame_sampler=FullFrameSampler2D(), transform=rotate(-15., -35, 0) * translate(-10, 50, -250), parent=world) camera.fov = 45 camera.pixel_samples = 1000 camera.min_wavelength = 500. camera.max_wavelength = camera.min_wavelength + 1. # Ray transfer matrices are calculated for a single value of wavelength, # but the number of spectral bins must be equal to the number of active voxels in the grid. # This is so because the spectral array is used to store the values of ray transfer matrix. camera.spectral_bins = rtb.bins # starting ray tracing camera.observe() # let's collapse the ray transfer matrix with some emission profiles
# Matt enclosure: outer shell minus an inner cavity, leaving diffuse grey walls
# around the glass light box.
enclosure_outer = Box(Point3D(-0.10 - enclosure_thickness, -0.02 - enclosure_thickness, -0.10 - enclosure_thickness),
                      Point3D(0.10 + enclosure_thickness, 0.0, 0.10 + enclosure_thickness))
enclosure_inner = Box(Point3D(-0.10 - padding, -0.02 - padding, -0.10 - padding),
                      Point3D(0.10 + padding, 0.001, 0.10 + padding))
enclosure = Subtract(enclosure_outer, enclosure_inner, material=Lambert(ConstantSF(0.2)), parent=light_box)

# Glass shell: outer box minus inner box gives a thin N-BK7 wall.
glass_outer = Box(Point3D(-0.10, -0.02, -0.10),
                  Point3D(0.10, 0.0, 0.10))
glass_inner = Box(Point3D(-0.10 + glass_thickness, -0.02 + glass_thickness, -0.10 + glass_thickness),
                  Point3D(0.10 - glass_thickness, 0.0 - glass_thickness, 0.10 - glass_thickness))
glass = Subtract(glass_outer, glass_inner, material=schott("N-BK7"), parent=light_box)

# Volume emitter filling the cavity, inset by `padding` to avoid coincident surfaces.
emitter = Box(Point3D(-0.10 + glass_thickness + padding, -0.02 + glass_thickness + padding, -0.10 + glass_thickness + padding),
              Point3D(0.10 - glass_thickness - padding, 0.0 - glass_thickness - padding, 0.10 - glass_thickness - padding),
              material=UniformVolumeEmitter(d65_white, 50), parent=light_box)

# CAMERA

ion()
camera = PinholeCamera((256, 256), fov=40, parent=world,
                       transform=translate(0, 0.16, -0.4) * rotate(0, -12, 0))
camera.pixel_samples = 50
camera.spectral_rays = 15  # spectral splitting to capture dispersion in the glass
camera.spectral_bins = 15
camera.observe()
ioff()

# Save a timestamped frame and display it.
rgb = camera.pipelines[0]
rgb.save("stanford_bunny_{}.png".format(time.strftime("%Y-%m-%d_%H-%M-%S")))
show()
# fill light Sphere(10, world, transform=translate(7, 20, 20), material=UniformSurfaceEmitter(d65_white, 0.15)) # camera rgb = RGBPipeline2D(display_update_time=15, display_unsaturated_fraction=0.995) sampler = RGBAdaptiveSampler2D(rgb, min_samples=1000, fraction=0.1, cutoff=0.01) camera = PinholeCamera((1024, 1024), parent=world, transform=translate(0, 4, -3.5) * rotate(0, -46, 0), pipelines=[rgb], frame_sampler=sampler) camera.spectral_bins = 21 camera.spectral_rays = 21 camera.pixel_samples = 250 camera.ray_max_depth = 10000 camera.ray_extinction_min_depth = 3 camera.ray_extinction_prob = 0.002 # start ray tracing ion() name = 'diamond' timestamp = time.strftime("%Y-%m-%d_%H-%M-%S") render_pass = 1 while not camera.render_complete:
plt.xlabel('x axis (beam coords)')
plt.ylabel('z axis (beam coords)')
plt.title("Beam full energy density profile in r-z plane")

# Sample each beam energy component's density along the beam axis (x = y = 0).
z = np.linspace(0, 3, 200)
beam_components = ((beam_full, "full energy"), (beam_half, "half energy"), (beam_third, "third energy"))

plt.figure()
for component_beam, component_label in beam_components:
    component_densities = [component_beam.density(0, 0, zz) for zz in z]
    plt.plot(z, component_densities, label=component_label)
plt.xlabel('z axis (beam coords)')
plt.ylabel('beam component density [m^-3]')
plt.title("Beam attenuation by energy component")
plt.legend()

# Low-resolution side-on view of the beams.
camera = PinholeCamera((128, 128), parent=world,
                       transform=translate(1.25, -3.5, 0) * rotate_basis(Vector3D(0, 1, 0), Vector3D(0, 0, 1)))
camera.spectral_rays = 1
camera.spectral_bins = 15
camera.pixel_samples = 50

plt.ion()
camera.observe()
plt.ioff()
plt.show()
from raysect.primitive.lens.spherical import *

# Common tilt (degrees) applied to every lens so the camera views them face-on.
tilt = 90.0

# Scene root.
world = World()

# One example of each spherical lens type, laid out on a 2x2 grid with the
# meniscus at the centre. All lenses share the same N-BK7 glass.
BiConvex(0.0254, 0.0052, 0.0506, 0.0506, parent=world,
         transform=translate(0.02, 0.02, 0) * rotate(tilt, 0.0, 0.0), material=schott("N-BK7"))
BiConcave(0.0254, 0.0030, 0.052, 0.052, parent=world,
          transform=translate(-0.02, 0.02, 0) * rotate(tilt, 0.0, 0.0), material=schott("N-BK7"))
PlanoConvex(0.0254, 0.0053, 0.0258, parent=world,
            transform=translate(0.02, -0.02, 0) * rotate(tilt, 0.0, 0.0), material=schott("N-BK7"))
PlanoConcave(0.0254, 0.0035, 0.0257, parent=world,
             transform=translate(-0.02, -0.02, 0) * rotate(tilt, 0.0, 0.0), material=schott("N-BK7"))
Meniscus(0.0254, 0.0036, 0.0321, 0.0822, parent=world,
         transform=translate(0, 0, 0) * rotate(tilt, 0.0, 0.0), material=schott("N-BK7"))

# Checkerboard backdrop behind the lenses.
Box(Point3D(-50.0, -50.0, 0.1), Point3D(50.0, 50.0, 0.2), world,
    material=Checkerboard(0.01, d65_white, d65_white, 0.4, 0.8))

# Configure the observer, trace the scene and save the result.
plt.ion()
camera = PinholeCamera((512, 512), fov=45, parent=world, transform=translate(0, 0, -0.1) * rotate(0, 0, 0))
camera.pixel_samples = 100
camera.spectral_rays = 1
camera.spectral_bins = 20

camera.observe()
camera.pipelines[0].save("lens_render.png")
plt.ioff()
plt.show()
def check_scene(self, max_iter=200):
    """Render a flat-shaded diagnostic view of the scene geometry.

    Re-assigns simple Lambertian colours to the main scene components,
    surrounds them with coloured walls and uniform surface emitters, then
    renders repeated passes with a low-sample pinhole camera until the
    render converges or ``max_iter`` passes have run.

    :param max_iter: maximum number of render passes before giving up.
    """
    # Paint each component a distinct flat colour so it is identifiable
    # in the diagnostic render.
    self.vessel.material = Lambert(blue)
    self.camera_outer.material = Lambert(yellow)
    self.camera_top.material = Lambert(yellow)
    self.source.material = Lambert(green)
    self.top_pinhole.material = Lambert(green)
    self.out_pinhole.material = Lambert(green)

    # cube walls
    bottom = Box(lower=Point3D(-0.99, -1.02, -0.99), upper=Point3D(0.99, -1.01, 0.99), parent=self.world, material=Lambert(red))
    # top = Box(lower=Point3D(-0.99, 1.01, -0.99), upper=Point3D(0.99, 1.02, 0.99), parent=self.world,
    #           material=Lambert(red))
    left = Box(lower=Point3D(1.01, -0.99, -0.99), upper=Point3D(1.02, 0.99, 0.99), parent=self.world, material=Lambert(yellow))
    # right = Box(lower=Point3D(-1.02, -0.99, -0.99), upper=Point3D(-1.01, 0.99, 0.99), parent=self.world,
    #             material=Lambert(purple))
    back = Box(lower=Point3D(-0.99, -0.99, 1.01), upper=Point3D(0.99, 0.99, 1.02), parent=self.world, material=Lambert(orange))

    # various wall light sources
    light_front = Box(lower=Point3D(-1.5, -1.5, -10.1), upper=Point3D(1.5, 1.5, -10), parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0))
    light_top = Box(lower=Point3D(-0.99, 1.01, -0.99), upper=Point3D(0.99, 1.02, 0.99), parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0), transform=translate(0, 1.0, 0))
    light_bottom = Box(lower=Point3D(-0.99, -3.02, -0.99), upper=Point3D(0.99, -3.01, 0.99), parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0), transform=translate(0, 1.0, 0))
    light_right = Box(lower=Point3D(-1.92, -0.99, -0.99), upper=Point3D(-1.91, 0.99, 0.99), parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0))
    light_left = Box(lower=Point3D(1.91, -0.99, -0.99), upper=Point3D(1.92, 0.99, 0.99), parent=self.world, material=UniformSurfaceEmitter(d65_white, 1.0))

    # Process the ray-traced spectra with the RGB pipeline.
    rgb = RGBPipeline2D()

    # camera
    pix = 1000
    # NOTE(review): this first camera is immediately replaced by the "prof"
    # view below and is therefore dead code — kept for easy switching
    # between viewpoints, like the commented alternatives.
    camera = PinholeCamera(
        (pix, pix), pipelines=[rgb], transform=translate(-0.01, 0.0, -0.25) * rotate(0, 0, 0))
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(0.0, 0.0, 0.4) * rotate(180, 0, 0))
    # top view
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(0.0, self.vessel_out_rad+0.15, self.vessel_width/2)*rotate(0, -90, 0))
    # prof
    camera = PinholeCamera(
        (pix, pix), pipelines=[rgb], transform=translate(-0.13, 0.13, -0.2) * rotate(-25, -25.0, 0))
    # camera top side
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.x_shift_top, self.top_px_first_y+0.0004, self.top_px_z-self.cam_in_radius+0.005)*rotate(0, 0, 0))
    # camera top down-up
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.x_shift_top, self.top_px_first_y-0.01, self.vessel_width/2)*rotate(0, 90, 0))
    # camera top up-down
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.x_shift_top-0.004, self.top_px_first_y+self.lid_top+self.tube_height-0.01, self.vessel_width/2)*rotate(0, -90, 0))
    # camera out side
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(-self.vessel_out_rad-0.015, 0.000, self.vessel_width/2-self.cam_in_radius/2+0.0001))
    # camera out down-up
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(self.out_px_first_x+0.005+0.005, 0.0, self.vessel_width/2)*rotate(90, 0, 0))
    # camera out up-down
    # camera = PinholeCamera((pix, pix), pipelines=[rgb], transform=translate(-self.vessel_out_rad-self.tube_height-0.01, 0.0, self.vessel_width/2-0.005)*rotate(-90, 0, 0))

    # camera - pixel sampling settings
    camera.fov = 60  # 45
    camera.pixel_samples = 10

    # camera - ray sampling settings
    camera.spectral_rays = 1
    camera.spectral_bins = 25
    camera.parent = self.world

    plt.ion()

    # Render repeatedly until converged, capped at max_iter passes.
    p = 1
    while not camera.render_complete:
        print("Rendering pass {}...".format(p))
        camera.observe()
        print()
        p += 1
        if p > max_iter:
            break
    plt.ioff()
    rgb.display()
# four strip lights: identical cylinder emitters spaced along the z axis
for z_pos in (8, 6, 4, 2):
    Cylinder(0.5, 1.0, world, transform=translate(0.5, 5, z_pos) * rotate(90, 0, 0),
             material=UniformSurfaceEmitter(d65_white, 1.0))

rgb = RGBPipeline2D(display_unsaturated_fraction=0.96)

# observer
camera = PinholeCamera((1024, 512), pipelines=[rgb],
                       transform=translate(0, 3.3, 0) * rotate(0, -47, 0), fov=42, parent=world)
camera.ray_max_depth = 5
camera.ray_extinction_prob = 0.01
camera.spectral_rays = 1
camera.spectral_bins = 15
camera.pixel_samples = 250

# start ray tracing: accumulate up to 999 passes, saving a snapshot of the
# sRGB pipeline after every pass.
ion()
for pass_no in range(1, 1000):
    print("Rendering pass {}".format(pass_no))
    camera.observe()
    rgb.save("demo_roughen_{}.png".format(pass_no))
    print()
material=Checkerboard(4, d65_white, d65_white, 0.1, 2.0), transform=rotate(45, 0, 0))
# NOTE(review): the line above closes a primitive whose constructor begins
# before this excerpt — presumably the ground plane; confirm against the
# full file.

# Sphere
# Note that the sphere must be displaced slightly above the ground plane to prevent numerical issues that could
# cause a light leak at the intersection between the sphere and the ground.
sphere = Sphere(radius=1.5, transform=translate(0, 0.0001, 0), material=schott("N-BK7"))

# 2. Add Observer
# ---------------

# Process the ray-traced spectra with the RGB pipeline.
rgb = RGBPipeline2D()

# camera positioned above and behind the sphere, looking down at 45 degrees
camera = PinholeCamera((512, 512), pipelines=[rgb], transform=translate(0, 10, -10) * rotate(0, -45, 0))

# camera - pixel sampling settings
camera.fov = 45
camera.pixel_samples = 250

# camera - ray sampling settings
camera.spectral_rays = 1
camera.spectral_bins = 20
camera.ray_max_depth = 100
camera.ray_extinction_prob = 0.1
# restrict sampling to the visible band
camera.min_wavelength = 375.0
camera.max_wavelength = 740.0

# 3. Build Scenegraph
from raysect.optical import World, translate, rotate
from raysect.optical.observer import PinholeCamera
from raysect.primitive import Sphere

# Minimal scene: a single unit sphere offset along the x axis.
world = World()
demo_sphere = Sphere(parent=world, transform=translate(2, 0, 0))

# Observer slightly above the origin, tilted down towards the sphere.
view_transform = translate(0, 0.16, -0.4) * rotate(0, -12, 0)
camera = PinholeCamera((256, 256), fov=40, parent=world, transform=view_transform)

# raysect_vtk opens an interactive VTK view of the scenegraph; imported here,
# as in the original, so the heavy VTK dependency loads only at this point.
from raysect_vtk import visualise_scenegraph

visualise_scenegraph(camera, focal_distance=3, zoom=0.5)
# Meniscus lens at the origin, sharing the tilt defined earlier in the script.
Meniscus(0.0254, 0.0036, 0.0321, 0.0822, parent=world,
         transform=translate(0, 0, 0) * rotate(rotation, 0.0, 0.0), material=schott("N-BK7"))

# Checkerboard backdrop just behind the lens plane.
backdrop_material = Checkerboard(0.01, d65_white, d65_white, 0.4, 0.8)
Box(Point3D(-50.0, -50.0, 0.1), Point3D(50.0, 50.0, 0.2), world, material=backdrop_material)

# Configure the observer, trace the scene and save the rendered frame.
plt.ion()
camera = PinholeCamera((512, 512), fov=45, parent=world,
                       transform=translate(0, 0, -0.1) * rotate(0, 0, 0))
camera.pixel_samples = 100
camera.spectral_rays = 1
camera.spectral_bins = 20

camera.observe()
camera.pipelines[0].save("lens_render.png")
plt.ioff()
plt.show()
distance = 3.2

# Ring of emitting spheres, each rotated by a multiple of the base angle and
# resting just above the ground plane (tiny y offset avoids light leaks).
for index in range(9):
    tilt = angle_increments[index] * angle
    Sphere(radius, world,
           transform=rotate(tilt, 0, 0) * translate(0, radius + 0.00001, distance),
           material=UniformSurfaceEmitter(colours[index]))

# diffuse ground plane
Box(Point3D(-100, -0.1, -100), Point3D(100, 0, 100), world, material=Lambert(ConstantSF(0.5)))

rgb = RGBPipeline2D(name="sRGB")
sampler = RGBAdaptiveSampler2D(rgb, ratio=10, fraction=0.2, min_samples=500, cutoff=0.05)

# observer
camera = PinholeCamera((512, 256), fov=42, parent=world,
                       transform=translate(0, 3.3, 0) * rotate(0, -47, 0),
                       pipelines=[rgb], frame_sampler=sampler)
camera.spectral_bins = 25
camera.pixel_samples = 250

# start ray tracing: repeat passes until the adaptive sampler converges
ion()
pass_number = 1
while not camera.render_complete:
    print("Rendering pass {}...".format(pass_number))
    camera.observe()
    print()
    pass_number += 1
ioff()

rgb.display()
# import cProfile
#
# def profile_test(n=25000):
#     r = Ray(origin=Point(0.0, 0, 0), min_wavelength=526, max_wavelength=532, num_samples=100)
#     for i in range(n):
#         r.trace(world)
#
# cProfile.run("profile_test()", sort="tottime")

# Trace a single ray through the scene at two spectral resolutions and plot
# both sampled spectra on the same figure.
ion()
for bin_count in (800, 1600):
    probe = Ray(origin=Point3D(0.5, 0, -2.5), min_wavelength=440, max_wavelength=740, bins=bin_count)
    spectrum = probe.trace(world)
    plot(spectrum.wavelengths, spectrum.samples)
show()

# Render the same scene with a low-resolution pinhole camera.
camera = PinholeCamera((128, 128), parent=world, transform=translate(0, 0, -2.5))
camera.spectral_rays = 1
camera.spectral_bins = 21
camera.pixel_samples = 50

ion()
camera.observe()
ioff()

camera.pipelines[0].display()
# Power pipeline viewing the scene through the green colour filter.
power_green = PowerPipeline2D(filter=filter_green, display_unsaturated_fraction=0.96, name="Green Filter")
power_green.display_update_time = 15

# Power pipeline viewing the scene through the red colour filter.
power_red = PowerPipeline2D(filter=filter_red, display_unsaturated_fraction=0.96, name="Red Filter")
power_red.display_update_time = 15

# Standard sRGB pipeline — also drives the adaptive sampler below.
rgb = RGBPipeline2D(display_unsaturated_fraction=0.96, name="sRGB")

# Bayer-mosaic pipeline combining the three colour filters.
bayer = BayerPipeline2D(filter_red, filter_green, filter_blue, display_unsaturated_fraction=0.96, name="Bayer Filter")
bayer.display_update_time = 15

# All pipelines observe simultaneously from the same camera.
# NOTE(review): power_unfiltered is defined before this excerpt.
pipelines = [rgb, power_unfiltered, power_green, power_red, bayer]

# Adaptive sampling driven by noise in the sRGB frame only.
sampler = RGBAdaptiveSampler2D(rgb, ratio=10, fraction=0.2, min_samples=500, cutoff=0.05)

camera = PinholeCamera((512, 512), parent=world, transform=translate(0, 0, -3.3) * rotate(0, 0, 0), pipelines=pipelines)
camera.frame_sampler = sampler
camera.pixel_samples = 250
camera.spectral_bins = 15
camera.spectral_rays = 1
camera.ray_importance_sampling = True
camera.ray_important_path_weight = 0.25
# Deep recursion with Russian-roulette extinction after depth 3.
camera.ray_max_depth = 500
camera.ray_extinction_min_depth = 3
camera.ray_extinction_prob = 0.01

# start ray tracing
ion()
p = 1
# NOTE(review): the body of this render loop continues beyond this excerpt.
while not camera.render_complete: