def __init__(self, task):
    assert isinstance(task, RenderTask)
    self.task = task
    self.random = Random()
    self.raytracer = RayTracer(task.getScene())
    self.progress = 0.0
def get_frame(self, scene, image):
    raytracer = RayTracer(scene)
    aspect = float(image.height) / float(image.width)
    for y in range(image.height):
        for x in range(image.width):
            x_coefficient = ((x + random()) * 2.0 / image.width) - 1.0
            y_coefficient = ((y + random()) * 2.0 / image.height) - 1.0
            offset = self.right * x_coefficient + self.up * (y_coefficient * aspect)
            sample_direction = (self.view_direction +
                                (offset * tan(self.view_angle * 0.5))).unitize()
            radiance = raytracer.get_radiance(self.view_position, sample_direction)
            image.add_to_pixel(x, y, radiance)
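# --- Hedged illustration (not part of the original code) ---------------------
# A standalone sketch of the pixel-to-ray mapping used in get_frame above,
# written with numpy arrays instead of the project's vector class. The name
# pixel_to_direction and the explicit right/up/view_direction parameters are
# assumptions made for illustration only.
import numpy as np
from math import tan
from random import random


def pixel_to_direction(x, y, width, height, right, up, view_direction,
                       view_angle):
    """Map pixel (x, y) to a jittered unit sample direction."""
    aspect = float(height) / float(width)
    # Jittered pixel position mapped to [-1, 1] in both axes.
    x_coefficient = ((x + random()) * 2.0 / width) - 1.0
    y_coefficient = ((y + random()) * 2.0 / height) - 1.0
    # Offset in the image plane, scaled by the tangent of half the view angle.
    offset = right * x_coefficient + up * (y_coefficient * aspect)
    direction = view_direction + offset * tan(view_angle * 0.5)
    return direction / np.linalg.norm(direction)


# Example: a camera looking down -z with a 90 degree view angle.
sample_dir = pixel_to_direction(0, 0, 640, 480,
                                right=np.array([1.0, 0.0, 0.0]),
                                up=np.array([0.0, 1.0, 0.0]),
                                view_direction=np.array([0.0, 0.0, -1.0]),
                                view_angle=np.pi / 2)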
def __init__(self, task):
    if not isinstance(task, RenderTask):
        raise TypeError(
            "Incorrect task type: {}. Should be RenderTask".format(
                type(task)))
    self.task = task
    self.random = Random()
    self.raytracer = RayTracer(task.getScene())
    self.progress = 0.0
def setUp(self):
    """
    This is automatically called by the unittest suite when the class is
    created.

    This set-up assumes the following:

        Black hole spin = 0.8
        Sensor shape = 10 x 10

        Camera properties:
            - r = 10
            - theta = 1.415
            - phi = 0

        Ray properties:
            - ThetaCS = 1.445
            - PhiCS = -0.659734

    These properties generate a ray that curves around the black hole and
    comes back. The initial conditions and black hole properties are
    selected so that the ray is very unstable, in the sense that a small
    variation of this set-up makes the ray end in the horizon. This makes
    it a good test of numerical stability.
    """
    # Black hole constants
    spin = 0.8
    innerDiskRadius = 1
    outerDiskRadius = 1

    # Camera position
    camR = 10
    camTheta = 1.415
    camPhi = 0

    # Camera lens properties
    camFocalLength = 3
    camSensorShape = (10, 10)  # (Rows, Columns)
    camSensorSize = (2, 2)     # (Height, Width)

    # Create the black hole, the camera and the metric with the constants
    # above
    blackHole = BlackHole(spin, innerDiskRadius, outerDiskRadius)
    camera = Camera(camR, camTheta, camPhi, camFocalLength,
                    camSensorShape, camSensorSize)
    kerr = KerrMetric(camera, blackHole)

    # Set the camera's speed (it needs the Kerr metric constants)
    camera.setSpeed(kerr, blackHole)

    # Monkey-patch the raytracer with the overrider
    RayTracer.override_initial_conditions = override_initial_conditions
    RayTracer.collect_rays = collect_rays

    # Create the raytracer!
    self.rayTracer = RayTracer(camera, kerr, blackHole)

    # Override the initial conditions of the raytracer using the
    # monkey-patched overrider
    self.rayTracer.override_initial_conditions(10, 1.415, 0,
                                               1.445, -0.659734)
def get_frame(self, scene, image, frame_type='depth'):
    raytracer = RayTracer(scene)
    aspect = float(image.height) / float(image.width)
    for y in range(image.height):
        for x in range(image.width):
            x_coefficient = ((x + random()) * 2.0 / image.width) - 1.0
            y_coefficient = ((y + random()) * 2.0 / image.height) - 1.0
            offset = self.right * x_coefficient + self.up * (y_coefficient * aspect)
            sample_direction = (self.view_direction +
                                (offset * tan(self.view_angle * 0.5))).unitize()
            if frame_type == 'depth':
                distance = raytracer.get_distance_to_first_hit(self.view_position,
                                                               sample_direction)
                image.add_to_pixel(x, y, distance)
            elif frame_type == 'reverse_depth':
                distance = raytracer.get_distance_to_last_hit(self.view_position,
                                                              sample_direction)
                image.add_to_pixel(x, y, distance)
            else:
                radiance = raytracer.get_radiance(self.view_position,
                                                  sample_direction)
                image.add_to_pixel(x, y, radiance)
def get_frame(self, scene, image):
    raytracer = RayTracer(scene)
    aspect = float(image.height) / float(image.width)
    for y in range(image.height):
        for x in range(image.width):
            # convert x, y into sample_direction
            x_coefficient = ((x + random()) * 2.0 / image.width) - 1.0
            y_coefficient = ((y + random()) * 2.0 / image.height) - 1.0
            offset = self.right * x_coefficient + self.up * (y_coefficient * aspect)
            sample_direction = (self.view_direction +
                                (offset * tan(self.view_angle * 0.5))).unitize()
            # calculate radiance from that direction
            radiance = raytracer.get_radiance(self.view_position, sample_direction)
            # and add to image
            image.add_radiance(x, y, radiance)
def pixel_accumulated_radiance(self, scene, random, width, height, x, y,
                               aspect, num_samples):
    raytracer = RayTracer(scene)
    acc_radiance = [0.0, 0.0, 0.0]
    for i in range(num_samples):
        x_coefficient = ((x + random.real64()) * 2.0 / width) - 1.0
        y_coefficient = ((y + random.real64()) * 2.0 / height) - 1.0
        offset = self.right * x_coefficient + self.up * (y_coefficient * aspect)
        sample_direction = (
            self.view_direction +
            (offset * tan(self.view_angle * 0.5))).unitize()
        radiance = raytracer.get_radiance(self.view_position,
                                          sample_direction, random)
        acc_radiance[0] += radiance[0]
        acc_radiance[1] += radiance[1]
        acc_radiance[2] += radiance[2]
    return Vector3f(acc_radiance[0], acc_radiance[1], acc_radiance[2])
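# --- Hedged illustration (not part of the original code) ---------------------
# pixel_accumulated_radiance returns the *sum* of num_samples radiance
# samples; a Monte Carlo estimate of the pixel value is that sum divided by
# the sample count. Plain lists stand in for Vector3f here, purely for
# illustration.
def average_radiance(samples):
    acc = [0.0, 0.0, 0.0]
    for r in samples:
        acc[0] += r[0]
        acc[1] += r[1]
        acc[2] += r[2]
    n = float(len(samples))
    return [acc[0] / n, acc[1] / n, acc[2] / n]


# Two (r, g, b) samples average to their midpoint.
assert average_radiance([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) == [0.5, 0.5, 0.0]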
class Scene:

    def __init__(self):
        self.film = Film()
        self.sampler = TileSampler()
        self.ray_tracer = RayTracer()
        self.camera = Camera()

    def render(self):
        width = self.film.image.shape[1]
        height = self.film.image.shape[0]
        for sample in self.sampler.get_sample():
            ray = self.camera.generate_ray(sample, width, height)
            color = self.ray_tracer.trace(ray, 0)
            self.film.commit(sample, color)
            if int(sample[1]) % 50 == 0:
                print('%s: (%d, %d)' % (self.film.filename, sample[0], sample[1]))
        self.film.write_image()
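# --- Hedged usage sketch (not part of the original code) ---------------------
# Assuming Film, TileSampler, RayTracer and Camera all take no constructor
# arguments, as in the Scene class above, rendering an image is just:
if __name__ == '__main__':
    scene = Scene()
    scene.render()  # traces every sample from the tile sampler and writes the image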
# Camera position
camR = 30
camTheta = 1.511
camPhi = 0

# Camera lens properties
camFocalLength = 1.5
camSensorShape = (1000, 1500)  # (Rows, Columns)
camSensorSize = (2, 3)         # (Height, Width)

# Create the black hole, the camera and the metric with the constants
# above
blackHole = BlackHole(spin, innerDiskRadius, outerDiskRadius)
camera = Camera(camR, camTheta, camPhi, camFocalLength,
                camSensorShape, camSensorSize)
kerr = KerrMetric(camera, blackHole)

# Set camera's speed (it needs the kerr metric constants)
camera.setSpeed(kerr, blackHole)

# Create the raytracer!
rayTracer = RayTracer(camera, kerr, blackHole)
rayTracer.rayTrace(-170, kernelCalls=1)

# Load the textures
disk = mimg.imread('../../Res/Textures/adisk.png')[:, :, :3]
sphere = mimg.imread('../../Res/Textures/milkyWay.png')[:, :, :3]

# Create the image
rayTracer.texturedImage(disk.astype(np.float64), sphere.astype(np.float64))
camFocalLength = 3
camSensorShape = (101, 101)  # (Rows, Columns)
camSensorSize = (2, 2)       # (Height, Width)

# Create the black hole, the camera and the metric with the constants
# above
blackHole = BlackHole(spin)
camera = Camera(camR, camTheta, camPhi, camFocalLength,
                camSensorShape, camSensorSize)
kerr = KerrMetric(camera, blackHole)

# Set camera's speed (it needs the kerr metric constants)
camera.setSpeed(kerr, blackHole)

# Create the raytracer!
rayTracer = RayTracer(camera, kerr, blackHole, debug=False)

# Set initial and final times, the number of the steps for the
# simulation and compute the step size
tInit = 0.
tEnd = -90.
numSteps = 200
stepSize = (tEnd - tInit) / numSteps

# Retrieve the initial state of the system for plotting purposes
rays = rayTracer.systemState[:, :, :3]
plotData = np.zeros(rays.shape + (numSteps + 1, ))
plotData[:, :, :, 0] = rays

status = np.empty(camSensorShape + (numSteps + 1, ))
status[:, :, 0] = 1
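# --- Hedged sketch (not part of the original code) ----------------------------
# The loop this set-up is preparing for presumably advances the rays one
# stepSize at a time and records their coordinates for plotting. Only
# rayTrace(t, kernelCalls=...) and systemState are taken from the snippets in
# this file; whether rayTrace accepts intermediate times like this is an
# assumption.
for step in range(1, numSteps + 1):
    rayTracer.rayTrace(tInit + step * stepSize, kernelCalls=1)
    plotData[:, :, :, step] = rayTracer.systemState[:, :, :3]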
# Camera lens properties
camFocalLength = 3
camSensorShape = (1000, 1000)  # (Rows, Columns)
camSensorSize = (2, 2)         # (Height, Width)

# Create the black hole, the camera and the metric with the constants
# above
blackHole = BlackHole(spin, innerDiskRadius, outerDiskRadius)
camera = Camera(camR, camTheta, camPhi, camFocalLength,
                camSensorShape, camSensorSize)
kerr = KerrMetric(camera, blackHole)

# Set camera's speed (it needs the kerr metric constants)
camera.setSpeed(kerr, blackHole)

for _ in range(1):
    # Create the raytracer!
    rayTracer = RayTracer(camera, kerr, blackHole)

    # Draw the image
    rayTracer.rayTrace(-128, kernelCalls=1)
    print("Time: ", rayTracer.totalTime)

    rayTracer.synchronise()
    # np.savetxt("data.csv", rayTracer.systemState[20, 20, :])

    rayTracer.plotImage()

    # Generate the 3D scene
    # rayTracer.generate3Dscene(-70, 500)
    # rayTracer.plotScene()
class RenderWorker:

    @classmethod
    def createWorker(cls, renderTask):
        if not renderTask.isValid():
            return None
        return RenderWorker(renderTask)

    def __init__(self, task):
        if not isinstance(task, RenderTask):
            raise TypeError(
                "Incorrect task type: {}. Should be RenderTask".format(
                    type(task)))
        self.task = task
        self.random = Random()
        self.raytracer = RayTracer(task.getScene())
        self.progress = 0.0

    def get_progress(self):
        return self.progress

    def sample_radiance(self, x, y, w, h, aspect, camera, scene, num_samples):
        acc_radiance = [0.0, 0.0, 0.0]
        for i in range(num_samples):
            x_coefficient = ((x + self.random.real64()) * 2.0 / w) - 1.0
            y_coefficient = ((y + self.random.real64()) * 2.0 / h) - 1.0
            offset = camera.right * x_coefficient + camera.up * (
                y_coefficient * aspect)
            sample_direction = (
                camera.view_direction +
                (offset * tan(camera.view_angle * 0.5))).unitize()
            radiance = self.raytracer.get_radiance(camera.view_position,
                                                   sample_direction,
                                                   self.random)
            acc_radiance[0] += radiance[0]
            acc_radiance[1] += radiance[1]
            acc_radiance[2] += radiance[2]
        return Vector3f(acc_radiance[0], acc_radiance[1], acc_radiance[2])

    def getXY(self, idx, w):
        return idx % w, idx // w

    def renderingFinished(self, pixels):
        result = RenderTaskResult.createRenderTaskResult(self.task.getDesc(),
                                                         pixels)
        if result:
            if self.task.callback:
                self.task.callback(result)
        else:
            print("Failed to acquire result")
        return result

    def render(self):
        desc = self.task.getDesc()
        x, y, w, h = desc.getX(), desc.getY(), desc.getW(), desc.getH()
        num_pixels, num_samples = desc.getNumPixels(), desc.getNumSamples()
        aspect = float(h) / float(w)
        offset = y * w + x
        id = desc.getID()
        pixels = [0.0] * 3 * num_pixels
        cam = self.task.getCamera()
        scn = self.task.getScene()
        for k in range(num_pixels):
            x, y = self.getXY(k + offset, w)
            radiance = self.sample_radiance(x, y, w, h, aspect, cam, scn,
                                            num_samples)
            pixels[3 * k + 0] = radiance[0]
            pixels[3 * k + 1] = radiance[1]
            pixels[3 * k + 2] = radiance[2]
            # Update the worker's progress so get_progress() reflects it
            self.progress = float(k + 1) / float(num_pixels)
        return self.renderingFinished(pixels)
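# --- Hedged illustration (not part of the original code) ---------------------
# Quick standalone check of the index-to-pixel mapping used by render():
# getXY turns a flat index into (x, y) coordinates for a tile that is w
# pixels wide.
def get_xy(idx, w):
    return idx % w, idx // w


assert get_xy(0, 8) == (0, 0)
assert get_xy(7, 8) == (7, 0)
assert get_xy(8, 8) == (0, 1)  # wraps onto the next row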
class Test_Solver(unittest.TestCase):
    """Test suite for the raytracer RK45 solver."""

    def setUp(self):
        """
        This is automatically called by the unittest suite when the class
        is created.

        This set-up assumes the following:

            Black hole spin = 0.8
            Sensor shape = 10 x 10

            Camera properties:
                - r = 10
                - theta = 1.415
                - phi = 0

            Ray properties:
                - ThetaCS = 1.445
                - PhiCS = -0.659734

        These properties generate a ray that curves around the black hole
        and comes back. The initial conditions and black hole properties
        are selected so that the ray is very unstable, in the sense that a
        small variation of this set-up makes the ray end in the horizon.
        This makes it a good test of numerical stability.
        """
        # Black hole constants
        spin = 0.8
        innerDiskRadius = 1
        outerDiskRadius = 1

        # Camera position
        camR = 10
        camTheta = 1.415
        camPhi = 0

        # Camera lens properties
        camFocalLength = 3
        camSensorShape = (10, 10)  # (Rows, Columns)
        camSensorSize = (2, 2)     # (Height, Width)

        # Create the black hole, the camera and the metric with the
        # constants above
        blackHole = BlackHole(spin, innerDiskRadius, outerDiskRadius)
        camera = Camera(camR, camTheta, camPhi, camFocalLength,
                        camSensorShape, camSensorSize)
        kerr = KerrMetric(camera, blackHole)

        # Set the camera's speed (it needs the Kerr metric constants)
        camera.setSpeed(kerr, blackHole)

        # Monkey-patch the raytracer with the overrider
        RayTracer.override_initial_conditions = override_initial_conditions
        RayTracer.collect_rays = collect_rays

        # Create the raytracer!
        self.rayTracer = RayTracer(camera, kerr, blackHole)

        # Override the initial conditions of the raytracer using the
        # monkey-patched overrider
        self.rayTracer.override_initial_conditions(10, 1.415, 0,
                                                   1.445, -0.659734)

    def test_mathematica_comparison(self):
        """
        This test integrates the initial conditions described in the setUp
        method on a 10 x 10 grid using steps of -0.1 and compares each ray
        (they are all supposed to be equal) to the result of the
        integration using Mathematica's NDSolve engine with the following
        options:

            - WorkingPrecision = 30

        As the ray is integrated using the RK45 solver, the ray coordinates
        at each step are expected to coincide up to 5 decimal places.
        """
        # Reset the initial conditions of the raytracer
        self.rayTracer.override_initial_conditions(10, 1.415, 0,
                                                   1.445, -0.659734)

        # Integrate the rays and collect the data
        self.rayTracer.collect_rays()

        # Read Mathematica's ray data from the csv
        mathematica_data = np.genfromtxt("test_data/test_ray.csv",
                                         delimiter=",")
        mathematica_data = mathematica_data[:301]

        # Compare each pixel's ray coordinates up to 5 decimal places with
        # Mathematica's result.
        # Sorry for the loop with the numpy array!!
        for row in range(0, self.rayTracer.imageRows):
            for col in range(0, self.rayTracer.imageCols):
                npt.assert_almost_equal(mathematica_data,
                                        self.rayTracer.rayData[row, col].T,
                                        decimal=5)

    def test_idempotency(self):
        """
        This test checks that the result of integrating the ray with the
        initial conditions of the setUp method is the same no matter how
        many times we call the kernel (after resetting the initial
        conditions). The purpose of the test is to detect memory leaks and
        errors in the kernel.
""" # Reset the initial conditions of the raytracer self.rayTracer.override_initial_conditions(10, 1.415, 0, 1.445, -0.659734) # Integrate the rays and collect the data self.rayTracer.collect_rays() # Read the Mathematica's ray data from the csv first_run_data = self.rayTracer.rayData # Second run self.rayTracer.override_initial_conditions(10, 1.415, 0, 1.445, -0.659734) self.rayTracer.collect_rays() second_run_data = self.rayTracer.rayData self.assertTrue(np.all(first_run_data == second_run_data))
def __init__(self):
    self.film = Film()
    self.sampler = TileSampler()
    self.ray_tracer = RayTracer()
    self.camera = Camera()