def render(self, world, use_alpha=True):
    """Render ``world`` into an ``Image`` by ray marching.

    Accumulates ``self.antialiasing`` samples: each sample casts one ray per
    pixel through a (jittered) sub-pixel position, then advances the rays for
    ``self.steps`` iterations via ``self.step``. Colors are averaged over the
    samples; alpha is OR-accumulated (a pixel is covered if any sample hit).

    Args:
        world: scene object, passed through to ``self.step``.
        use_alpha: forwarded to ``Image.from_flat``; whether the result keeps
            the accumulated alpha channel.

    Returns:
        ``Image`` built from the averaged, sqrt-mapped colors and the
        combined alpha mask.
    """
    n_pixels = self.image_width * self.image_height
    total_colors = torch.zeros((n_pixels, 3), device=self.device)
    total_alpha = torch.zeros((n_pixels, 1), device=self.device)
    for _ in range(self.antialiasing):
        # Normalized pixel-grid coordinates in [0, 1): x repeats per row,
        # y is repeated once per column of a row.
        # NOTE(review): x/y are allocated on the default device (no
        # ``device=``) — confirm this is intended when self.device != 'cpu'.
        x = torch.tile(
            torch.linspace(0, (self.out_shape[1] - 1) / self.out_shape[1],
                           self.out_shape[1]),
            (self.out_shape[0], )).unsqueeze(1)
        y = torch.repeat_interleave(
            torch.linspace(0, (self.out_shape[0] - 1) / self.out_shape[0],
                           self.out_shape[0]),
            self.out_shape[1]).unsqueeze(1)
        if self.antialiasing != 1:
            # Jitter inside the pixel footprint so samples cover the pixel.
            # Skipped for a single sample so the render stays deterministic,
            # matching the other render() implementation in this file.
            x += torch.rand(x.shape) / self.out_shape[1]
            y += torch.rand(y.shape) / self.out_shape[0]
        ray = Rays(origin=self.origin,
                   directions=self.lower_left_corner + x * self.horizontal +
                   y * self.vertical - self.origin,
                   device=self.device)
        # Every ray starts at the background color with zero coverage.
        color = torch.full(ray.pos.size(), self.background_color,
                           device=self.device)
        alpha = torch.zeros((n_pixels, 1), device=self.device)
        self.timestep_init((n_pixels, 1))
        for _ in tqdm(range(self.steps), disable=not self.debug):
            ray, color, alpha = self.step(ray, world, color, alpha)
        total_colors += color
        # A pixel is opaque if any sample produced nonzero alpha.
        total_alpha = torch.logical_or(total_alpha, alpha)
    # Average over the samples; sqrt is presumably a gamma-2 transfer
    # function — confirm against the project's color pipeline.
    total_colors = torch.sqrt(total_colors / self.antialiasing)
    return Image.from_flat(total_colors, total_alpha, self.image_width,
                           self.image_height, use_alpha=use_alpha)
def render(self, world, antialiasing=1):
    """Render ``world`` into an ``Image`` via recursive ray tracing.

    Accumulates ``antialiasing`` samples: each sample casts one ray per pixel
    through a (jittered) sub-pixel position and traces it with
    ``ray.trace(world, self.render_depth)``. Colors are averaged over samples.

    Args:
        world: scene object, passed through to ``Rays.trace``.
        antialiasing: number of samples per pixel; with 1 the sub-pixel
            jitter is skipped so the render is deterministic.

    Returns:
        ``Image`` built from the averaged, sqrt-mapped colors.
    """
    n_pixels = self.image_width * self.image_height
    # Bug fix: the original allocated with ``device=dev`` where ``dev`` is an
    # undefined name; every other allocation in this file uses self.device.
    colors = torch.zeros((n_pixels, 3), device=self.device)
    for _ in range(antialiasing):
        # Normalized pixel-grid coordinates in [0, 1): x repeats per row,
        # y is repeated once per column of a row.
        x = torch.tile(
            torch.linspace(0, (self.out_shape[1] - 1) / self.out_shape[1],
                           self.out_shape[1]),
            (self.out_shape[0], )).unsqueeze(1)
        y = torch.repeat_interleave(
            torch.linspace(0, (self.out_shape[0] - 1) / self.out_shape[0],
                           self.out_shape[0]),
            self.out_shape[1]).unsqueeze(1)
        if antialiasing != 1:
            # Jitter inside the pixel footprint so samples cover the pixel.
            x += torch.rand(x.shape) / self.out_shape[1]
            y += torch.rand(y.shape) / self.out_shape[0]
        ray = Rays(origin=self.origin,
                   directions=self.lower_left_corner + x * self.horizontal +
                   y * self.vertical - self.origin)
        colors += ray.trace(world, self.render_depth)
    # Average over the samples; sqrt is presumably a gamma-2 transfer
    # function — confirm against the project's color pipeline.
    colors = torch.sqrt(colors / antialiasing)
    return Image.from_flat(colors, self.image_width, self.image_height)