def render(self):
    """Render this scene through every registered camera.

    The camera list is unrolled at compile time; if it is empty, a
    compile-time warning is printed instead.
    """
    if ti.static(len(self.cameras)):
        for cam in ti.static(self.cameras):
            cam.render(self)
    else:
        ti.static_print('Warning: no cameras')
def denoise(self, alpha: ti.template()):
    """Blend each pixel of self.buf toward the average of its four
    axis-aligned neighbors.

    `alpha` is a compile-time blend weight in [0, 1]; 0 compiles the
    pass away entirely.
    """
    ti.static_print('denoise', alpha)
    if ti.static(alpha != 0):
        for P in ti.grouped(self.buf):
            cur = ts.clamp(self.buf[P])
            nb_sum = (self.buf[P + ts.D.x_] + self.buf[P + ts.D.X_]
                      + self.buf[P + ts.D._x] + self.buf[P + ts.D._X])
            nb_avg = ts.clamp(nb_sum / 4)
            #amax = ts.clamp(max(self.buf[P + ts.D.x_], self.buf[P + ts.D.X_], self.buf[P + ts.D._x], self.buf[P + ts.D._X]))
            #amin = ts.clamp(min(self.buf[P + ts.D.x_], self.buf[P + ts.D.X_], self.buf[P + ts.D._x], self.buf[P + ts.D._X]))
            #if cur <= amin + throttle or cur >= amax - throttle:
            self.buf[P] = cur * (1 - alpha) + nb_avg * alpha
def display_output_line_init_points(self) -> ti.i32:
    """Distribute self.output_line_num_points sample points evenly
    between the two configured endpoints of the output line.

    NOTE(review): annotated ``-> ti.i32`` but no value is returned —
    confirm whether the annotation is vestigial.
    """
    ti.static_print("init output line search")
    # Endpoints of the probe line, read from the 2-component field.
    start = ti.Vector(
        [self.output_line_ends[0][0], self.output_line_ends[0][1]])
    end = ti.Vector(
        [self.output_line_ends[1][0], self.output_line_ends[1][1]])
    # Constant spacing so the last point lands exactly on `end`.
    step = (end - start) / (self.output_line_num_points - 1)
    for k in range(self.output_line_num_points):
        self.output_line_points[k] = start + step * k
def _loadrays(self, topleft: ti.template(), region: ti.template(), skipstep: ti.template()):
    """Initialize one batch of primary rays for a sub-rectangle of the image.

    First clears the covered image pixels, then emits one ray per region
    cell with a random sub-cell jitter, storing origin/direction/color
    and the cell index into the ray buffers (self.ro/rd/rc/rI).
    All three arguments are compile-time template parameters.
    """
    ti.static_print('loadrays:', topleft, region, skipstep)
    # Pass 1: zero every image pixel this batch will accumulate into.
    for II in ti.grouped(ti.ndrange(*region)):
        I = II * skipstep + topleft
        # skipstep x skipstep footprint per region cell, unrolled at compile time.
        for J in ti.static(ti.grouped(ti.ndrange(skipstep, skipstep))):
            self.img[I + J] *= 0
    # Pass 2: generate one jittered ray per region cell.
    for II in ti.grouped(ti.ndrange(*region)):
        # Linearized ray-buffer index; stride is region[0].
        # NOTE(review): row-major linearization would usually stride by
        # region[1] — confirm the intended cell ordering against the
        # consumer of self.rI.
        i = II.dot(ts.vec(1, region[0]))
        # Jittered (fractional) pixel position within the cell's footprint.
        I = II * skipstep + topleft + skipstep * ti.random()
        # Normalized camera-plane coordinate via intrinsics (cx, cy, fx, fy).
        coor = ts.vec2((I.x - self.cx) / self.fx, (I.y - self.cy) / self.fy)
        orig, dir = self.generate(coor)
        self.ro[i] = orig            # ray origin
        self.rd[i] = dir             # ray direction
        self.rc[i] = ts.vec3(1.0)    # ray throughput/color, starts at white
        self.rI[i] = II              # owning region cell, for write-back
def backtrace(self, vf, u, v, dt):
    """Semi-Lagrangian backtrace from cell center (u, v) through the
    velocity field `vf` for one step `dt`.

    The Runge-Kutta order is selected at compile time by self.RK
    (1, 2 or 3); returns the departure point as a 2-vector.
    """
    p = ti.Vector([u, v]) + 0.5  # start at the cell center
    if ti.static(self.RK == 1):
        p -= dt * vf[u, v]  # RK1 (explicit Euler)
    elif ti.static(self.RK == 2):
        # RK2: sample the velocity at the midpoint of the step.
        mid = p - 0.5 * dt * vf[u, v]
        p -= dt * self.sample(vf, mid[0], mid[1])
    elif ti.static(self.RK == 3):
        # RK3 (Ralston): stage points at 1/2 and 3/4, weights 2/9, 1/3, 4/9.
        v1 = vf[u, v]
        p1 = p - 0.5 * dt * v1
        v2 = self.sample(vf, p1[0], p1[1])
        # BUGFIX: the second stage point must use the stage velocity v2,
        # not the coordinate argument `v` (which shadowed it here).
        p2 = p - 0.75 * dt * v2
        v3 = self.sample(vf, p2[0], p2[1])
        p -= dt * (2 / 9 * v1 + 1 / 3 * v2 + 4 / 9 * v3)
    else:
        ti.static_print(f"unsupported order for RK{self.RK}")
    return p
def backtrace(I, dt):
    """Backtrace grid index `I` one step `dt` through `velocity`.

    The integration order is chosen at compile time by the global `rk`
    (1, 2 or 3 — explicit Euler, midpoint, or Ralston's third-order
    scheme); returns the departure position in world space.
    """
    pos = (I + stagger) * dx  # staggered sample position in world units
    if ti.static(rk == 1):
        pos -= dt * velocity(pos)  # explicit Euler
    elif ti.static(rk == 2):
        # Midpoint rule: evaluate velocity halfway along the step.
        mid = pos - 0.5 * dt * velocity(pos)
        pos -= dt * velocity(mid)
    elif ti.static(rk == 3):
        # Ralston RK3: stage points at 1/2 and 3/4, weights 2/9, 1/3, 4/9.
        k1 = velocity(pos)
        k2 = velocity(pos - 0.5 * dt * k1)
        k3 = velocity(pos - 0.75 * dt * k2)
        pos -= dt * (2 / 9 * k1 + 1 / 3 * k2 + 4 / 9 * k3)
    else:
        ti.static_print(f"RK{rk} is not supported.")
    return pos
def render(self, scene):
    """Draw `scene` into this camera's framebuffer.

    Clears the buffer, points every light and model at this camera's
    view, rasterizes each model, then flushes the framebuffer. All
    scene lists are unrolled at compile time.
    """
    self.fb.clear_buffer()
    # Set up light directions for this view.
    if ti.static(len(scene.lights)):
        for lt in ti.static(scene.lights):
            lt.set_view(self)  # TODO: t3.Light should be a subclass of t3.ModelBase?
    else:
        ti.static_print('Warning: no lights')
    if ti.static(len(scene.models)):
        # First pass sets each model's ModelView matrix, second rasterizes.
        for mdl in ti.static(scene.models):
            mdl.set_view(self)
        for mdl in ti.static(scene.models):
            mdl.render(self)
    else:
        ti.static_print('Warning: no models')
    self.fb.update_buffer()
def _render(self):
    """Render the scene once per registered camera.

    For each camera: clear its framebuffer, orient every light toward
    it, then draw every model. Empty camera/light/model lists produce
    compile-time warnings. All loops are unrolled at compile time.
    """
    if ti.static(len(self.cameras)):
        for cam in ti.static(self.cameras):
            cam.fb.clear_buffer()
            # Set up light directions for this camera's view.
            if ti.static(len(self.lights)):
                for lt in ti.static(self.lights):
                    lt.set_view(cam)
            else:
                ti.static_print('Warning: no lights')
            if ti.static(len(self.models)):
                for mdl in ti.static(self.models):
                    mdl.render(cam)
            else:
                ti.static_print('Warning: no models')
    else:
        ti.static_print('Warning: no cameras')
def func():
    """Fill the global field `x` with abs(-k) for k in [0, N) and echo
    each element both at run time (print) and at compile time
    (ti.static_print)."""
    for k in range(N):
        x[k] = abs(-k)
        print(x[k])
        ti.static_print(x[k])
def render(self):
    """Render every registered buffer; the list is unrolled at compile
    time, and an empty list yields a compile-time warning."""
    if ti.static(len(self.buffers)):
        for buf in ti.static(self.buffers):
            buf.render()
    else:
        ti.static_print('Warning: no cameras / buffers')