# Assumes: import taichi as ti; import taichi_glsl as tl (aliased as ts in later examples)
def clear_framebuffer(self):
    self.max_valid_sample_step_count[None] = 0
    for i, j, k in self.render_tape:
        self.render_tape[i, j, k] = tl.vec4(0.0)
    for i, j in self.valid_sample_step_count:
        self.valid_sample_step_count[i, j] = 1
        self.output_rgba[i, j] = tl.vec4(0.0)
        self.output_rgb[i, j] = tl.vec3(0.0)
Example #2
def clear_framebuffer(self):
    ''' Clears the framebuffer `output_rgba` and the `render_tape`. '''
    self.max_valid_sample_step_count[None] = 0
    for i, j, k in self.render_tape:
        self.render_tape[i, j, k] = tl.vec4(0.0)
    for i, j in self.valid_sample_step_count:
        self.valid_sample_step_count[i, j] = 1
        self.output_rgba[i, j] = tl.vec4(0.0)
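
# A hedged usage sketch: how these clear kernels plausibly fit into a frame
# loop. `renderer` is a hypothetical instance of the class these methods come
# from; the call order follows the docstrings here and in `raycast` below.
renderer.clear_framebuffer()             # reset tape, counters and output image
renderer.compute_entry_exit()            # required before raycasting
renderer.raycast(sampling_rate=2.0)      # march rays, fill render_tape
image = renderer.output_rgba.to_numpy()  # fetch the composited result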
Example #3
def render_particle(model, camera, index):
    scene = model.scene
    a = (model.L2C[None] @ ts.vec4(model.pos[index], 1)).xyz
    r = model.radius[index]
    A = camera.uncook(a)

    rad = camera.uncook(ts.vec3(r, r, a.z), False)

    M = int(ti.floor(A - rad))
    N = int(ti.ceil(A + rad))
    M = ts.clamp(M, 0, ti.Vector(camera.res))
    N = ts.clamp(N, 0, ti.Vector(camera.res))

    for X in ti.grouped(ti.ndrange((M.x, N.x), (M.y, N.y))):
        pos = camera.cook(float(ts.vec3(X, a.z)))
        dp = pos - a
        dp2 = dp.norm_sqr()

        if dp2 > r**2:
            continue

        dz = ti.sqrt(r**2 - dp2)
        if camera.fb.atomic_depth(X, a.z - dz):
            continue

        n = ts.vec3(dp.xy, -dz)
        normal = ts.normalize(n)
        view = ts.normalize(a + n)

        color = model.colorize(pos, normal)
        camera.fb['img'][X] = color
        camera.fb['normal'][X] = normal
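
# `camera.fb.atomic_depth` is not shown here. A minimal sketch of what it is
# assumed to do (return True when the fragment is occluded, atomically update
# the depth buffer otherwise), using a hypothetical `self.depth` field:
@ti.func
def atomic_depth(self, X, z):
    # ti.atomic_min stores min(old, z) and returns the OLD value, so the
    # fragment wins only if it is nearer than everything stored before it.
    old = ti.atomic_min(self.depth[X], z)
    return old < z  # True -> occluded; the caller skips shading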
Example #4
    def generate(self, coor):
        orig = ts.vec3(0.0)
        dir = ts.vec3(0.0, 0.0, 1.0)

        if ti.static(self.type == self.ORTHO):
            orig = ts.vec3(coor, 0.0)
        elif ti.static(self.type == self.TAN_FOV):
            uv = coor * self.fov
            dir = ts.normalize(ts.vec3(uv, 1))
        elif ti.static(self.type == self.COS_FOV):
            uv = coor * self.fov
            dir = ts.vec3(ti.sin(uv), ti.cos(uv.norm()))

        orig = (self.L2W[None] @ ts.vec4(orig, 1)).xyz
        dir = (self.L2W[None] @ ts.vec4(dir, 0)).xyz

        return orig, dir
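
# A plain-Python restatement of the TAN_FOV branch above, assuming `self.fov`
# stores the tangent of the half field of view (an assumption; only the
# Taichi code above is authoritative):
import math

def perspective_ray(coor_x, coor_y, fov):
    # scale NDC coordinates by tan(fov/2), then normalize (x, y, 1)
    ux, uy = coor_x * fov, coor_y * fov
    inv_len = 1.0 / math.sqrt(ux * ux + uy * uy + 1.0)
    return (ux * inv_len, uy * inv_len, inv_len)  # unit direction, +z forward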
Example #5
def intersect_triangle(model, orig, dir, face):
    posa, posb, posc = face.pos
    texa, texb, texc = face.tex
    nrma, nrmb, nrmc = face.nrm

    L2W = model.L2W[None]  # local-to-world transform
    posa = (L2W @ ts.vec4(posa, 1)).xyz
    posb = (L2W @ ts.vec4(posb, 1)).xyz
    posc = (L2W @ ts.vec4(posc, 1)).xyz
    nrma = (L2W @ ts.vec4(nrma, 0)).xyz
    nrmb = (L2W @ ts.vec4(nrmb, 0)).xyz
    nrmc = (L2W @ ts.vec4(nrmc, 0)).xyz
    tan, bitan = compute_tangent(posb - posa, posc - posa, texb - texa, texc - texa)

    hit = 1e6
    clr = ts.vec3(0.0)
    sa, sb, sc = plucker_bcoor(orig, orig + dir, posa, posb, posc)
    if (sa >= 0 and sb >= 0 and sc >= 0) or (sa <= 0 and sb <= 0 and sc <= 0):
        snorm = sa + sb + sc
        sa /= snorm
        sb /= snorm
        sc /= snorm
        pos = posa * sa + posb * sb + posc * sc
        tex = texa * sa + texb * sb + texc * sc
        nrm = nrma * sa + nrmb * sb + nrmc * sc
        if dir.dot(pos - orig) > 1e-4:
            hit = (pos - orig).norm()
            orig, dir, clr = model.material.radiance(model, pos, dir, tex, nrm, tan, bitan)

    return hit, orig, dir, clr
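
# `plucker_bcoor` is not shown. A plausible sketch based on the standard
# Pluecker side test: the three signed weights agree in sign exactly when the
# ray pierces the triangle, and are proportional to barycentric coordinates
# (the edge orientation below is an assumption):
@ti.func
def plucker_side(p, q, a, b):
    # permuted inner product of the lines p->q and a->b
    return (q - p).dot(a.cross(b)) + (b - a).dot(p.cross(q))

@ti.func
def plucker_bcoor(p, q, a, b, c):
    sa = plucker_side(p, q, b, c)  # weight for vertex a
    sb = plucker_side(p, q, c, a)  # weight for vertex b
    sc = plucker_side(p, q, a, b)  # weight for vertex c
    return sa, sb, sc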
Example #6
def colorize(self):
    pos = self.pos
    normal = self.normal
    res = ts.vec3(0.0)
    viewdir = pos.normalized()
    wpos = (self.model.scene.cameras[-1].L2W @ ts.vec4(pos, 1)).xyz  # TODO: get curr camera?
    if ti.static(self.model.scene.lights):
        for light in ti.static(self.model.scene.lights):
            strength = light.shadow_occlusion(wpos)
            if strength >= 1e-3:
                subclr = self.render_func(pos, normal, viewdir, light)
                res += strength * subclr
    res += self.get_emission()
    return res
Example #7
    def shadow_occlusion(self, wpos):
        if ti.static(self.shadow is None):
            return 1

        lspos = (self.shadow.L2W[None] @ ts.vec4(wpos, 1)).xyz
        lscoor = self.shadow.uncook(lspos)

        cur_idepth = self.shadow.fb.idepth_fixp(1 / lspos.z)

        l = self._sub_SDlerp(cur_idepth, lscoor, ts.D.X_)
        r = self._sub_SDlerp(cur_idepth, lscoor, ts.D.x_)
        t = self._sub_SDlerp(cur_idepth, lscoor, ts.D._x)
        b = self._sub_SDlerp(cur_idepth, lscoor, ts.D._X)
        c = self._sub_SDlerp(cur_idepth, lscoor, ts.D.__)
        return (l + r + t + b + c * 4) / 8
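
# The five taps above form a cross-shaped PCF filter: the four axis neighbours
# weighted 1 and the centre weighted 4, normalised by 8, so a lit centre with
# fully shadowed neighbours still returns 0.5 rather than a hard edge:
def cross_pcf(l, r, t, b, c):
    return (l + r + t + b + 4.0 * c) / 8.0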
Example #8
def v4trans(mat, vec, wei):
    ti.static_assert(vec.n == 3, vec.n)

    if ti.static(vec.m == 1):
        return (mat @ ts.vec4(vec, wei)).xyz

    tmp = ti.Matrix.zero(float, 4, vec.m)
    for i, j in ti.static(ti.ndrange(vec.n, vec.m)):
        tmp[i, j] = vec[i, j]
    for i in ti.static(range(vec.m)):
        tmp[3, i] = wei
    tmp = mat @ tmp
    ret = ti.Matrix.zero(float, vec.n, vec.m)
    for i, j in ti.static(ti.ndrange(vec.n, vec.m)):
        ret[i, j] = tmp[i, j]
    return ret
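
# Usage sketch: the weight argument selects point vs. direction semantics,
# mirroring the ts.vec4(v, 1) / ts.vec4(v, 0) pattern used throughout these
# examples (`local_pos` and `local_nrm` are hypothetical inputs):
world_pos = v4trans(model.L2W[None], local_pos, 1)  # point: translation applies
world_nrm = v4trans(model.L2W[None], local_nrm, 0)  # direction: translation ignored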
Example #9
def raycast(self, sampling_rate: float):
    ''' Produce a rendering. Run compute_entry_exit first! '''
    for i, j in self.valid_sample_step_count:  # For all pixels
        for sample_idx in range(self.sample_step_nums[i, j]):
            look_from = self.cam_pos[None]
            if (self.render_tape[i, j, sample_idx - 1].w < 0.99
                    and sample_idx < ti.static(self.max_samples)):
                tmax = self.exit[i, j]
                n_samples = self.sample_step_nums[i, j]
                ray_len = tmax - self.entry[i, j]
                # Offset tmin by half a step so t_start sits mid-sample
                tmin = self.entry[i, j] + 0.5 * ray_len / n_samples
                vd = self.rays[i, j]
                # Current position along the ray
                t = tl.mix(tmin, tmax, float(sample_idx) / float(n_samples - 1))
                pos = look_from + t * vd
                light_pos = look_from + tl.vec3(0.0, 1.0, 0.0)
                intensity = self.sample_volume_trilinear(pos)
                sample_color = self.apply_transfer_function(intensity)
                # Opacity correction for the chosen sampling rate
                opacity = 1.0 - ti.pow(1.0 - sample_color.w, 1.0 / sampling_rate)
                # if sample_color.w > 1e-3:
                normal = self.get_volume_normal(pos)
                light_dir = (pos - light_pos).normalized()  # Direction to light source
                n_dot_l = max(normal.dot(light_dir), 0.0)
                diffuse = self.diffuse * n_dot_l
                r = tl.reflect(light_dir, normal)  # Direction of reflected light
                r_dot_v = max(r.dot(-vd), 0.0)
                specular = self.specular * pow(r_dot_v, self.shininess)
                shaded_color = tl.vec4(
                    ti.min(1.0, diffuse + specular + self.ambient)
                    * sample_color.xyz * opacity * self.light_color,
                    opacity)
                # Front-to-back compositing with the previous tape entry
                self.render_tape[i, j, sample_idx] = (
                    (1.0 - self.render_tape[i, j, sample_idx - 1].w) * shaded_color
                    + self.render_tape[i, j, sample_idx - 1])
                self.valid_sample_step_count[i, j] += 1
            else:
                self.render_tape[i, j, sample_idx] = self.render_tape[i, j, sample_idx - 1]
Example #10
    def raycast_nondiff(self, sampling_rate: float):
        ''' Raycasts in a non-differentiable (but faster and cleaner) way.
        Use `get_final_image_nondiff` with this.

        Args:
            sampling_rate (float): Sampling rate (multiplier with the Nyquist frequency)
        '''
        for i, j in self.valid_sample_step_count:  # For all pixels
            for cnt in range(self.sample_step_nums[i, j]):
                look_from = self.cam_pos[None]
                if self.render_tape[i, j, 0].w < 0.99:  # early ray termination
                    tmax = self.exit[i, j]
                    n_samples = self.sample_step_nums[i, j]
                    ray_len = tmax - self.entry[i, j]
                    # Offset tmin by half a step so t_start sits mid-sample
                    tmin = self.entry[i, j] + 0.5 * ray_len / n_samples
                    vd = self.rays[i, j]
                    # Current position along the ray
                    t = tl.mix(tmin, tmax, float(cnt) / float(n_samples - 1))
                    pos = look_from + t * vd
                    light_pos = look_from + tl.vec3(0.0, 1.0, 0.0)
                    intensity = self.sample_volume_trilinear(pos)
                    sample_color = self.apply_transfer_function(intensity)
                    # Opacity correction for the chosen sampling rate
                    opacity = 1.0 - ti.pow(1.0 - sample_color.w, 1.0 / sampling_rate)
                    if sample_color.w > 1e-3:  # skip nearly transparent samples
                        normal = self.get_volume_normal(pos)
                        light_dir = (pos - light_pos).normalized()  # Direction to light source
                        n_dot_l = max(normal.dot(light_dir), 0.0)
                        diffuse = self.diffuse * n_dot_l
                        r = tl.reflect(light_dir, normal)  # Direction of reflected light
                        r_dot_v = max(r.dot(-vd), 0.0)
                        specular = self.specular * pow(r_dot_v, self.shininess)
                        shaded_color = tl.vec4(
                            (diffuse + specular + self.ambient)
                            * sample_color.xyz * opacity * self.light_color,
                            opacity)
                        # Composite everything into tape slot 0 (no gradient tape needed)
                        self.render_tape[i, j, 0] = (
                            (1.0 - self.render_tape[i, j, 0].w) * shaded_color
                            + self.render_tape[i, j, 0])
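
# Both raycast kernels above composite front to back: with accumulated
# premultiplied color `acc` and a new sample of color c and opacity a,
#   acc.rgb += (1 - acc.w) * c * a
#   acc.w   += (1 - acc.w) * a
# A plain-Python restatement of that loop, including the 0.99 early-out:
def composite_front_to_back(samples):
    # samples: iterable of ((r, g, b), alpha), color not premultiplied yet
    acc_rgb, acc_a = [0.0, 0.0, 0.0], 0.0
    for (r, g, b), a in samples:
        w = (1.0 - acc_a) * a
        acc_rgb = [acc_rgb[0] + w * r, acc_rgb[1] + w * g, acc_rgb[2] + w * b]
        acc_a += (1.0 - acc_a) * a
        if acc_a >= 0.99:  # early ray termination, same threshold as above
            break
    return acc_rgb, acc_a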
Example #11
def set_view(self, camera):
    self.viewdir[None] = (camera.L2W[None].inverse() @ ts.vec4(self.dir[None], 0)).xyz
Example #12
def set_view(self, camera):
    # TODO: merge t3.PointLight with t3.Light by considering `w`?
    self.viewpos[None] = (camera.L2W[None].inverse() @ ts.vec4(self.pos[None], 1)).xyz
Example #13
def render_triangle(model, camera, face):
    scene = model.scene
    L2C = model.L2C[None]  # Local to Camera, i.e. ModelView in OpenGL
    posa, posb, posc = face.pos
    texa, texb, texc = face.tex
    nrma, nrmb, nrmc = face.nrm
    posa = (L2C @ ts.vec4(posa, 1)).xyz
    posb = (L2C @ ts.vec4(posb, 1)).xyz
    posc = (L2C @ ts.vec4(posc, 1)).xyz
    nrma = (L2C @ ts.vec4(nrma, 0)).xyz
    nrmb = (L2C @ ts.vec4(nrmb, 0)).xyz
    nrmc = (L2C @ ts.vec4(nrmc, 0)).xyz

    pos_center = (posa + posb + posc) / 3
    if ti.static(camera.type == camera.ORTHO):
        pos_center = ts.vec3(0.0, 0.0, 1.0)

    dpab = posa - posb
    dpac = posa - posc
    dtab = texa - texb
    dtac = texa - texc

    normal = ts.cross(dpab, dpac)

    # NOTE: the normal computation indicates that a front-facing face should
    # be COUNTER-CLOCKWISE, i.e., glFrontFace(GL_CCW);
    # this is to be compatible with obj model loading.
    if ts.dot(pos_center, normal) <= 0:
        tan, bitan = compute_tangent(-dpab, -dpac, -dtab, -dtac)  # TODO: node-ize this

        # TODO: interpolate tan and bitan? merge with nrm?
        clra = model.vertex_shader(posa, texa, nrma, tan, bitan)
        clrb = model.vertex_shader(posb, texb, nrmb, tan, bitan)
        clrc = model.vertex_shader(posc, texc, nrmc, tan, bitan)

        A = camera.uncook(posa)
        B = camera.uncook(posb)
        C = camera.uncook(posc)
        scr_norm = ts.cross(A - C, B - A)
        if scr_norm != 0:  # degenerate to 'line' if zero
            B_A = (B - A) / scr_norm
            A_C = (A - C) / scr_norm

            shake = ts.vec2(0.0)
            if ti.static(camera.fb.n_taa):
                for i, s in ti.static(enumerate(map(ti.Vector, TAA_SHAKES[:camera.fb.n_taa]))):
                    if camera.fb.itaa[None] == i:
                        shake = s * 0.5

            # screen space bounding box
            M = int(ti.floor(min(A, B, C) - 1))
            N = int(ti.ceil(max(A, B, C) + 1))
            M = ts.clamp(M, 0, ti.Vector(camera.fb.res))
            N = ts.clamp(N, 0, ti.Vector(camera.fb.res))
            for X in ti.grouped(ti.ndrange((M.x, N.x), (M.y, N.y))):
                # barycentric coordinates using the area method
                X_A = X - A + shake
                w_C = ts.cross(B_A, X_A)
                w_B = ts.cross(A_C, X_A)
                w_A = 1 - w_C - w_B

                # draw
                eps = ti.get_rel_eps() * 0.2
                is_inside = w_A >= -eps and w_B >= -eps and w_C >= -eps
                if not is_inside:
                    continue

                # https://gitee.com/zxtree2006/tinyrenderer/blob/master/our_gl.cpp
                if ti.static(camera.type != camera.ORTHO):
                    bclip = ts.vec3(w_A / posa.z, w_B / posb.z, w_C / posc.z)
                    bclip /= bclip.x + bclip.y + bclip.z
                    w_A, w_B, w_C = bclip

                depth = (posa.z * w_A + posb.z * w_B + posc.z * w_C)
                if camera.fb.atomic_depth(X, depth):
                    continue

                clr = [
                    a * w_A + b * w_B + c * w_C
                    for a, b, c in zip(clra, clrb, clrc)
                ]
                camera.fb.update(X, model.pixel_shader(*clr))
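
# A plain restatement of the perspective correction near the end of
# render_triangle: screen-space barycentrics are divided by each vertex's
# view-space z and renormalised, so attributes interpolate linearly in eye
# space rather than in screen space:
def perspective_correct(w_a, w_b, w_c, za, zb, zc):
    ba, bb, bc = w_a / za, w_b / zb, w_c / zc
    s = ba + bb + bc
    return ba / s, bb / s, bc / s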