Example 1
@ti.kernel
def render():
    # print(cast_ray(camera_pos, ti.Vector([0, 0, -1.0]).normalized()))
    for u, v in color_buffer:
        aspect_ratio = res[0] / res[1]
        scale = ti.tan(0.5 * fov * math.pi / 180)
        x = (2.0 * (u + ti.random()) / res[0]) - 1
        y = (2.0 * (v + ti.random()) / res[1]) - 1
        x *= scale * aspect_ratio
        y *= scale
        d = ti.Vector([x, y, -1.0]).normalized()

        depth = 0
        pos = camera_pos
        output = ti.Vector([1.0, 1.0, 1.0])
        coeff = 0

        while depth < max_ray_depth:
            t, norm, obj_id = cast_ray(pos, d)
            depth += 1
            if obj_id == 1:  # hit the emitter: take its contribution and stop
                coeff = 1
                depth = max_ray_depth
            else:
                if t != inf:
                    pos = pos + (t + 1e-4) * d
                    d = out_dir(norm)
                    output *= 0.6
                else:
                    depth = max_ray_depth
        color_buffer[u, v] += output * coeff
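A minimal driver sketch for this kernel; the field shapes and constants below are assumptions chosen to match the names used above (cast_ray and out_dir are taken as given), and in a real script these declarations would precede the kernel definition:

import math
import taichi as ti

ti.init(arch=ti.gpu)

res = (800, 600)
color_buffer = ti.Vector.field(3, dtype=ti.f32, shape=res)  # accumulated radiance per pixel
camera_pos = ti.Vector([0.0, 0.6, 3.0])                     # assumed camera position
fov = 60.0                                                  # assumed vertical FOV in degrees
max_ray_depth = 10
inf = 1e10

gui = ti.GUI('render', res)
for frame in range(1, 1001):
    render()                                        # adds one sample per pixel
    gui.set_image(color_buffer.to_numpy() / frame)  # average the accumulated samples
    gui.show()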
Example 2
    def render(self, camera):
        for I in ti.grouped(ti.ndrange(*camera.res)):
            if camera.fb.idepth[I] != 0:
                continue  # pixel already covered by geometry; keep its color
            id = I / ts.vec(*camera.res) * 2 - 1          # pixel index -> NDC in [-1, 1]
            dir = ts.vec3(id * ti.tan(camera.fov), 1.0)   # view-space ray direction
            dir = v4trans(self.L2C[None].inverse(), dir, 0).normalized()  # transform direction out of camera space
            color = self.sample(dir)                      # evaluate color for this direction
            camera.fb.update(I, dict(img=color))
Example 3
def get_proj(fovY, ratio, zn, zf):
    #  d3d perspective https://docs.microsoft.com/en-us/windows/win32/direct3d9/d3dxmatrixperspectiverh 
    # remember: it is column-major
    # xScale     0          0              0
    # 0        yScale       0              0
    # 0        0        zf/(zn-zf)        -1
    # 0        0        zn*zf/(zn-zf)      0
    # where:
    # yScale = cot(fovY/2)  
    # xScale = yScale / aspect ratio
    yScale = 1.0    / ti.tan(fovY/2)
    xScale = yScale / ratio
    return ti.Matrix([
        [xScale, 0.0, 0.0, 0.0],
        [0.0, yScale, 0.0, 0.0],
        [0.0, 0.0, zf / (zn - zf), zn * zf / (zn - zf)],
        [0.0, 0.0, -1.0, 0.0],
    ])
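A usage sketch with illustrative values; it assumes get_proj is decorated with @ti.func so it can be called in Taichi scope, and that fovY is the full vertical field of view in radians:

import math
import taichi as ti

fov_y = math.radians(60.0)   # assumed vertical field of view, in radians

@ti.kernel
def project_point():
    proj = get_proj(fov_y, 16.0 / 9.0, 0.1, 100.0)  # aspect 16:9, zn = 0.1, zf = 100
    p_view = ti.Vector([1.0, 2.0, -5.0, 1.0])       # camera-space point (camera looks down -z)
    p_clip = proj @ p_view                          # clip-space coordinates, before the divide
    ndc = ti.Vector([p_clip[0], p_clip[1], p_clip[2]]) / p_clip[3]  # perspective divide
    print(ndc)                                      # normalized device coordinates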
Example 4
@ti.kernel
def func():
    xi[0] = -yi[None]
    xi[1] = ~yi[None]
    xi[2] = ti.logical_not(yi[None])
    xi[3] = ti.abs(yi[None])
    xf[0] = -yf[None]
    xf[1] = ti.abs(yf[None])
    xf[2] = ti.sqrt(yf[None])
    xf[3] = ti.sin(yf[None])
    xf[4] = ti.cos(yf[None])
    xf[5] = ti.tan(yf[None])
    xf[6] = ti.asin(yf[None])
    xf[7] = ti.acos(yf[None])
    xf[8] = ti.tanh(yf[None])
    xf[9] = ti.floor(yf[None])
    xf[10] = ti.ceil(yf[None])
    xf[11] = ti.exp(yf[None])
    xf[12] = ti.log(yf[None])
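A field layout that fits the indices used above; this is a sketch, with names taken from the snippet and shapes assumed from the highest index written, and it relies on func being decorated as a kernel as shown:

import taichi as ti

ti.init()

xi = ti.field(ti.i32, shape=4)    # integer results: neg, bit-not, logical-not, abs
xf = ti.field(ti.f32, shape=13)   # float results of the math intrinsics
yi = ti.field(ti.i32, shape=())   # 0-D integer input
yf = ti.field(ti.f32, shape=())   # 0-D float input

yi[None] = 3
yf[None] = 0.25
func()
print(xi.to_numpy(), xf.to_numpy())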
Example 5
def sampleEquiAngular(
        u: ti.f32,
        maxDistance: ti.f32,
        rayOrigin: ti.Vector,  # vec3
        rayDir: ti.Vector,  # vec3
        lightPos: ti.Vector  # vec3
):
    # get coordinate of the closest point to the light along the (infinite) ray
    delta = dot(lightPos - rayOrigin, rayDir)

    # get distance this point is from light
    D = length(rayOrigin + delta * rayDir - lightPos)

    # get angle of endpoints
    thetaA = ti.atan2(0.0 - delta, D)
    thetaB = ti.atan2(maxDistance - delta, D)

    # take sample
    t = D * ti.tan(mix(thetaA, thetaB, u))
    dist = delta + t
    pdf = D / ((thetaB - thetaA) * (D * D + t * t))
    return (dist, pdf)
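How the returned (dist, pdf) pair is typically consumed in a single-scattering estimator; this is a sketch under assumed names (sigma_t, light_intensity) and assumes sampleEquiAngular and the helpers dot/length/mix are callable from Taichi scope (for example via taichi_glsl):

@ti.func
def single_scatter(rayOrigin, rayDir, maxDistance, lightPos):
    u = ti.random()
    dist, pdf = sampleEquiAngular(u, maxDistance, rayOrigin, rayDir, lightPos)
    p = rayOrigin + dist * rayDir          # scattering point placed along the ray
    r = (lightPos - p).norm()              # distance from that point to the light
    sigma_t = 0.5                          # assumed homogeneous extinction coefficient
    light_intensity = 10.0                 # assumed point-light power
    trans = ti.exp(-sigma_t * (dist + r))  # transmittance eye -> p -> light
    return light_intensity * trans / (r * r) / pdf   # importance-sampled contribution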
Example 6
def generate_parameters():
    lookfrom[None] = [
        cam_r[None] * ti.sin(cam_theta[None]) * ti.sin(cam_phi[None]),
        cam_r[None] * ti.cos(cam_phi[None]),
        cam_r[None] * ti.cos(cam_theta[None]) * ti.sin(cam_phi[None])
    ] + lookat[None]
    dir_l_pos[None] = [
        ti.sin(dir_l_theta[None]) * ti.sin(dir_l_phi[None]),
        ti.cos(dir_l_phi[None]),
        ti.cos(dir_l_theta[None]) * ti.sin(dir_l_phi[None])
    ]
    w = normalize(lookfrom[None] - lookat[None])
    u = normalize(up[None].cross(w))
    v = w.cross(u)

    half_height = ti.tan(fov / 2.0)
    half_width = aspect * half_height
    cam_origin[None] = lookfrom[None]
    cam_lower_left_corner[None] = cam_origin[None] - half_width * u - half_height * v - w
    cam_horizontal[None] = 2 * half_width * u
    cam_vertical[None] = 2 * half_height * v
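A sketch of driving these parameters from Python scope, assuming generate_parameters is decorated with @ti.kernel and that the spherical-coordinate fields are 0-D fields defined elsewhere:

import math

cam_theta[None] = 0.5 * math.pi
cam_phi[None] = 0.25 * math.pi
cam_r[None] = 5.0
generate_parameters()   # rebuilds lookfrom, the camera basis and the film corners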
    def get_ray_direction(self, orig, view_dir, x: float, y: float):
        ''' Compute ray direction for perspective camera.

        Args:
            orig (tl.vec3): Camera position
            view_dir (tl.vec3): View direction, normalized
            x (float): Image coordinate in [0,1] along width
            y (float): Image coordinate in [0,1] along height

        Returns:
            tl.vec3: Ray direction from camera origin to pixel specified through `x` and `y`
        '''
        u = x - 0.5
        v = y - 0.5

        up = tl.vec3(0.0, 1.0, 0.0)
        right = tl.cross(view_dir, up).normalized()
        up = tl.cross(right, view_dir).normalized()
        near_h = 2.0 * ti.tan(self.fov_rad) * self.near
        near_w = near_h * self.aspect
        near_m = orig + self.near * view_dir
        near_pos = near_m + u * near_w * right + v * near_h * up

        return (near_pos - orig).normalized()
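A primary-ray generation loop that would sit next to this method; a sketch assuming get_ray_direction is a @ti.func on a @ti.data_oriented camera class, and that self.res, self.orig, self.view_dir, and the vector field self.ray_dir are attributes (all of these names are illustrative, not from the original):

    @ti.kernel
    def generate_primary_rays(self):
        for i, j in ti.ndrange(*self.res):
            x = (i + 0.5) / self.res[0]   # pixel center in [0, 1] along width
            y = (j + 0.5) / self.res[1]   # pixel center in [0, 1] along height
            self.ray_dir[i, j] = self.get_ray_direction(self.orig, self.view_dir, x, y)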