Example No. 1: camera constructor
 def __init__(self, pos, look, up, f_dist, fov):
     self.pos = pos
     self.look_dir = pykay.normalize(look - pos)   # unit view direction
     # build an orthonormal camera frame from the view direction and the up hint
     right = pykay.normalize(torch.cross(self.look_dir,
                                         pykay.normalize(up)))
     self.up = pykay.normalize(torch.cross(right, self.look_dir))
     self.f_dist = f_dist                          # distance to the image plane
     self._fov = fov  # half_height / f_dist (tangent of the half field of view)
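A minimal usage sketch, assuming the constructor above belongs to a camera class (called Camera here, which is an assumed name) and that pos, look and up are 3-element torch tensors:

import torch

cam = Camera(pos=torch.Tensor([0.0, 0.0, 5.0]),    # eye position
             look=torch.Tensor([0.0, 0.0, 0.0]),   # point being looked at
             up=torch.Tensor([0.0, 1.0, 0.0]),     # approximate up direction
             f_dist=1.0,                           # distance to the image plane
             fov=0.5)                              # half_height / f_dist
print(cam.look_dir, cam.up)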
Example No. 2: Blinn-Phong shading
def shade_blinn_phong(geo_id, prim_id, p, wo):
    hit_obj = objs[geo_id]
    # flat (per-face) normal of the hit triangle
    n = torch.Tensor(
        [hit_obj.normals[3 * prim_id],
         hit_obj.normals[3 * prim_id + 1],
         hit_obj.normals[3 * prim_id + 2]])
    h = pykay.normalize(-light_dir - wo)   # halfway vector between light and view directions
    t = n.dot(h)
    d_temp = torch.max(torch.Tensor([n.dot(-light_dir), 0.0]))        # diffuse term
    s_temp = torch.pow(torch.max(torch.Tensor([t, 0.0])), Shininess)  # specular term
    return LightColor / (100 - p[1] * p[1]) * (Kd * d_temp + Ks * s_temp)
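A self-contained sketch of the same Blinn-Phong terms without the module-level globals (objs, light_dir, Kd, Ks, Shininess); the material values below are illustrative, not taken from the project:

import torch
import torch.nn.functional as F

def blinn_phong(n, light_dir, view_dir, kd=0.4, ks=0.5, shininess=10.0):
    # light_dir and view_dir point toward the surface (same convention as wo above)
    h = F.normalize(-light_dir - view_dir, dim=0)            # halfway vector
    diffuse = torch.clamp(n.dot(-light_dir), min=0.0)
    specular = torch.clamp(n.dot(h), min=0.0) ** shininess
    return kd * diffuse + ks * specular

n = torch.tensor([0.0, 1.0, 0.0])                            # surface normal
l = torch.tensor([0.0, -1.0, 0.0])                           # light shining straight down
v = F.normalize(torch.tensor([1.0, -1.0, 0.0]), dim=0)       # camera looking down at an angle
print(blinn_phong(n, l, v))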
Example No. 3: Wavefront OBJ loader
 def __init__(self, fdir, filename):
     """Loads a Wavefront OBJ file. """
     self.vertices = []
     self.faces = []
     self.normals = []
     self.vcount = 0
     self.fcount = 0
     self.light = torch.Tensor([0, 0, 0]).requires_grad_(
         False)  # no light by default
     for line in open(fdir + filename, "r"):
         if line.startswith('#'):
             continue
         if line.startswith('o'):
             continue
         values = line.split()
         if not values:
             continue
         if values[0] == 'v':       # vertex position
             self.vertices.append(float(values[1]))
             self.vertices.append(float(values[2]))
             self.vertices.append(float(values[3]))
             self.vcount += 1
         elif values[0] == 'f':     # triangular face (OBJ indices are 1-based)
             self.faces.append(int(values[1]) - 1)
             self.faces.append(int(values[2]) - 1)
             self.faces.append(int(values[3]) - 1)
             self.fcount += 1
     # build one flat (per-face) normal for every triangle
     for i in range(self.fcount):
         face_idx = 3 * i
         p0 = [
             self.vertices[3 * (self.faces[face_idx] + 0)],
             self.vertices[3 * (self.faces[face_idx] + 0) + 1],
             self.vertices[3 * (self.faces[face_idx] + 0) + 2]
         ]
         p1 = [
             self.vertices[3 * (self.faces[face_idx + 1])],
             self.vertices[3 * (self.faces[face_idx + 1]) + 1],
             self.vertices[3 * (self.faces[face_idx + 1]) + 2]
         ]
         p2 = [
             self.vertices[3 * (self.faces[face_idx + 2])],
             self.vertices[3 * (self.faces[face_idx + 2]) + 1],
             self.vertices[3 * (self.faces[face_idx + 2]) + 2]
         ]
         d0 = torch.Tensor(p1) - torch.Tensor(p0)
         d1 = torch.Tensor(p2) - torch.Tensor(p1)
         n = pykay.normalize(torch.cross(d0, d1))
         for k in range(3):
             self.normals.append(n[k])
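A usage sketch, assuming the loader above is the __init__ of a mesh class (called Obj here, an assumed name) and that the file contains only triangulated v and f records, which is all the parser handles; the directory and file name are placeholders:

import torch

mesh = Obj("models/", "cube.obj")                 # placeholder path and file name
print(mesh.vcount, "vertices,", mesh.fcount, "triangles")
print(torch.stack(mesh.normals[0:3]))             # flat normal of the first face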
Example No. 4: renderer forward pass
    def forward(ctx, shape, p_num, indix, tri_num, mv_shape, f_dist, pic_size):
        image = torch.zeros(pic_size, pic_size, 3)

        # flat per-triangle normals and a simple top-lit grayscale shade
        normals = []
        colors = []
        for i in indix:
            p = []
            for j in range(3):
                p.append(mv_shape[i[j]])
            e0 = (p[1] - p[0])[0:3]
            e1 = (p[2] - p[1])[0:3]
            n = pykay.normalize(torch.cross(e0, e1))
            # shade: cosine with the +y light direction, clamped to [0, 1]
            temp = torch.tensor([0.0, n.dot(torch.tensor([0.0, 1.0, 0.0]))])
            c = torch.max(temp)
            colors.append(c * torch.tensor([1.0, 1.0, 1.0]))
            normals.append(n)
        normals = torch.stack(normals)
        colors = torch.stack(colors)
        
        start = time.time()
        #C++ renderer
        kay.render(kay.float_ptr(mv_shape.data_ptr()), 
                p_num,
                kay.unsigned_int_ptr(indix.data_ptr()),
                tri_num,
                f_dist,  
                pic_size,
                kay.float_ptr(colors.data_ptr()),
                kay.float_ptr(image.data_ptr()))

        time_elapsed = time.time() - start
        print('Forward pass, time: %.5f s' % time_elapsed)
        # stash tensors and parameters for the backward pass
        ctx.shape = shape
        ctx.p_num = p_num
        ctx.indix = indix
        ctx.tri_num = tri_num
        ctx.normals = normals   # reused later in backward
        ctx.colors = colors     # reused later in backward
        ctx.pic_size = pic_size
        ctx.mv_shape = mv_shape
        ctx.f_dist = f_dist
        return image
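For context, a minimal sketch of the torch.autograd.Function scaffolding that a forward(ctx, ...) like the one above typically sits in; the class name RenderFunction and the toy backward below are assumptions, not the project's actual code:

import torch

class RenderFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, vertices):
        ctx.save_for_backward(vertices)     # stash inputs for backward
        return vertices.sum()               # stand-in for the C++ render call

    @staticmethod
    def backward(ctx, grad_output):
        vertices, = ctx.saved_tensors
        return grad_output * torch.ones_like(vertices)   # one gradient per forward input

v = torch.rand(9, requires_grad=True)
RenderFunction.apply(v).backward()
print(v.grad)                               # all ones: d(sum)/dv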
Example No. 5: ray-traced render loop
def render():
    global image
    image = torch.zeros(pic_res, pic_res, 3)
    for i in tqdm(range(pic_res)):
        for j in range(pic_res):
            # average ssn jittered samples per pixel
            for k in range(ssn):
                pixel_pos = left_up - (i+random.random())*2*temp / \
                    pic_res*c.up + (j+random.random())*2*temp/pic_res*right
                dir = pykay.normalize(pixel_pos - c.pos)
                r = pykay.ray(c.pos, dir)
                rc = rt.intersect(r.o[0], r.o[1], r.o[2],
                                  r.d[0], r.d[1], r.d[2])
                if rc.hit_flag:
                    image[i][j] += shade_blinn_phong(
                        rc.geo_id, rc.prim_id,
                        pixel_pos + dir*rc.dist, dir) + Ia*Ka  # alternative: Ia*ao_image[i][j]
                else:
                    image[i][j] += torch.Tensor([0.0, 0.0, 0.0])
            image[i][j] /= ssn
    return image
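A minimal, self-contained sketch of the pixel-to-ray mapping used in the loop above, with placeholder camera values (cam_pos, half_h, look, etc. are illustrative names, not the script's globals):

import random
import torch
import torch.nn.functional as F

pic_res, half_h = 4, 1.0                                  # tiny image; half height of the film plane
cam_pos = torch.tensor([0.0, 0.0, 5.0])
look = torch.tensor([0.0, 0.0, -1.0])                     # unit view direction
up = torch.tensor([0.0, 1.0, 0.0])
right = torch.cross(look, up)
left_up = cam_pos + look + half_h*up - half_h*right       # upper-left corner (f_dist = 1)

i, j = 0, 0                                               # pixel row, column
pixel = left_up - (i + random.random())*2*half_h/pic_res*up \
                + (j + random.random())*2*half_h/pic_res*right
ray_dir = F.normalize(pixel - cam_pos, dim=0)             # primary ray through pixel (i, j)
print(ray_dir)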
Example No. 6: PBR shading
def shade_pbr(geo_id, prim_id, p, wo):  # wo = view direction (camera toward surface)
    hit_obj = objs[geo_id]
    n = torch.Tensor([
        hit_obj.normals[3 * prim_id], hit_obj.normals[3 * prim_id + 1],
        hit_obj.normals[3 * prim_id + 2]
    ])
    h = pykay.normalize(-light_dir - wo)   # halfway vector
    h_dot_v = h.dot(-wo)
    n_dot_h = n.dot(h)
    n_dot_v = n.dot(-wo)
    n_dot_l = n.dot(-light_dir)
    l_dot_h = h.dot(-light_dir)
    # D: Beckmann-style microfacet distribution with roughness `rough`
    D = torch.exp(
        torch.Tensor([
            (n_dot_h * n_dot_h - 1) / (rough * rough * n_dot_h * n_dot_h)
        ])) / (4 * rough * rough * torch.pow(n_dot_h, 4))
    # F: Fresnel term (note: the standard Schlick approximation uses (1 - h_dot_v)**5)
    F0 = 0.03
    F = F0 + (1 - F0) * torch.pow(h_dot_v, 5)
    # G: geometric shadowing/masking term
    Ga = 2 * n_dot_h * n_dot_v / h_dot_v
    Gb = 2 * n_dot_h * n_dot_l / l_dot_h
    G = torch.min(torch.Tensor([1, Ga, Gb]))
    d_temp = torch.max(torch.Tensor([n.dot(-light_dir), 0.0]))
    s_temp = 0.25 * D * F * G / (n.dot(-light_dir) * n.dot(-wo))   # Cook-Torrance specular
    return LightColor / (100 - p[1] * p[1]) * (Kd * d_temp + Ks * s_temp)
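For reference, the specular part computed above is the familiar Cook-Torrance form

    s = D * F * G / (4 * (n·l) * (n·v))

with D a Beckmann-style normal distribution, F a Schlick-style Fresnel factor, and G the min-based geometry term; the 0.25 in the code is the 1/4 of this expression.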
Example No. 7: camera image plane, Blinn-Phong parameters, and sampling loop
center = c.pos + c.f_dist*c.look_dir           # center of the image plane
temp = c.f_dist*c._fov                         # half height of the image plane
right = torch.cross(c.look_dir, c.up)
left_up = center + temp*c.up - temp*right      # upper-left corner of the image plane

# Blinn-Phong Model Shader
Ks = torch.Tensor([0.5, 0, 0.0])
Ks.requires_grad = True
Kd = torch.Tensor([0.4, 0, 0.0])
Kd.requires_grad = True
Ka = torch.Tensor([0.1, 0, 0.0])
Ka.requires_grad = True
Ia = torch.Tensor([1.0, 1.0, 1.0])
Ia.requires_grad = True
LightColor = torch.Tensor([100.0, 100.0, 100.0])
light_dir = pykay.normalize(torch.Tensor([0, -1, 0]))
Shininess = torch.Tensor([10.0])
Shininess.requires_grad = True

ssn = 0  # jittered samples per pixel; 0 skips the sampling loop below

ao_image = torch.zeros(pic_res, pic_res, 3)
for i in tqdm(range(pic_res)):
    for j in range(pic_res):
        for k in range(ssn):
            pixel_pos = left_up - (i+random.random())*2*temp / \
                pic_res*c.up + (j+random.random())*2*temp/pic_res*right
            dir = pykay.normalize(pixel_pos - c.pos)
            r = pykay.ray(c.pos, dir)
            rc = rt.intersect(r.o[0], r.o[1], r.o[2],
                              r.d[0], r.d[1], r.d[2])
            if rc.hit_flag: