def _create_renderer(w=640, h=480, rt=None, t=None, f=None, c=None, k=None, near=.5, far=10.):
    """Build an OpenDR ColoredRenderer with a pinhole camera and frustum.

    Parameters
    ----------
    w, h : int
        Image width and height in pixels.
    rt, t : array-like of shape (3,), optional
        Camera rotation (Rodrigues vector) and translation; default zeros.
    f : array-like of shape (2,), optional
        Focal lengths (fx, fy); defaults to [w, w] / 2.
    c : array-like of shape (2,), optional
        Principal point (cx, cy); defaults to [w, h] / 2.
    k : array-like of shape (5,), optional
        Distortion coefficients; default zeros.
    near, far : float
        Near / far clipping planes of the view frustum.

    Returns
    -------
    ColoredRenderer
        Renderer with `camera` and `frustum` configured (vertices still unset).
    """
    # Fix: the original used `rt=np.zeros(3)` / `t=np.zeros(3)` directly in the
    # signature. Mutable default arguments are evaluated once and shared across
    # calls, so any in-place mutation by a caller (or by ProjectPoints/chumpy)
    # would silently corrupt later calls. None-sentinels give a fresh array
    # per call while keeping the effective defaults identical.
    rt = np.zeros(3) if rt is None else rt
    t = np.zeros(3) if t is None else t
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    rn = ColoredRenderer()
    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
def test_occlusion(self):
    """Check analytic derivatives at occlusion boundaries.

    Renders two flattened earth meshes (one red, one green) that partially
    occlude each other, then compares the renderer's predicted image
    derivatives (``rn.dr_wrt``) against central finite differences for each
    translation component, asserting the summed absolute difference stays
    under empirically chosen limits.
    """
    # `visualize` is read from enclosing scope (presumably a module-level
    # flag — TODO confirm); plotting only happens when it is truthy.
    if visualize:
        import matplotlib.pyplot as plt
        plt.ion()

    # Create renderer
    import chumpy as ch
    import numpy as np
    from opendr.renderer import TexturedRenderer, ColoredRenderer
    #rn = TexturedRenderer()
    rn = ColoredRenderer()

    # Assign attributes to renderer
    from util_tests import get_earthmesh
    m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
    rn.texture_image = m.texture_image
    rn.ft = m.ft
    rn.vt = m.vt
    # Flatten the mesh in depth so occlusion is decided purely by the small
    # per-mesh z offsets in t0/t1 below.
    m.v[:,2] = np.mean(m.v[:,2])

    # red is front and zero
    # green is back and 1
    t0 = ch.array([1,0,.1])
    t1 = ch.array([-1,0,.1])
    v0 = ch.array(m.v) + t0

    if False:
        v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
    else:
        v1 = ch.array(m.v) + t1

    # Constant per-mesh vertex colors: v0 -> dark red, v1 -> dark green.
    vc0 = v0*0 + np.array([[.4,0,0]])
    vc1 = v1*0 + np.array([[0,.4,0]])

    # Concatenate the two meshes into a single vertex/face/color set.
    vc = ch.vstack((vc0, vc1))
    v = ch.vstack((v0, v1))
    f = np.vstack((m.f, m.f+len(v0)))

    w, h = (320, 240)
    rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.camera.t = ch.array([0,0,-2.5])
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    m.vc = v.r*0 + np.array([[1,0,0]])
    rn.set(v=v, f=f, vc=vc)

    # Shift the meshes so they overlap: t0 slightly in front (smaller z),
    # t1 slightly behind.
    t0[:] = np.array([1.4, 0, .1-.02])
    t1[:] = np.array([-0.6, 0, .1+.02])

    target = rn.r
    if visualize:
        plt.figure()
        plt.imshow(target)
        plt.title('target')
        plt.figure()
        plt.show()

    im_orig = rn.r.copy()

    from cvwrap import cv2

    tr = t0
    # Step sizes: eps_emp for the finite-difference probe, eps_pred to scale
    # the analytic derivative to the same magnitude.
    eps_emp = .02
    eps_pred = .02

    #blur = lambda x : cv2.blur(x, ksize=(5,5))
    blur = lambda x : x
    for tr in [t0, t1]:
        # Per-axis tolerances on the summed |pred - empirical| difference;
        # looser for the front mesh (t0), whose occlusion boundary moves.
        if tr is t0:
            sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
        else:
            sum_limits = [1., 5., 4.]

        if visualize:
            plt.figure()

        # One pass per translation component (x, y, z).
        for i in range(3):
            dr_pred = np.array(rn.dr_wrt(tr[i]).todense()).reshape(rn.shape) * eps_pred
            dr_pred = blur(dr_pred)

            # central differences: render at +eps/2 and -eps/2, then restore.
            tr[i] = tr[i].r + eps_emp/2.
            rn_greater = rn.r.copy()
            tr[i] = tr[i].r - eps_emp/1.
            rn_lesser = rn.r.copy()
            tr[i] = tr[i].r + eps_emp/2.

            dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

            # Clip to [-.5, .5] and shift to [0, 1] purely for display.
            dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
            dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

            if visualize:
                plt.subplot(3,3,i+1)
                plt.imshow(dr_pred_shown)
                plt.title('pred')
                plt.axis('off')
                plt.subplot(3,3,3+i+1)
                plt.imshow(dr_emp_shown)
                plt.title('empirical')
                plt.axis('off')
                plt.subplot(3,3,6+i+1)

            diff = np.abs(dr_emp - dr_pred)
            if visualize:
                plt.imshow(diff)
            diff = diff.ravel()
            if visualize:
                plt.title('diff (sum: %.2e)' % (np.sum(diff)))
                plt.axis('off')

            # print 'dr pred sum: %.2e' % (np.sum(np.abs(dr_pred.ravel())),)
            # print 'dr emp sum: %.2e' % (np.sum(np.abs(dr_emp.ravel())),)
            #import pdb; pdb.set_trace()
            self.assertTrue(np.sum(diff) < sum_limits[i])
## Load SMPL model (here we load the female model) m = load_model('../../models/basicModel_f_lbs_10_207_0_v1.0.0.pkl') ## Assign random pose and shape parameters m.pose[:] = np.random.rand(m.pose.size) * .2 m.betas[:] = np.random.rand(m.betas.size) * .03 m.pose[0] = np.pi ## Create OpenDR renderer rn = ColoredRenderer() ## Assign attributes to renderer w, h = (640, 480) rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w,w])/2., c=np.array([w,h])/2., k=np.zeros(5)) rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn.set(v=m, f=m.f, bgcolor=np.zeros(3)) ## Construct point light source rn.vc = LambertianPointLight( f=m.f, v=rn.v, num_verts=len(m), light_pos=np.array([-1000,-1000,-2000]), vc=np.ones_like(m)*.9, light_color=np.array([1., 1., 1.])) ## Show it using OpenCV import cv2 cv2.imwrite( "./test.png", rn.r )
'/data/Guha/GR/code/GR19/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl') m2.betas[:] = np.random.rand(m2.betas.size) * .03 ## Create OpenDR renderer rn1 = ColoredRenderer() rn2 = ColoredRenderer() ## Assign attributes to renderer w, h = (640, 480) rn1.camera = ProjectPoints(v=m1, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2., c=np.array([w, h]) / 2., k=np.zeros(5)) rn1.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn1.set(v=m1, f=m1.f, bgcolor=np.zeros(3)) rn2.camera = ProjectPoints(v=m2, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2., c=np.array([w, h]) / 2., k=np.zeros(5)) rn2.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn2.set(v=m2, f=m2.f, bgcolor=np.zeros(3)) ## Construct point light source rn1.vc = LambertianPointLight(f=m1.f, v=rn1.v, num_verts=len(m1),
def test_occlusion(self):
    """Check analytic derivatives at occlusion boundaries.

    Duplicate of the earlier test_occlusion in this file, except it converts
    the sparse Jacobian with ``.toarray()`` instead of ``.todense()``.
    Renders two flattened earth meshes (red in front, green behind) and
    asserts that predicted derivatives match central finite differences
    within per-axis limits.
    """
    # `visualize` is read from enclosing scope (presumably a module-level
    # flag — TODO confirm); plotting only happens when it is truthy.
    if visualize:
        import matplotlib.pyplot as plt
        plt.ion()

    # Create renderer
    import chumpy as ch
    import numpy as np
    from opendr.renderer import TexturedRenderer, ColoredRenderer
    #rn = TexturedRenderer()
    rn = ColoredRenderer()

    # Assign attributes to renderer
    from util_tests import get_earthmesh
    m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
    rn.texture_image = m.texture_image
    rn.ft = m.ft
    rn.vt = m.vt
    # Flatten the mesh in depth so occlusion is decided purely by the small
    # per-mesh z offsets in t0/t1 below.
    m.v[:,2] = np.mean(m.v[:,2])

    # red is front and zero
    # green is back and 1
    t0 = ch.array([1,0,.1])
    t1 = ch.array([-1,0,.1])
    v0 = ch.array(m.v) + t0

    if False:
        v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
    else:
        v1 = ch.array(m.v) + t1

    # Constant per-mesh vertex colors: v0 -> dark red, v1 -> dark green.
    vc0 = v0*0 + np.array([[.4,0,0]])
    vc1 = v1*0 + np.array([[0,.4,0]])

    # Concatenate the two meshes into a single vertex/face/color set.
    vc = ch.vstack((vc0, vc1))
    v = ch.vstack((v0, v1))
    f = np.vstack((m.f, m.f+len(v0)))

    w, h = (320, 240)
    rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.camera.t = ch.array([0,0,-2.5])
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    m.vc = v.r*0 + np.array([[1,0,0]])
    rn.set(v=v, f=f, vc=vc)

    # Shift the meshes so they overlap: t0 slightly in front (smaller z),
    # t1 slightly behind.
    t0[:] = np.array([1.4, 0, .1-.02])
    t1[:] = np.array([-0.6, 0, .1+.02])

    target = rn.r
    if visualize:
        plt.figure()
        plt.imshow(target)
        plt.title('target')
        plt.figure()
        plt.show()

    im_orig = rn.r.copy()

    from cvwrap import cv2

    tr = t0
    # Step sizes: eps_emp for the finite-difference probe, eps_pred to scale
    # the analytic derivative to the same magnitude.
    eps_emp = .02
    eps_pred = .02

    #blur = lambda x : cv2.blur(x, ksize=(5,5))
    blur = lambda x : x
    for tr in [t0, t1]:
        # Per-axis tolerances on the summed |pred - empirical| difference;
        # looser for the front mesh (t0), whose occlusion boundary moves.
        if tr is t0:
            sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
        else:
            sum_limits = [1., 5., 4.]

        if visualize:
            plt.figure()

        # One pass per translation component (x, y, z).
        for i in range(3):
            dr_pred = np.array(rn.dr_wrt(tr[i]).toarray()).reshape(rn.shape) * eps_pred
            dr_pred = blur(dr_pred)

            # central differences: render at +eps/2 and -eps/2, then restore.
            tr[i] = tr[i].r + eps_emp/2.
            rn_greater = rn.r.copy()
            tr[i] = tr[i].r - eps_emp/1.
            rn_lesser = rn.r.copy()
            tr[i] = tr[i].r + eps_emp/2.

            dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

            # Clip to [-.5, .5] and shift to [0, 1] purely for display.
            dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
            dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

            if visualize:
                plt.subplot(3,3,i+1)
                plt.imshow(dr_pred_shown)
                plt.title('pred')
                plt.axis('off')
                plt.subplot(3,3,3+i+1)
                plt.imshow(dr_emp_shown)
                plt.title('empirical')
                plt.axis('off')
                plt.subplot(3,3,6+i+1)

            diff = np.abs(dr_emp - dr_pred)
            if visualize:
                plt.imshow(diff)
            diff = diff.ravel()
            if visualize:
                plt.title('diff (sum: %.2e)' % (np.sum(diff)))
                plt.axis('off')

            # print 'dr pred sum: %.2e' % (np.sum(np.abs(dr_pred.ravel())),)
            # print 'dr emp sum: %.2e' % (np.sum(np.abs(dr_emp.ravel())),)
            #import pdb; pdb.set_trace()
            self.assertTrue(np.sum(diff) < sum_limits[i])
def render(self, image, cam, K, verts, face, draw_id=''):
    """Render an SMPL mesh with OpenDR and composite it over the input image.

    Args:
        image: input image as a CHW array (transposed to HWC below).
        cam: weak-perspective camera params; cam[0] is scale, cam[1:3] are
            x/y translation — presumably (s, tx, ty), TODO confirm.
        K: 2-D camera intrinsics matrix (fx=K[0,0], fy=K[1,1], cx=K[0,2], cy=K[1,2]).
        verts: mesh vertices, array-like of shape (N, 3).
        face: mesh triangle indices.
        draw_id: suffix used only by the (unreachable) image-saving code below.

    Returns:
        (img_orig, img_resized, img_smpl, render_smpl_rgba): the HWC input,
        its resized copy, the overlay composite, and a rotated RGBA render.
    """
    # roll_axis = torch.Tensor([1, 0, 0]).unsqueeze(0)  # .expand(1, -1)
    # alpha = torch.Tensor([np.pi] * 1).unsqueeze(1) * 0.5
    # pose[0, :3] = axis_angle_add(pose[0, :3].unsqueeze(0), roll_axis, alpha)
    # pose[:3] *= torch.Tensor([1, -1, -1])
    # self.m.betas[:] = shape.numpy()[0]
    # self.m.pose[:] = pose.numpy()[0]
    # m.betas[:] = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    # m.betas[:] = np.array([0.]*10)
    # m.pose[:] = np.array([0.]*72)
    # m.pose[0] = -np.pi
    # m.pose[2] = 0.5
    # m.pose[2] = np.pi
    # m.betas[0] = 0.5

    ## Create OpenDR renderer
    rn = ColoredRenderer()
    # print(rn.msaa)
    # # rn.msaa = True

    ## Assign attributes to renderer
    # Output resolution is 224 * self.ratio on both sides; intrinsics and
    # principal point are scaled by the same factor.
    w, h = (224 * self.ratio, 224 * self.ratio)
    # w, h = (1000, 1000)
    f = np.array([K[0, 0], K[1, 1]]) * float(self.ratio)
    c = np.array([K[0, 2], K[1, 2]]) * float(self.ratio)
    # Convert weak-perspective cam (scale, tx, ty) to a camera translation;
    # 1e-9 guards against division by zero when cam[0] ~ 0.
    t = np.array([cam[1], cam[2], 2 * K[0, 0] / (224. * cam[0] + 1e-9)])
    # t = np.array([0, 0, 5.])
    # c = np.array([K[0, 0, 2], 112 - K[0, 1, 1] * float(cam[0, 2])]) * float(self.ratio)
    # rn.camera = ProjectPoints(v=m*np.array([1,-1,-1]), rt=np.zeros(3), t=np.array([0, 0, 5.]), f=f, c=c, k=np.zeros(5))
    rn.camera = ProjectPoints(v=verts, rt=np.zeros(3), t=t, f=f, c=c, k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 100., 'width': w, 'height': h}
    # [:, [1, 0, 2]]

    # Uniform albedo for the Lambertian shading below.
    albedo = np.ones_like(verts) * .9
    # albedo(6890, 3)(6890, 3)(13776, 3)

    # Candidate mesh colors. NOTE(review): several of the name comments do
    # not match their RGB values (they appear shifted by one line).
    color1 = np.array([0.85490196, 0.96470588, 0.96470588])  # light steel blue
    # color1 = np.array([i / 255. for i in [176, 196, 222]])
    # color1 = np.array([i / 255. for i in [168, 173, 180]])
    # color2 = np.array([i / 255. for i in [255, 244, 229]])
    color2 = np.array([i / 255. for i in [181, 178, 146]])
    color3 = np.array([i / 255. for i in [190, 178, 167]])  # beige
    # color4 = np.array([i / 255. for i in [245, 245, 220]])  # wheat
    color4 = np.array([i / 255. for i in [245, 222, 179]])  # thistle
    # color5 = np.array([i / 255. for i in [216, 191, 216]])
    color5 = np.array([i / 255. for i in [183, 166, 173]])  # aqua marine
    color6 = np.array([i / 255. for i in [127, 255, 212]])  # turquoise
    color7 = np.array([i / 255. for i in [64, 224, 208]])  # medium turquoise
    color8 = np.array([i / 255. for i in [72, 209, 204]])  # honeydew
    color9 = np.array([i / 255. for i in [240, 255, 240]])  # burly wood
    color10 = np.array([i / 255. for i in [222, 184, 135]])  # sandy brown
    color11 = np.array([i / 255. for i in [244, 164, 96]])  # floral white Ours
    color12 = np.array([i / 255. for i in [255, 250, 240]])  # medium slate blue SPIN
    color13 = np.array([i / 255. for i in [72 * 2.5, 61 * 2.5, 255]])
    # color_list = [color1, color2, color3, color4, color5]
    color_list = [
        color6, color7, color8, color9, color10, color11, color12, color13
    ]
    # color_list = color_list + [color13]
    # color = color_list[int(len(color_list) * float(np.random.rand(1)))]
    # color = color_list[-1]

    # NOTE(review): if self.color is neither 'white' nor 'blue', `color` and
    # color0/1/2 are never bound and rn.set below raises NameError.
    if self.color in ['white']:
        color = color12
        # Per-light colors: two white lights and one dimmer gray light.
        color0 = np.array([1, 1, 1])
        color1 = np.array([1, 1, 1])
        color2 = np.array([0.7, 0.7, 0.7])
    elif self.color in ['blue']:
        color = color13
        color0 = color
        color1 = color
        color2 = color

    # rn.set(v=m*np.array([1,-1,1]), f=m.f, bgcolor=np.zeros(3))
    rn.set(v=verts, f=face, vc=color, bgcolor=np.zeros(3))
    # rn.set(v=rotateY(verts, np.radians(90)), f=self.m.f, bgcolor=np.zeros(3))

    ## Construct point light source
    # rn.vc = LambertianPointLight(
    #     f=m.f,
    #     v=rn.v,
    #     num_verts=len(m),
    #     light_pos=np.array([-1000,-1000,-2000]),
    #     vc=np.ones_like(m)*.9,
    #     light_color=np.array([1., 1., 1.]))

    yrot = np.radians(120)
    '''
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([-200, -100, -100]),
        vc=albedo,
        light_color=color)

    # Construct Left Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([500, 10, -200]),
        vc=albedo,
        light_color=color)

    # Construct Right Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([-300, 100, 600]),
        vc=albedo,
        light_color=color)
    '''
    # 1. 1. 0.7
    # Three-point lighting: light positions are rotated about Y by `yrot`
    # and their contributions accumulated into the vertex colors.
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=color0)

    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([800, 10, 300]), yrot),
                                  vc=albedo,
                                  light_color=color1)

    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=color2)

    # render_smpl = rn.r
    ## Construct point light source
    # rn.vc += SphericalHarmonics(light_color=np.array([1., 1., 1.]))

    # CHW -> HWC, then upscale to the render resolution.
    img_orig = np.transpose(image, (1, 2, 0))
    img_resized = resize(
        img_orig,
        (img_orig.shape[0] * self.ratio, img_orig.shape[1] * self.ratio),
        anti_aliasing=True)

    # ax_smpl = plt.subplot(2, 2, 2)
    # plt.imshow(rn.r)
    # plt.axis('off')
    # print(max(rn.r))
    # print(min(rn.r))
    # fig = plt.figure()

    # Composite: copy rendered pixels wherever a triangle is visible.
    # 4294967295 is uint32 max — presumably OpenDR's "no triangle" sentinel
    # in visibility_image; confirm against the renderer docs.
    img_smpl = img_resized.copy()
    img_smpl[rn.visibility_image != 4294967295] = rn.r[
        rn.visibility_image != 4294967295]
    '''
    ax_stack = plt.subplot(2, 2, 3)
    ax_stack.imshow(img_smpl)
    plt.axis('off')
    '''

    # Second render: same mesh rotated 90 degrees about Y, exported as RGBA
    # with alpha = 255 on covered pixels.
    rn.set(v=rotateY(verts, np.radians(90)), f=face, bgcolor=np.zeros(3))
    render_smpl = rn.r
    # rn.set(v=rotateY(verts, np.radians(90)), f=self.m.f, bgcolor=np.zeros(3))
    render_smpl_rgba = np.zeros(
        (render_smpl.shape[0], render_smpl.shape[1], 4))
    render_smpl_rgba[:, :, :3] = render_smpl
    render_smpl_rgba[:, :, 3][rn.visibility_image != 4294967295] = 255
    '''
    ax_img = plt.subplot(2, 2, 1)
    ax_img.imshow(np.transpose(image, (1, 2, 0)))
    plt.axis('off')
    ax_smpl = plt.subplot(2, 2, 2)
    ax_smpl.imshow(render_smpl_rgba)
    plt.axis('off')
    '''
    return img_orig, img_resized, img_smpl, render_smpl_rgba

    # NOTE(review): everything below follows the return statement and is
    # unreachable — presumably leftover debug/saving code. Consider deleting
    # or moving it above the return behind a flag.
    # img_uv = np.transpose(uvimage_front[0].cpu().numpy(), (1, 2, 0))
    # # img_uv = resize(img_uv, (img_uv.shape[0], img_uv.shape[1]), anti_aliasing=True)
    # img_uv[img_uv == 0] = img_show[img_uv == 0]
    # plt.show()
    # save_path = './notebooks/output/upimgs/'
    save_path = './notebooks/output/demo_results-v2/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    matplotlib.image.imsave(save_path + 'img_' + draw_id + '.png', img_orig)
    matplotlib.image.imsave(save_path + 'img_smpl_' + draw_id + '.png',
                            img_smpl)
    matplotlib.image.imsave(save_path + 'smpl_' + draw_id + '.png',
                            render_smpl_rgba)