def on_changed(self, which):
    # Lazily build the Lambertian term: _lpl = maximum((ldn * light_color) * vc, 0)
    if not hasattr(self, '_lpl'):
        self.add_dterm('_lpl', maximum(multiply(a=multiply()), 0.0))
    if not hasattr(self, 'ldn'):
        self.ldn = LightDotNormal(self.v.r.size / 3)
    if not hasattr(self, 'vn'):
        logger.info('LambertianPointLight using auto-normals. '
                    'This will be slow for derivative-free computations.')
        self.vn = VertNormals(f=self.f, v=self.v)
        self.vn.needs_autoupdate = True
    if 'v' in which and hasattr(self.vn, 'needs_autoupdate') and self.vn.needs_autoupdate:
        self.vn.v = self.v

    ldn_args = {k: getattr(self, k) for k in which if k in ('light_pos', 'v', 'vn')}
    if len(ldn_args) > 0:
        self.ldn.set(**ldn_args)
        self._lpl.a.a.a = self.ldn.reshape((-1, 1))

    if 'num_verts' in which or 'light_color' in which:
        # nc = self.num_channels
        # IS = np.arange(self.num_verts * nc)
        # JS = np.repeat(np.arange(self.num_verts), 3)
        # data = (row(self.light_color) * np.ones((self.num_verts, 3))).ravel()
        # mtx = sp.csc_matrix((data, (IS, JS)), shape=(self.num_verts * 3, self.num_verts))
        self._lpl.a.a.b = self.light_color.reshape((1, self.num_channels))

    if 'vc' in which:
        self._lpl.a.b = self.vc.reshape((-1, self.num_channels))
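# For reference, a NumPy-only sketch (a hypothetical helper, not part of opendr)
# of the quantity the `_lpl` graph above evaluates to:
# maximum((ldn * light_color) * vc, 0), where ldn is the dot product of the
# normalized vertex-to-light direction with the vertex normal.
def lambert_reference(v, vn, vc, light_pos, light_color):
    d = light_pos.reshape(1, 3) - v                  # vertex-to-light vectors
    d /= np.linalg.norm(d, axis=1, keepdims=True)    # normalized directions
    ldn = np.sum(d * vn, axis=1).reshape(-1, 1)      # N.L per vertex
    return np.maximum(ldn * light_color.reshape(1, -1) * vc, 0.0)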
def vertex_visibility_angle(self, camera):
    # Cosine between each vertex's view ray and its (flipped) normal,
    # both expressed in camera coordinates.
    n = VertNormals(camera.v, self.f)
    R = cv2.Rodrigues(camera.rt.r)[0]
    v_cam = camera.v.r.dot(R) + camera.t.r
    n_cam = n.r.dot(R)
    v_dir = v_cam / np.linalg.norm(v_cam, axis=1).reshape(-1, 1)
    return np.sum(v_dir * -1 * n_cam, axis=1)
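# Hypothetical usage sketch: vertices whose normals face the camera have a
# positive cosine, e.g.
#   front_facing = mesh.vertex_visibility_angle(camera) > 0.0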
def test_earth():
    m = get_earthmesh(trans=ch.array([0, 0, 0]), rotation=ch.zeros(3))

    # Create V, A, U, f: geometry, brightness, camera, renderer
    V = ch.array(m.v)
    A = SphericalHarmonics(vn=VertNormals(v=V, f=m.f),
                           components=[3., 2., 0., 0., 0., 0., 0., 0., 0.],
                           light_color=ch.ones(3))
    # camera
    U = ProjectPoints(v=V, f=[w, w], c=[w / 2., h / 2.],
                      k=ch.zeros(5), t=ch.zeros(3), rt=ch.zeros(3))
    f = TexturedRenderer(vc=A, camera=U, f=m.f, bgcolor=[0., 0., 0.],
                         texture_image=m.texture_image, vt=m.vt, ft=m.ft,
                         frustum={'width': w, 'height': h, 'near': 1, 'far': 20})

    # Parameterize the vertices
    translation, rotation = ch.array([0, 0, 8]), ch.zeros(3)
    f.v = translation + V.dot(Rodrigues(rotation))
    observed = f.r
    np.random.seed(1)

    # This is reactive, in the sense that changes to these values will
    # propagate to any functions that depend on them.
    translation[:] = translation.r + np.random.rand(3)
    rotation[:] = rotation.r + np.random.rand(3) * .2

    # Create the energy
    E_raw = f - observed
    E_pyr = gaussian_pyramid(E_raw, n_levels=6, normalization='size')

    Image.fromarray((observed * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "reference.png"))
    step = 0
    Image.fromarray((f.r * 255).astype(np.uint8)).save(
        os.path.join(save_dir, "step_{:05d}.png".format(step)))

    print('OPTIMIZING TRANSLATION AND ROTATION')
    free_variables = [translation, rotation]
    ch.minimize({'pyr': E_pyr}, x0=free_variables, callback=create_callback(f))
    ch.minimize({'raw': E_raw}, x0=free_variables, callback=create_callback(f))
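# A minimal sketch of the `create_callback` helper assumed above: it returns a
# callback for ch.minimize() that writes the current render to disk each step.
# (Hypothetical implementation; the original helper may differ.)
def create_callback(renderer):
    state = {'step': 0}
    def cb(_=None):
        state['step'] += 1
        Image.fromarray((renderer.r * 255).astype(np.uint8)).save(
            os.path.join(save_dir, "step_{:05d}.png".format(state['step'])))
    return cb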
def InstanceSMPL(frame_id):
    print(g_paramfiles[frame_id])

    trans = []
    betas = []
    poses = []
    exps = []
    with open(g_paramfiles[frame_id]) as f:
        SMPLParams = pickle.load(f)
    # A single param dict is wrapped into a one-element list.
    if not isinstance(SMPLParams, list):
        SMPLParams = [copy.deepcopy(SMPLParams)]

    for idx, cParam in enumerate(SMPLParams):
        trans.append(cParam['trans'])
        betas.append(cParam['betas'])
        poses.append(cParam['pose'])
        if g_args.type == 'ADAM':
            exps.append(cParam['faces'])
            inds = adamWrapper.f + idx * adamWrapper.size[0]
        else:
            inds = smplWrapper.f + idx * smplWrapper.size[0]
        if idx == 0:
            cinds = inds
        else:
            cinds = np.concatenate((cinds, inds), axis=0)

    trans = np.asarray(trans)
    betas = np.asarray(betas)
    poses = np.asarray(poses)
    exps = np.asarray(exps)

    if g_args.type == 'SMPL':
        v, _ = smplWrapper(betas, poses)
    elif g_args.type == 'ADAM':
        v, _ = adamWrapper(betas, poses, exps)
    v += np.expand_dims(trans, axis=1)
    v = np.reshape(v, (-1, 3))
    if g_args.type == 'SMPL':
        v = v * 100.0  # SMPL is in meters; convert to centimeters

    # calculate per-vertex normals
    from opendr.geometry import VertNormals
    vns = VertNormals(f=cinds, v=v)
    vns = sklearn.preprocessing.normalize(vns)
    return v, vns, cinds
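# Hypothetical usage sketch:
#   v, vns, inds = InstanceSMPL(0)  # vertices (cm for SMPL), unit normals, face indices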
def simple_renderer(rn, verts, faces, yrot=np.radians(120), color=colors['light_pink']):
    # Rendered model color
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.zeros(3))
    # Shade with per-vertex normals, remapped from [-1, 1] to [0, 1]
    rn.vc = VertNormals(verts, faces, True).r.reshape((-1, 3))
    rn.vc = (rn.vc + 1.0) * 0.5

    # Alternative Lambertian three-point lighting (currently disabled):
    # albedo = rn.vc
    # # Construct Back Light (on back right corner)
    # rn.vc = LambertianPointLight(
    #     f=rn.f, v=rn.v, num_verts=len(rn.v),
    #     light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
    #     vc=albedo, light_color=np.array([1, 1, 1]))
    # # Construct Left Light
    # rn.vc += LambertianPointLight(
    #     f=rn.f, v=rn.v, num_verts=len(rn.v),
    #     light_pos=_rotateY(np.array([800, 10, 300]), yrot),
    #     vc=albedo, light_color=np.array([1, 1, 1]))
    # # Construct Right Light
    # rn.vc += LambertianPointLight(
    #     f=rn.f, v=rn.v, num_verts=len(rn.v),
    #     light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
    #     vc=albedo, light_color=np.array([.7, .7, .7]))

    return rn.r
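# Hypothetical usage sketch (assumes an opendr ColoredRenderer `rn` whose
# camera and frustum are already configured):
#   img = simple_renderer(rn, verts, faces)
#   cv2.imwrite('render.png', (img[:, :, ::-1] * 255).astype(np.uint8))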
def main(consensus_file, camera_file, video_file, pose_file, masks_file, out,
         model_file, resolution, num, first_frame, last_frame, display):
    # load data
    with open(model_file, 'rb') as fp:
        model_data = pkl.load(fp)
    with open(camera_file, 'rb') as fp:
        camera_data = pkl.load(fp)
    with open(consensus_file, 'rb') as fp:
        consensus_data = pkl.load(fp)

    pose_data = h5py.File(pose_file, 'r')
    poses = pose_data['pose'][first_frame:last_frame]
    trans = pose_data['trans'][first_frame:last_frame]
    masks = h5py.File(masks_file, 'r')['masks'][first_frame:last_frame]
    num_frames = masks.shape[0]

    # frames to sample textures from, spread evenly over the sequence
    indices_texture = np.ceil(np.arange(num) * num_frames * 1. / num).astype(np.int)

    vt = np.load('assets/basicModel_vt.npy')
    ft = np.load('assets/basicModel_ft.npy')

    # init
    base_smpl = Smpl(model_data)
    base_smpl.betas[:] = consensus_data['betas']
    base_smpl.v_personal[:] = consensus_data['v_personal']

    bgcolor = np.array([1., 0.2, 1.])
    iso = Isomapper(vt, ft, base_smpl.f, resolution, bgcolor=bgcolor)
    iso_vis = IsoColoredRenderer(vt, ft, base_smpl.f, resolution)
    camera = ProjectPoints(t=camera_data['camera_t'], rt=camera_data['camera_rt'],
                           c=camera_data['camera_c'], f=camera_data['camera_f'],
                           k=camera_data['camera_k'], v=base_smpl)
    frustum = {'near': 0.1, 'far': 1000.,
               'width': int(camera_data['width']), 'height': int(camera_data['height'])}
    rn_vis = ColoredRenderer(f=base_smpl.f, frustum=frustum, camera=camera, num_channels=1)

    cap = cv2.VideoCapture(video_file)
    for _ in range(first_frame):
        cap.grab()

    # get part-textures
    i = first_frame
    tex_agg = np.zeros((resolution, resolution, 25, 3))
    tex_agg[:] = np.nan
    normal_agg = np.ones((resolution, resolution, 25)) * 0.2

    vn = VertNormals(f=base_smpl.f, v=base_smpl)
    static_indices = np.indices((resolution, resolution))

    while cap.isOpened() and i < indices_texture[-1]:
        if i in indices_texture:
            log.info('Getting part texture from frame {}...'.format(i))
            _, frame = cap.read()

            mask = np.array(masks[i], dtype=np.uint8)
            pose_i = np.array(poses[i], dtype=np.float32)
            trans_i = np.array(trans[i], dtype=np.float32)

            base_smpl.pose[:] = pose_i
            base_smpl.trans[:] = trans_i

            # which faces have been seen and are projected into the silhouette?
            visibility = rn_vis.visibility_image.ravel()
            visible = np.nonzero(visibility != 4294967295)[0]  # 2**32 - 1 marks background

            proj = camera.r
            in_viewport = np.logical_and(
                np.logical_and(np.round(camera.r[:, 0]) >= 0,
                               np.round(camera.r[:, 0]) < frustum['width']),
                np.logical_and(np.round(camera.r[:, 1]) >= 0,
                               np.round(camera.r[:, 1]) < frustum['height']),
            )
            in_mask = np.zeros(camera.shape[0], dtype=np.bool)
            idx = np.round(proj[in_viewport][:, [1, 0]].T).astype(np.int).tolist()
            in_mask[in_viewport] = mask[idx]

            faces_in_mask = np.where(np.min(in_mask[base_smpl.f], axis=1))[0]
            visible_faces = np.intersect1d(faces_in_mask, visibility[visible])

            # get the current unwrap
            part_tex = iso.render(frame / 255., camera, visible_faces)

            # angle under which the texels have been seen
            points = np.hstack((proj, np.ones((proj.shape[0], 1))))
            points3d = camera.unproject_points(points)
            points3d /= np.linalg.norm(points3d, axis=1).reshape(-1, 1)
            alpha = np.sum(points3d * -vn.r, axis=1).reshape(-1, 1)
            alpha[alpha < 0] = 0
            iso_normals = iso_vis.render(alpha)[:, :, 0]
            iso_normals[np.all(part_tex == bgcolor, axis=2)] = 0

            # texels to consider
            part_mask = np.zeros((resolution, resolution))
            min_normal = np.min(normal_agg, axis=2)
            part_mask[iso_normals > min_normal] = 1.

            # update best seen texels
            where = np.argmax(np.atleast_3d(iso_normals) - normal_agg, axis=2)
            idx = np.dstack((static_indices[0], static_indices[1], where))[part_mask == 1]
            tex_agg[list(idx[:, 0]), list(idx[:, 1]), list(idx[:, 2])] = part_tex[part_mask == 1]
            normal_agg[list(idx[:, 0]), list(idx[:, 1]), list(idx[:, 2])] = iso_normals[part_mask == 1]

            if display:
                im.show(part_tex, id='part_tex', waittime=1)
        else:
            cap.grab()
        i += 1

    # merge textures
    log.info('Computing median texture...')
    tex_median = np.nanmedian(tex_agg, axis=2)

    log.info('Inpainting unseen areas...')
    where = np.max(normal_agg, axis=2) > 0.2

    tex_mask = iso.iso_mask
    mask_final = np.float32(where)

    kernel_size = np.int(resolution * 0.02)
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    inpaint_area = cv2.dilate(tex_mask, kernel) - mask_final

    tex_final = cv2.inpaint(np.uint8(tex_median * 255), np.uint8(inpaint_area * 255),
                            3, cv2.INPAINT_TELEA)

    cv2.imwrite(out, tex_final)
    log.info('Done.')
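# The texel update above keeps, per (u, v), the sample seen under the most
# frontal angle across the 25 part layers. A toy, self-contained illustration
# of the same argmax-scatter pattern (hypothetical data and sizes):
def _texel_update_demo(res=4, layers=3):
    agg = np.ones((res, res, layers)) * 0.2        # best cosine seen so far
    tex = np.full((res, res, layers, 3), np.nan)   # best texel seen so far
    new_cos = np.random.rand(res, res)             # cosines for a new frame
    new_tex = np.random.rand(res, res, 3)          # unwrap for a new frame
    where = np.argmax(np.atleast_3d(new_cos) - agg, axis=2)  # layer to overwrite
    better = new_cos > np.min(agg, axis=2)                   # only where it improves
    rows, cols = np.nonzero(better)
    tex[rows, cols, where[better]] = new_tex[better]
    agg[rows, cols, where[better]] = new_cos[better]
    return tex, agg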
def fit_adam_to_target_v(param_init, target_v):
    # meshlib = meshWrapper(lib_file=os.path.join(rootpath, '../../build/libPythonWrapper.so'))
    # meshlib.load_totalmodel()
    smpl_v = smplWrapper.v_template * 100.0  # meters -> centimeters
    vns_smpl = VertNormals(f=smplWrapper.f, v=smpl_v)
    vns_smpl = sklearn.preprocessing.normalize(vns_smpl)

    adam_v = copy.deepcopy(adamWrapper.v_template)
    adam_vNum = adam_v.shape[0]

    # target_adam_v = np.zeros(adam_v.shape)
    target_adam_vid = np.reshape(np.arange(adam_vNum, dtype=float), (-1, 1))
    target_v = np.array(target_v)

    # Map SMPL joints into the Adam joint layout
    tJointSmpl = np.matmul(coco_reg, smpl_v)
    tJointAdam = np.zeros(tJointSmpl.shape)
    tJointAdam[dome_to_angjoo, :] = tJointSmpl

    p_ = (0 - param_init['pose']).tolist()
    b_ = param_init['betas'].tolist()
    t_ = param_init['trans'].tolist()
    f_ = param_init['faces'].tolist()
    meshlib.set_value(t_, b_, p_, f_)

    AlignParams = []
    cDist = 0
    for nIter in range(5):
        cDist = 0
        # ICP correspondence search (currently disabled):
        # vns_adam = sklearn.preprocessing.normalize(VertNormals(f=adamWrapper.f, v=adam_v))
        # for idx, cv in enumerate(adam_v):
        #     dv = nl.norm(smpl_v - cv, axis=1)
        #     dvn = nl.norm(vns_smpl - vns_adam[idx], axis=1)
        #     dv_total = dv + dvn
        #     min_v = np.argmin(dv_total)
        #     target_adam_v[idx] = smpl_v[min_v]
        #     cDist += dv_total[min_v]

        tVertsAdam = np.hstack((target_v, target_adam_vid))
        # tVertsAdam = tVertsAdam[handsId, :]

        lHand = np.zeros((21, 3))
        rHand = np.zeros((21, 3))
        tJointFace = np.zeros((70, 3))
        fitJointData = np.vstack((tJointAdam, [0, 0, 0], rHand, lHand, tJointFace))

        # meshlib.reset_value()
        print(fitJointData)
        print(tVertsAdam)
        meshlib.adam_smpl_fit(fitJointData, tVertsAdam)

        pose = 0 - np.frombuffer(meshlib.cpose, float)
        betas = np.frombuffer(meshlib.ccoeff, float)
        trans = np.frombuffer(meshlib.ctrans, float)
        faces = np.frombuffer(meshlib.cface_coeff, float)

        wPose = np.asarray([pose])
        wBetas = np.asarray([betas])
        wTrans = np.asarray([trans])
        wFaces = np.asarray([faces])
        # new_adam_v, _ = adamWrapper(wBetas, wPose, wFaces)
        # new_adam_v += np.expand_dims(wTrans, axis=1)
        # adam_v = np.reshape(new_adam_v, (-1, 3))

        c_param = {"pose": pose, "betas": betas, "trans": trans,
                   "faces": faces, "nIter": nIter}
        AlignParams.append(copy.deepcopy(c_param))
        print('nIter {} with dV {}'.format(nIter, cDist))

    with open("alignParam.pkl", 'wb') as f:
        pickle.dump(AlignParams, f, protocol=pickle.HIGHEST_PROTOCOL)
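# Hypothetical usage sketch (assumes a prior SMPL fit saved as a pickle list of
# param dicts, in the same format read by InstanceSMPL/loadSMPL):
#   with open('smplParam.pkl', 'rb') as f:
#       param_init = pickle.load(f)[0]
#   fit_adam_to_target_v(param_init, target_v)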
def loadSMPL(frame_id):
    global vn_buffers, vts_buffers, inds_buffers, uvi_buffers, mask_buffers, face_num
    print(g_paramfiles[frame_id])

    cvts = []
    cinds = []
    cvns = []
    if g_args.is_obj:
        v, vns, cinds = loadObj(g_objfiles[frame_id])
    else:
        with open(g_paramfiles[frame_id]) as f:
            SMPLParams = pickle.load(f)
        # A single param dict is wrapped into a one-element list.
        if not isinstance(SMPLParams, list):
            SMPLParams = [copy.deepcopy(SMPLParams)]

        trans = []
        betas = []
        poses = []
        exps = []
        for idx, cParam in enumerate(SMPLParams):
            trans.append(cParam['trans'])
            betas.append(cParam['betas'])
            poses.append(cParam['pose'])
            if g_args.type == 'ADAM':
                exps.append(cParam['faces'])
                inds = adamWrapper.f + idx * adamWrapper.size[0]
            else:
                inds = smplWrapper.f + idx * smplWrapper.size[0]
            if idx == 0:
                cinds = inds
            else:
                cinds = np.concatenate((cinds, inds), axis=0)

        trans = np.asarray(trans)
        betas = np.asarray(betas)
        poses = np.asarray(poses)
        exps = np.asarray(exps)

        if g_args.type == 'SMPL':
            v, _ = smplWrapper(betas, poses)
        elif g_args.type == 'ADAM':
            v, _ = adamWrapper(betas, poses, exps)
        v += np.expand_dims(trans, axis=1)
        v = np.reshape(v, (-1, 3))
        if g_args.type == 'SMPL':
            v = v * 100.0  # SMPL is in meters; convert to centimeters

        # calculate per-vertex normals
        from opendr.geometry import VertNormals
        vns = VertNormals(f=cinds, v=v)
        vns = sklearn.preprocessing.normalize(vns)

    # per-vertex part (UVI) colors and a per-body mask color
    if g_args.type == 'SMPL':
        vnum = smplWrapper.size[0]
    else:
        vnum = adamWrapper.size[0]
    body_num = v.shape[0] // vnum
    face_num = cinds.shape[0]

    if g_args.type == 'SMPL':
        uvi_color = np.tile(dp_colors, (body_num, 1))
    else:
        uvi_color = np.tile(dp_colors_adam, (body_num, 1))
    mask_color = np.ones(uvi_color.shape, uvi_color.dtype)
    for bid in range(body_num):
        mask_color[bid * vnum:(bid + 1) * vnum, :] = 1.0 * (bid + 1) / body_num

    cvts = v.flatten()
    cvns = vns.flatten()
    cinds = cinds.flatten().astype(np.int)
    uvi_color = uvi_color.flatten()
    mask_color = mask_color.flatten()

    # upload vertex data to the GL buffers
    glBindBuffer(GL_ARRAY_BUFFER, vn_buffers)
    glBufferData(GL_ARRAY_BUFFER, len(cvns) * sizeof(ctypes.c_float),
                 (ctypes.c_float * len(cvns))(*cvns), GL_STATIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, vts_buffers)
    glBufferData(GL_ARRAY_BUFFER, len(cvts) * sizeof(ctypes.c_float),
                 (ctypes.c_float * len(cvts))(*cvts), GL_STATIC_DRAW)
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, inds_buffers)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ctypes.c_uint) * len(cinds),
                 (ctypes.c_uint * len(cinds))(*cinds), GL_STATIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, uvi_buffers)
    glBufferData(GL_ARRAY_BUFFER, len(uvi_color) * sizeof(ctypes.c_float),
                 (ctypes.c_float * len(uvi_color))(*uvi_color), GL_STATIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, mask_buffers)
    glBufferData(GL_ARRAY_BUFFER, len(mask_color) * sizeof(ctypes.c_float),
                 (ctypes.c_float * len(mask_color))(*mask_color), GL_STATIC_DRAW)
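# Hypothetical draw sketch: once loadSMPL() has filled the buffers, the mesh
# can be drawn with an indexed call (assumes the vertex attribute pointers for
# these buffers are bound elsewhere in the render loop):
#   glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, inds_buffers)
#   glDrawElements(GL_TRIANGLES, face_num * 3, GL_UNSIGNED_INT, None)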