def render(verts, faces, w=640, h=480):
    """Render a colored mesh from a frontal viewpoint with OpenDR.

    Parameters
    ----------
    verts : (N, 3) array of mesh vertices.
    faces : (F, 3) array of triangle indices.
    w, h  : output image width/height in pixels.

    Returns
    -------
    (h, w, 3) float image (the OpenDR render buffer).
    """
    # Frontal view: negate Y and Z. Work on a copy so the caller's
    # vertex array is not silently modified (the original negated the
    # columns in place, mutating the argument).
    verts = np.asarray(verts, dtype=np.float64).copy()
    verts[:, 1:3] = -verts[:, 1:3]

    # Create OpenDR renderer
    rn = ColoredRenderer()

    # Camera 2 units in front of the mesh, principal point at the image
    # center, no rotation, no lens distortion.
    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0., 0., 2.]),
                              f=np.array([w, h]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=verts, f=faces, bgcolor=np.zeros(3))

    # Single white point light over a uniform 0.9 albedo.
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(verts),
                                 light_pos=np.array([1000, -1000, -2000]),
                                 vc=np.ones_like(verts) * .9,
                                 light_color=np.array([1., 1., 1.]))
    return rn.r
def standard_render(self):
    """Render self.m (an SMPL-style model) with OpenDR and show the
    result in an OpenCV window; blocks until a key is pressed.
    """
    ## Create OpenDR renderer
    rn = ColoredRenderer()

    ## Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=self.m,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=self.m, f=self.m.f, bgcolor=np.zeros(3))

    ## Construct point light source
    rn.vc = LambertianPointLight(f=self.m.f,
                                 v=rn.v,
                                 num_verts=len(self.m),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(self.m) * .9,
                                 light_color=np.array([1., 1., 1.]))

    ## Show it using OpenCV
    import cv2
    cv2.imshow('render_SMPL', rn.r)
    # Fixed garbled prompt wording ("..Print any key" -> "Press any key").
    print('Press any key while on the display window')
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def renderBody(m):
    """Render body model `m` with OpenDR and display the horizontally
    mirrored result in an interactive matplotlib figure.
    """
    from opendr.camera import ProjectPoints
    from opendr.renderer import ColoredRenderer
    from opendr.lighting import LambertianPointLight

    width, height = 640, 480
    renderer = ColoredRenderer()

    # Camera 2 units down the z axis, principal point at the image
    # center, no rotation and no distortion.
    renderer.camera = ProjectPoints(
        v=m,
        rt=np.zeros(3),
        t=np.array([0, 0, 2.]),
        f=np.array([width, width]) / 2.,
        c=np.array([width, height]) / 2.,
        k=np.zeros(5))
    renderer.frustum = {'near': 1., 'far': 10.,
                        'width': width, 'height': height}
    renderer.set(v=m, f=m.f, bgcolor=np.zeros(3))

    # One white point light over a uniform 0.9 albedo.
    renderer.vc = LambertianPointLight(
        f=m.f,
        v=renderer.v,
        num_verts=len(m),
        light_pos=np.array([-1000, -1000, -2000]),
        vc=np.ones_like(m) * .9,
        light_color=np.array([1., 1., 1.]))

    plt.ion()
    plt.imshow(np.fliplr(renderer.r))  # FLIPPED!
    plt.show()
    plt.xticks([])
    plt.yticks([])
def Render():
    """Load previously saved vertices and faces from disk, render them
    with OpenDR and display the image with matplotlib (waits for the
    user to hit return before showing).
    """
    verts = np.load('../../resault/verts.npy')
    faces = np.load('../../resault/faces.npy')

    width, height = 640, 480
    renderer = ColoredRenderer()
    renderer.camera = ProjectPoints(
        v=verts,
        rt=np.zeros(3),
        t=np.array([0, 0, 2.]),
        f=np.array([width, width]) / 2.,
        c=np.array([width, height]) / 2.,
        k=np.zeros(5))
    renderer.frustum = {'near': 0.8, 'far': 16.,
                        'width': width, 'height': height}
    renderer.set(v=verts, f=faces, bgcolor=np.array([255, 255, 255]))
    renderer.vc = LambertianPointLight(
        f=faces,
        v=renderer.v,
        num_verts=len(verts),
        light_pos=np.array([-1000, -1000, -2000]),
        vc=np.ones_like(verts) * .9,
        light_color=np.array([1., 1., 1.]))

    import matplotlib.pyplot as plt
    plt.ion()
    plt.axis('off')
    plt.imshow(renderer.r)
    raw_input()  # Python 2 builtin: block until the user presses return
    plt.show()
def generate(self, img_bgr, texture_bgr):
    """Run HMR on `img_bgr`, pose the SMPL body with the prediction and
    render it with the given texture.

    Parameters:
        img_bgr     -- input image (BGR, as produced by cv2).
        texture_bgr -- texture image (BGR), installed via self.set_texture.
    Returns:
        (textured render as uint8 BGR image,
         texture derivative from texture_dr_wrt(rn_vis, silhouette),
         the silhouette render itself)
    """
    img = img_bgr
    self.set_texture(texture_bgr)
    # HMR predicts shifted vertices, a flat parameter vector, and camera
    # parameters for rendering (index 0 is used as focal length, 1: as
    # the principal point -- see the ProjectPoints calls below).
    vert_shifted, theta, cam_for_render = self.hmr.predict(img)
    # Slice theta as [camera | pose | shape]; the camera part is unused.
    pose = theta[self.num_cam:(self.num_cam + self.num_theta)]
    beta = theta[(self.num_cam + self.num_theta):]
    self.body.pose[:] = pose
    self.body.betas[:] = beta

    # Textured pass.
    rn_vis = TexturedRenderer()
    rn_vis.camera = ProjectPoints(t=np.zeros(3),
                                  rt=np.zeros(3),
                                  c=cam_for_render[1:],
                                  f=np.ones(2) * cam_for_render[0],
                                  k=np.zeros(5),
                                  v=vert_shifted)
    rn_vis.frustum = {
        'near': 0.1,
        'far': 1000.,
        'width': self.width,
        'height': self.height
    }
    rn_vis.set(v=vert_shifted,
               f=self.m.f,
               vc=self.m.vc,
               texture_image=self.m.texture_image,
               ft=self.m.ft,
               vt=self.m.vt,
               bgcolor=np.zeros(3))
    # rn_vis.background_image = img_bgr / 255. if img_bgr.max() > 1 else img_bgr
    out_img = rn_vis.r
    out_img = (out_img * 255).astype(np.uint8)
    out_img = cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR)

    # Silhouette pass with the same intrinsics.
    # NOTE(review): this camera projects self.body while the renderer
    # geometry below is vert_shifted (the textured pass uses
    # vert_shifted for both) -- confirm the mismatch is intentional.
    silhouette_rn = ColoredRenderer()
    silhouette_rn.camera = ProjectPoints(v=self.body,
                                         rt=np.zeros(3),
                                         t=np.zeros(3),
                                         f=np.ones(2) * cam_for_render[0],
                                         c=cam_for_render[1:],
                                         k=np.zeros(5))
    silhouette_rn.frustum = {
        'near': 0.1,
        'far': 1000.,
        'width': self.width,
        'height': self.height
    }
    silhouette_rn.set(v=vert_shifted,
                      f=self.m.f,
                      vc=self.m.vc,
                      bgcolor=np.zeros(3))
    return out_img, texture_dr_wrt(rn_vis, silhouette_rn.r), silhouette_rn.r
def mesh2Image(vertices, faces, batch, path, name, height, width, vertices_num=6890):
    """Render `vertices`/`faces` as a white-on-black mask image and write
    it to `path` as a PNG.

    vertices_num defaults to 6890 -- presumably the SMPL body vertex
    count; confirm before using other meshes.

    NOTE(review): the `batch != 1` branch references an undefined name
    `i` (NameError at runtime); it looks like this function used to sit
    inside a loop over the batch. Fix before calling with batch > 1.
    """
    # Create OpenDR renderer
    rn = ColoredRenderer()

    rt_1 = np.zeros(3)

    # Camera at the origin with unit focal length and zero principal
    # point; with these intrinsics projection is effectively x/z, y/z on
    # the raw vertex coordinates -- presumably the caller pre-scales the
    # vertices into pixel space (TODO confirm).
    rn.camera = ProjectPoints(
        v=vertices,  # vertices
        # v=m,
        rt=rt_1,  # camera rotation (Rodrigues vector); zero = identity
        t=np.array([0, 0, 0]),  # camera translation
        # f=np.array([w,w])/2, # focus length? just scaling the picture
        # c=np.array([w,h])/2, # just move the picture along top-left axis? not sure
        f=np.array([1, 1]),
        c=np.array([0, 0]),
        k=np.zeros(5))
    rn.frustum = {'near': 1, 'far': 15, 'width': width, 'height': height}
    rn.set(v=vertices, f=faces, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(
        f=faces,  # face
        v=vertices,
        # v=rn.v, #vertex?
        num_verts=len(vertices),
        light_pos=np.array([-1000, -1000, -2000]),  # point light position
        vc=np.ones_like(vertices) * .9,  # albedo per vertex
        light_color=np.array([1., 1., 1.]))  # Blue, Green, Red; light intensity

    # make the image binary (black and white); these are actually magic steps
    rn.change_col(np.ones((vertices_num, 3)))
    #mask = rn.r.copy()  # takes lots of time

    mask = rn.r * 255
    import cv2
    if batch == 1:
        cv2.imwrite('%s/%s.png' % (path, name), mask)
    else:
        # BUG: `i` is undefined here -- see docstring note above.
        cv2.imwrite('%s/%s_%d.png' % (path, name, i), mask)
# NOTE(review): stray opening triple-quote below -- it starts an
# unterminated string literal in this chunk; it presumably paired with a
# block of disabled code that was removed.
'''
def render(self, thetas, texture_bgr, rotate=np.array([0, 0, 0]), background_img=None):
    """
    get the rendered image and rendered silhouette
    :param thetas: model parameters, 3 * camera parameter + 72 * body pose + 10 * body shape
    :param texture_bgr: texture image in bgr format
    :param rotate: global rotation (Rodrigues vector) applied through the
        camera's rt. NOTE(review): mutable numpy default argument -- safe
        only if nothing ever writes to it; confirm ProjectPoints does not
        modify rt in place.
    :param background_img: optional background; rescaled to [0, 1] when it
        appears to be 0-255.
    :return: the rendered image and deviation of rendered image to texture image
             (rendered image, deviation of rendered image, silhouette)
    """
    self.set_texture(texture_bgr)
    thetas = thetas.reshape(-1)
    # Slice the flat parameter vector as [camera | pose | shape].
    cams = thetas[:self.num_cam]
    theta = thetas[self.num_cam: (self.num_cam + self.num_theta)]
    beta = thetas[(self.num_cam + self.num_theta):]
    self.body.pose[:] = theta
    self.body.betas[:] = beta
    # The weak-perspective camera parameters `cams` are currently unused:
    #
    # size = cams[0] * min(self.w, self.h)
    # position = cams[1:3] * min(self.w, self.h) / 2 + min(self.w, self.h) / 2
    """
    ####################################################################
    ATTENTION!
    I do not know why the flength is 500. But it worked
    ####################################################################
    """
    # Textured pass: body 2 units from the camera, empirically chosen
    # focal length of 0.62 * img_size (see the author's note above).
    texture_rn = TexturedRenderer()
    texture_rn.camera = ProjectPoints(v=self.body,
                                      rt=rotate,
                                      t=ch.array([0, 0, 2]),
                                      f=np.ones(2) * self.img_size * 0.62,
                                      c=np.array([self.w / 2, self.h / 2]),
                                      k=ch.zeros(5))
    texture_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
    texture_rn.set(v=self.body,
                   f=self.m.f,
                   vc=self.m.vc,
                   texture_image=self.m.texture_image,
                   ft=self.m.ft,
                   vt=self.m.vt)
    if background_img is not None:
        # Normalize 0-255 backgrounds to 0-1 before compositing.
        texture_rn.background_image = background_img / 255. if background_img.max() > 1 else background_img

    # Silhouette pass: identical camera, constant white vertex colors on
    # a black background.
    silhouette_rn = ColoredRenderer()
    silhouette_rn.camera = ProjectPoints(v=self.body,
                                         rt=rotate,
                                         t=ch.array([0, 0, 2]),
                                         f=np.ones(2) * self.img_size * 0.62,
                                         c=np.array([self.w / 2, self.h / 2]),
                                         k=ch.zeros(5))
    silhouette_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
    silhouette_rn.set(v=self.body,
                      f=self.m.f,
                      vc=np.ones_like(self.body),
                      bgcolor=np.zeros(3))
    return texture_rn.r, texture_dr_wrt(texture_rn, silhouette_rn.r), silhouette_rn.r
def render_smpl(m):
    """Render SMPL model `m` with OpenDR and return the image scaled to
    the 0-255 range.
    """
    width, height = 640, 480

    # Renderer with the standard setup: camera 2 units away, principal
    # point at the image center, no rotation or distortion.
    renderer = ColoredRenderer()
    renderer.camera = ProjectPoints(
        v=m,
        rt=np.zeros(3),
        t=np.array([0, 0, 2.]),
        f=np.array([width, width]) / 2.,
        c=np.array([width, height]) / 2.,
        k=np.zeros(5))
    renderer.frustum = {'near': 1., 'far': 10.,
                        'width': width, 'height': height}
    renderer.set(v=m, f=m.f, bgcolor=np.zeros(3))

    # Single white point light over a uniform 0.9 albedo.
    renderer.vc = LambertianPointLight(
        f=m.f,
        v=renderer.v,
        num_verts=len(m),
        light_pos=np.array([-1000, -1000, -2000]),
        vc=np.ones_like(m) * .9,
        light_color=np.array([1., 1., 1.]))

    return renderer.r * 255
def create_synth(verts, joints, skin_color, f, ss, tu, tv, rot, w, h, bg):
    """Rotate, scale and translate a hand mesh, render it over a
    background image, and return the composite plus transformed geometry.

    Parameters:
        verts, joints -- (778, 3) vertices and (21, 3) joints; the counts
                         match the MANO hand model -- presumably, confirm.
        skin_color    -- per-vertex colors for the second render pass.
        f             -- triangle faces.
        ss            -- in-plane scale; tu, tv -- in-plane translation.
        rot           -- Rodrigues rotation vector.
        w, h          -- render resolution.
        bg            -- background image.
    Returns:
        (224x224 PIL image, binary mask, rotated 3-D verts, rotated 3-D
         joints, projected verts, projected joints)
    """
    rn = ColoredRenderer()

    # Apply the global rotation to vertices and joints.
    R = cv2.Rodrigues(rot)[0]
    verts = np.transpose(np.matmul(R, np.transpose(verts)))
    joints = np.transpose(np.matmul(R, np.transpose(joints)))

    # Keep references to the rotated, pre-scale geometry. The scaling
    # below REBINDS verts/joints to new arrays, so these stay intact
    # even though verts is later mutated in place.
    verts_3d = verts
    joints_3d = joints

    # Scale x/y and shift in image space (z untouched).
    verts = np.array([[ss, ss, 1], ] * 778) * verts
    joints = np.array([[ss, ss, 1], ] * 21) * joints
    verts = verts + np.array([[tu, tv, 0], ] * 778)
    joints = joints + np.array([[tu, tv, 0], ] * 21)

    # Warn when the projected mesh leaves the image bounds.
    umax = np.max(verts[:, 0])
    umin = np.min(verts[:, 0])
    vmax = np.max(verts[:, 1])
    vmin = np.min(verts[:, 1])
    if ((umin < 0.) or (vmin < 0.) or (umax > w) or (vmax > h)):
        print('mesh outside')

    # Re-center depth around 10 and pre-multiply x/y by z so that the
    # unit-focal-length camera below (which divides by z) reproduces the
    # intended pixel positions.
    verts[:, 2] = 10. + (verts[:, 2] - np.mean(verts[:, 2]))
    verts[:, :2] = verts[:, :2] * np.expand_dims(verts[:, 2], 1)

    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 0]),
                              f=np.array([1, 1]),
                              c=np.array([0, 0]),
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 20., 'width': w, 'height': h}
    rn.set(v=verts, f=f, bgcolor=np.zeros(3))

    # First pass: constant white colors -> binary silhouette mask.
    rn.vc = np.ones((778, 3))
    mask = rn.r.copy()
    mask = mask[:, :, 0].astype(np.uint8)

    # Second pass: actual skin colors for the hand pixels.
    rn.vc = skin_color
    hand = rn.r.copy() * 255.

    # Composite the rendered hand over the background using the mask.
    image = (1 - np.expand_dims(mask, 2)) * bg + np.expand_dims(mask, 2) * hand
    image = image.astype(np.uint8)
    image = Image.fromarray(image).resize((224, 224), Image.LANCZOS)
    return image, mask, verts_3d, joints_3d, verts, joints
def render_smpl(par, theta, beta, img_out_file, model_path, front_view=False):
    """Load an SMPL model, apply pose/shape and write an OpenDR render
    to disk.

    par          -- unused; kept for interface compatibility.
    theta, beta  -- SMPL pose and shape parameters.
    img_out_file -- output path passed to cv2.imwrite.
    model_path   -- SMPL pickle for load_model.
    front_view   -- if True, override the root orientation and light the
                    model from the front; otherwise rotate the camera.
    """
    body = load_model(model_path)

    ## Assign the given pose
    body.pose[:] = theta
    body.betas[:] = beta

    # Choose camera rotation and light position for the requested view.
    if front_view:
        body.pose[:3] = np.array([np.pi, 0, 0], dtype=np.float32)
        cam_rot = np.zeros(3)
        light_source = np.array([-1000, -1000, -2000])
    else:
        cam_rot = np.array([3.14, 0, 0])
        light_source = np.array([1000, 1000, 2000])

    ## Create OpenDR renderer and assign its attributes
    width, height = 640, 480
    renderer = ColoredRenderer()
    renderer.camera = ProjectPoints(
        v=body,
        rt=cam_rot,
        t=np.array([0, 0, 2.]),
        f=np.array([width, width]) / 2.,
        c=np.array([width, height]) / 2.,
        k=np.zeros(5))
    renderer.frustum = {'near': 1., 'far': 10.,
                        'width': width, 'height': height}
    renderer.set(v=body, f=body.f, bgcolor=np.zeros(3))

    ## Construct point light source
    renderer.vc = LambertianPointLight(
        f=body.f,
        v=renderer.v,
        num_verts=len(body),
        light_pos=light_source,
        vc=np.ones_like(body) * .9,
        light_color=np.array([1., 1., 1.]))

    cv2.imwrite(img_out_file, renderer.r * 255.0)
# 11 (right ankle) 8 # 12 (left hip) 1 # 13 (left knee) 4 # 14 (left ankle) 7 #load SMPL model m = load_model( '/home/xiul/workspace/SMPL_python/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl' ) print(m.pose.size) m.pose[:] = np.random.rand(m.pose.size) * np.pi / 2 m.betas[:] = 0 m.pose[0] = np.pi rn = ColoredRenderer() w, h = (640, 480) rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2., c=np.array([w, h]) / 2., k=np.zeros(5)) rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn.set(v=m, f=m.f, bgcolor=np.zeros(3)) rn.vc = LambertianPointLight(f=m.f, v=rn.v, num_verts=len(m), light_pos=np.array([-1000, -1000, -2000]), vc=np.ones_like(m) * .9, light_color=np.array([1., 1., 1.])) plt.imshow(rn.r) plt.show()
class Ui_MainWindow(QtWidgets.QMainWindow, Ui_MainWindow_Base):
    """Qt main window for interactively posing and shaping an SMPL model.

    Renders the model with OpenDR into a canvas label; provides sliders
    for pose/shape/translation, mouse navigation of the camera, optional
    joint/bone overlays, and saving/loading of configurations (.ini),
    screenshots, and .obj meshes. (File targets Python 2 -- note the
    `print` statement in _open_config_dialog.)
    """

    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)

        # Mouse-drag state for camera navigation.
        self._moving = False
        self._rotating = False
        self._mouse_begin_pos = None
        self._loaded_gender = None
        # Guards draw(); set False while many widgets update at once.
        self._update_canvas = False

        # Main camera, a second projector for the 2-D joint overlay, and
        # the OpenDR renderer driven by a single Lambertian light.
        self.camera = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.joints2d = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.frustum = {'near': 0.1, 'far': 1000., 'width': 100, 'height': 30}
        self.light = LambertianPointLight(vc=np.array([0.94, 0.94, 0.94]),
                                          light_color=np.array([1., 1., 1.]))
        self.rn = ColoredRenderer(bgcolor=np.ones(3),
                                  frustum=self.frustum,
                                  camera=self.camera,
                                  vc=self.light,
                                  overdraw=False)

        self.model = None
        self._init_model('f')
        self.model.pose[0] = np.pi  # presumably turns the model upright -- confirm

        self.camera_widget = Ui_CameraWidget(self.camera, self.frustum, self.draw)

        # Wire widgets to handlers. The `k=key` defaults bind the loop
        # variable early (avoids the late-binding-closure pitfall).
        self.btn_camera.clicked.connect(lambda: self._show_camera_widget())
        for key, shape in self._shapes():
            shape.valueChanged[int].connect(lambda val, k=key: self._update_shape(k, val))
        for key, pose in self._poses():
            pose.valueChanged[int].connect(lambda val, k=key: self._update_pose(k, val))
        self.pos_0.valueChanged[float].connect(lambda val: self._update_position(0, val))
        self.pos_1.valueChanged[float].connect(lambda val: self._update_position(1, val))
        self.pos_2.valueChanged[float].connect(lambda val: self._update_position(2, val))
        self.radio_f.pressed.connect(lambda: self._init_model('f'))
        self.radio_m.pressed.connect(lambda: self._init_model('m'))
        self.reset_pose.clicked.connect(self._reset_pose)
        self.reset_shape.clicked.connect(self._reset_shape)
        self.reset_postion.clicked.connect(self._reset_position)
        self.canvas.wheelEvent = self._zoom
        self.canvas.mousePressEvent = self._mouse_begin
        self.canvas.mouseMoveEvent = self._move
        self.canvas.mouseReleaseEvent = self._mouse_end
        self.action_save.triggered.connect(self._save_config_dialog)
        self.action_open.triggered.connect(self._open_config_dialog)
        self.action_save_screenshot.triggered.connect(self._save_screenshot_dialog)
        self.action_save_mesh.triggered.connect(self._save_mesh_dialog)
        self.view_joints.triggered.connect(self.draw)
        self.view_joint_ids.triggered.connect(self.draw)
        self.view_bones.triggered.connect(self.draw)
        self._update_canvas = True

    def showEvent(self, event):
        # Fit the camera to the canvas once the window becomes visible.
        self._init_camera()
        super(self.__class__, self).showEvent(event)

    def resizeEvent(self, event):
        self._init_camera()
        super(self.__class__, self).resizeEvent(event)

    def closeEvent(self, event):
        self.camera_widget.close()
        super(self.__class__, self).closeEvent(event)

    def draw(self):
        """Render the model and push it (plus any overlays) to the canvas."""
        if self._update_canvas:
            img = np.array(self.rn.r)
            if self.view_joints.isChecked() or self.view_joint_ids.isChecked() or self.view_bones.isChecked():
                img = self._draw_annotations(img)
            self.canvas.setScaledContents(False)
            self.canvas.setPixmap(self._to_pixmap(img))

    def _draw_annotations(self, img):
        """Overlay bones, joints and joint ids (as enabled) onto `img`."""
        # Sync the joint projector with the current camera parameters.
        self.joints2d.set(t=self.camera.t, rt=self.camera.rt,
                          f=self.camera.f, c=self.camera.c, k=self.camera.k)
        if self.view_bones.isChecked():
            # Each kintree column holds a (parent, child) joint pair.
            kintree = self.model.kintree_table[:, 1:]
            for k in range(kintree.shape[1]):
                cv2.line(img,
                         (int(self.joints2d.r[kintree[0, k], 0]),
                          int(self.joints2d.r[kintree[0, k], 1])),
                         (int(self.joints2d.r[kintree[1, k], 0]),
                          int(self.joints2d.r[kintree[1, k], 1])),
                         (0.98, 0.98, 0.98), 3)
        if self.view_joints.isChecked():
            for j in self.joints2d.r:
                cv2.circle(img, (int(j[0]), int(j[1])), 5, (0.38, 0.68, 0.15), -1)
        if self.view_joint_ids.isChecked():
            for k, j in enumerate(self.joints2d.r):
                cv2.putText(img, str(k), (int(j[0]), int(j[1])),
                            cv2.FONT_HERSHEY_DUPLEX, 0.6, (0.3, 0.23, 0.9), 2)
        return img

    def _init_model(self, g):
        """Load the SMPL model for gender `g` ('f' or 'm'), carrying over
        any current pose/shape/translation, and rebind everything."""
        pose = None
        betas = None
        trans = None
        if self.model is not None:
            pose = self.model.pose.r
            betas = self.model.betas.r
            trans = self.model.trans.r
        if g == 'f':
            self.model = load_model('smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
        else:
            self.model = load_model('smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
        self._loaded_gender = g
        if pose is not None:
            self.model.pose[:] = pose
            self.model.betas[:] = betas
            self.model.trans[:] = trans
        # Rebind the new model to the light, renderer and both cameras.
        self.light.set(v=self.model, f=self.model.f, num_verts=len(self.model))
        self.rn.set(v=self.model, f=self.model.f)
        self.camera.set(v=self.model)
        self.joints2d.set(v=self.model.J_transformed)
        self.draw()

    def _init_camera(self):
        """Resize camera/frustum to the canvas and reposition the light."""
        w = self.canvas.width()
        h = self.canvas.height()
        # NOTE(review): `and` skips the update when only ONE dimension
        # changed -- possibly intended to be `or`; confirm.
        if w != self.frustum['width'] and h != self.frustum['height']:
            self.camera.set(rt=np.array([self.camera_widget.rot_0.value(),
                                         self.camera_widget.rot_1.value(),
                                         self.camera_widget.rot_2.value()]),
                            t=np.array([self.camera_widget.pos_0.value(),
                                        self.camera_widget.pos_1.value(),
                                        self.camera_widget.pos_2.value()]),
                            f=np.array([w, w]) * self.camera_widget.focal_len.value(),
                            c=np.array([w, h]) / 2.,
                            k=np.array([self.camera_widget.dist_0.value(),
                                        self.camera_widget.dist_1.value(),
                                        self.camera_widget.dist_2.value(),
                                        self.camera_widget.dist_3.value(),
                                        self.camera_widget.dist_4.value()]))
            self.frustum['width'] = w
            self.frustum['height'] = h
            # Reposition the light relative to the current camera pose.
            self.light.set(light_pos=Rodrigues(self.camera.rt).T.dot(self.camera.t) * -10.)
            self.rn.set(frustum=self.frustum, camera=self.camera)
            self.draw()

    def _save_config_dialog(self):
        """Save gender, shape, pose, translation and camera to an .ini file."""
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save config', None, 'Config File (*.ini)')
        if filename:
            with open(str(filename), 'w') as fp:
                config = ConfigParser.ConfigParser()
                config.add_section('Model')
                config.set('Model', 'gender', self._loaded_gender)
                config.set('Model', 'shape', ','.join(str(s) for s in self.model.betas.r))
                config.set('Model', 'pose', ','.join(str(p) for p in self.model.pose.r))
                config.set('Model', 'translation', ','.join(str(p) for p in self.model.trans.r))
                config.add_section('Camera')
                config.set('Camera', 'translation', ','.join(str(t) for t in self.camera.t.r))
                config.set('Camera', 'rotation', ','.join(str(r) for r in self.camera.rt.r))
                config.set('Camera', 'focal_length', self.camera_widget.focal_len.value())
                config.set('Camera', 'center', '{},{}'.format(self.camera_widget.center_0.value(),
                                                              self.camera_widget.center_1.value()))
                config.set('Camera', 'distortion', ','.join(str(r) for r in self.camera.k.r))
                config.write(fp)

    def _open_config_dialog(self):
        """Load a saved .ini config and push its values into the widgets."""
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Load config', None, 'Config File (*.ini)')
        if filename:
            config = ConfigParser.ConfigParser()
            config.read(str(filename))
            self._update_canvas = False  # batch the updates; redraw once at the end
            self._init_model(config.get('Model', 'gender'))
            shapes = np.fromstring(config.get('Model', 'shape'), dtype=np.float64, sep=',')
            poses = np.fromstring(config.get('Model', 'pose'), dtype=np.float64, sep=',')
            position = np.fromstring(config.get('Model', 'translation'), dtype=np.float64, sep=',')
            # Map raw values back to the 0..100 slider scale (inverse of
            # _update_shape / _update_pose below).
            for key, shape in self._shapes():
                val = shapes[key] / 5.0 * 50.0 + 50.0
                shape.setValue(val)
            for key, pose in self._poses():
                if key == 0:
                    val = (poses[key] - np.pi) / np.pi * 50.0 + 50.0
                else:
                    val = poses[key] / np.pi * 50.0 + 50.0
                pose.setValue(val)
            self.pos_0.setValue(position[0])
            self.pos_1.setValue(position[1])
            self.pos_2.setValue(position[2])
            cam_pos = np.fromstring(config.get('Camera', 'translation'), dtype=np.float64, sep=',')
            cam_rot = np.fromstring(config.get('Camera', 'rotation'), dtype=np.float64, sep=',')
            cam_dist = np.fromstring(config.get('Camera', 'distortion'), dtype=np.float64, sep=',')
            cam_c = np.fromstring(config.get('Camera', 'center'), dtype=np.float64, sep=',')
            cam_f = config.getfloat('Camera', 'focal_length')
            # Python 2 print statement (debug output).
            print cam_c
            self.camera_widget.set_values(cam_pos, cam_rot, cam_f, cam_c, cam_dist)
            self._update_canvas = True
            self.draw()

    def _save_screenshot_dialog(self):
        """Save the current render (with overlays) to an image file."""
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save screenshot', None, 'Images (*.png *.jpg *.ppm)')
        if filename:
            img = np.array(self.rn.r)
            if self.view_joints.isChecked() or self.view_joint_ids.isChecked() or self.view_bones.isChecked():
                img = self._draw_annotations(img)
            cv2.imwrite(str(filename), np.uint8(img * 255))

    def _save_mesh_dialog(self):
        """Export the posed mesh as a Wavefront .obj (1-based face indices)."""
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save mesh', None, 'Mesh (*.obj)')
        if filename:
            with open(filename, 'w') as fp:
                for v in self.model.r:
                    fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))
                for f in self.model.f + 1:
                    fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))

    def _zoom(self, event):
        # Wheel scroll dollies the camera along z via the camera widget.
        delta = -event.angleDelta().y() / 1200.0
        self.camera_widget.pos_2.setValue(self.camera_widget.pos_2.value() + delta)

    def _mouse_begin(self, event):
        # Button 1 pans, button 2 rotates.
        if event.button() == 1:
            self._moving = True
        elif event.button() == 2:
            self._rotating = True
        self._mouse_begin_pos = event.pos()

    def _mouse_end(self, event):
        self._moving = False
        self._rotating = False

    def _move(self, event):
        # Drag-to-pan / drag-to-rotate, applied through the camera
        # widget's spinboxes (which in turn update the camera and redraw).
        if self._moving:
            delta = event.pos() - self._mouse_begin_pos
            self.camera_widget.pos_0.setValue(self.camera_widget.pos_0.value() + delta.x() / 500.)
            self.camera_widget.pos_1.setValue(self.camera_widget.pos_1.value() + delta.y() / 500.)
            self._mouse_begin_pos = event.pos()
        elif self._rotating:
            delta = event.pos() - self._mouse_begin_pos
            self.camera_widget.rot_0.setValue(self.camera_widget.rot_0.value() + delta.y() / 300.)
            self.camera_widget.rot_1.setValue(self.camera_widget.rot_1.value() - delta.x() / 300.)
            self._mouse_begin_pos = event.pos()

    def _show_camera_widget(self):
        self.camera_widget.show()
        self.camera_widget.raise_()

    def _update_shape(self, id, val):
        # Slider 0..100 -> beta in [-5, 5].
        val = (val - 50) / 50.0 * 5.0
        self.model.betas[id] = val
        self.draw()

    def _reset_shape(self):
        self._update_canvas = False
        for key, shape in self._shapes():
            shape.setValue(50)
        self._update_canvas = True
        self.draw()

    def _update_pose(self, id, val):
        # Slider 0..100 -> angle in [-pi, pi]; entry 0 is offset by pi
        # (matching the upright flip applied in __init__).
        val = (val - 50) / 50.0 * np.pi
        if id == 0:
            val += np.pi
        self.model.pose[id] = val
        self.draw()

    def _reset_pose(self):
        self._update_canvas = False
        for key, pose in self._poses():
            pose.setValue(50)
        self._update_canvas = True
        self.draw()

    def _update_position(self, id, val):
        self.model.trans[id] = val
        self.draw()

    def _reset_position(self):
        self._update_canvas = False
        self.pos_0.setValue(0)
        self.pos_1.setValue(0)
        self.pos_2.setValue(0)
        self._update_canvas = True
        self.draw()

    def _poses(self):
        """Enumerate the 72 pose sliders in SMPL parameter order."""
        return enumerate([
            self.pose_0, self.pose_1, self.pose_2, self.pose_3, self.pose_4,
            self.pose_5, self.pose_6, self.pose_7, self.pose_8, self.pose_9,
            self.pose_10, self.pose_11, self.pose_12, self.pose_13, self.pose_14,
            self.pose_15, self.pose_16, self.pose_17, self.pose_18, self.pose_19,
            self.pose_20, self.pose_21, self.pose_22, self.pose_23, self.pose_24,
            self.pose_25, self.pose_26, self.pose_27, self.pose_28, self.pose_29,
            self.pose_30, self.pose_31, self.pose_32, self.pose_33, self.pose_34,
            self.pose_35, self.pose_36, self.pose_37, self.pose_38, self.pose_39,
            self.pose_40, self.pose_41, self.pose_42, self.pose_43, self.pose_44,
            self.pose_45, self.pose_46, self.pose_47, self.pose_48, self.pose_49,
            self.pose_50, self.pose_51, self.pose_52, self.pose_53, self.pose_54,
            self.pose_55, self.pose_56, self.pose_57, self.pose_58, self.pose_59,
            self.pose_60, self.pose_61, self.pose_62, self.pose_63, self.pose_64,
            self.pose_65, self.pose_66, self.pose_67, self.pose_68, self.pose_69,
            self.pose_70, self.pose_71,
        ])

    def _shapes(self):
        """Enumerate the 10 shape sliders."""
        return enumerate([
            self.shape_0, self.shape_1, self.shape_2, self.shape_3,
            self.shape_4, self.shape_5, self.shape_6, self.shape_7,
            self.shape_8, self.shape_9,
        ])

    @staticmethod
    def _to_pixmap(im):
        """Convert a float or uint8 image (gray or BGR) into a QPixmap."""
        if im.dtype == np.float32 or im.dtype == np.float64:
            im = np.uint8(im * 255)
        if len(im.shape) < 3 or im.shape[-1] == 1:
            im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
        else:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        qimg = QtGui.QImage(im, im.shape[1], im.shape[0], im.strides[0],
                            QtGui.QImage.Format_RGB888)
        return QtGui.QPixmap(qimg)
# Load two SMPL models (basicModel_m -- presumably the male model) from
# the same pickle and give each a small random shape (betas in [0, 0.03)).
m1 = load_model('/data/Guha/GR/code/GR19/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
m1.betas[:] = np.random.rand(m1.betas.size) * .03
m2 = load_model('/data/Guha/GR/code/GR19/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl')
m2.betas[:] = np.random.rand(m2.betas.size) * .03

## Create OpenDR renderer
rn1 = ColoredRenderer()
rn2 = ColoredRenderer()

## Assign attributes to renderer (identical 640x480 setups).
w, h = (640, 480)
rn1.camera = ProjectPoints(v=m1,
                           rt=np.zeros(3),
                           t=np.array([0, 0, 2.]),
                           f=np.array([w, w]) / 2.,
                           c=np.array([w, h]) / 2.,
                           k=np.zeros(5))
rn1.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn1.set(v=m1, f=m1.f, bgcolor=np.zeros(3))
rn2.camera = ProjectPoints(v=m2,
                           rt=np.zeros(3),
                           t=np.array([0, 0, 2.]),
                           f=np.array([w, w]) / 2.,
                           c=np.array([w, h]) / 2.,
                           k=np.zeros(5))
rn2.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn2.set(v=m2, f=m2.f, bgcolor=np.zeros(3))

## Construct point light source
# NOTE(review): only rn1 receives a light in this chunk; rn2.vc is never
# assigned here -- confirm rn2's lighting is set up further down.
rn1.vc = LambertianPointLight(
    f=m1.f,
    v=rn1.v,
    num_verts=len(m1),
    light_pos=np.array([-1000, -1000, -2000]),
    vc=np.ones_like(m1) * .9,
    light_color=np.array([1., 1., 1.]))
def render(self, image, cam, K, verts, face, draw_id=''):
    """Render `verts`/`face` with OpenDR using intrinsics `K` and weak-
    perspective camera `cam`, composite the mesh over `image`, and return
    (original image, resized image, image with mesh overlay, RGBA render
    of the mesh rotated 90 degrees about Y).

    image  -- CHW image; transposed to HWC internally.
    cam    -- [s, tx, ty]-style parameters: cam[1:] translate, cam[0]
              scales the depth term below.
    K      -- 3x3 intrinsics matrix (fx, fy, cx, cy are read from it).
    draw_id -- suffix for the (unreachable, see note at the end) imsave calls.
    """
    # (Removed: a block of commented-out torch/SMPL pose experiments that
    # predate this verts-based interface.)

    ## Create OpenDR renderer
    rn = ColoredRenderer()
    # print(rn.msaa)
    # rn.msaa = True

    ## Assign attributes to renderer; everything is scaled by self.ratio.
    w, h = (224 * self.ratio, 224 * self.ratio)
    f = np.array([K[0, 0], K[1, 1]]) * float(self.ratio)
    c = np.array([K[0, 2], K[1, 2]]) * float(self.ratio)
    # Depth from the weak-perspective scale cam[0] (1e-9 avoids /0).
    t = np.array([cam[1], cam[2], 2 * K[0, 0] / (224. * cam[0] + 1e-9)])
    rn.camera = ProjectPoints(v=verts, rt=np.zeros(3), t=t, f=f, c=c, k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 100., 'width': w, 'height': h}
    # [:, [1, 0, 2]]

    albedo = np.ones_like(verts) * .9  # albedo(6890, 3)(6890, 3)(13776, 3)

    # Candidate mesh colors. NOTE(review): the trailing color-name
    # comments are offset by one line from the values they describe.
    color1 = np.array([0.85490196, 0.96470588, 0.96470588])  # light steel blue
    # color1 = np.array([i / 255. for i in [176, 196, 222]])
    # color1 = np.array([i / 255. for i in [168, 173, 180]])
    # color2 = np.array([i / 255. for i in [255, 244, 229]])
    color2 = np.array([i / 255. for i in [181, 178, 146]])
    color3 = np.array([i / 255. for i in [190, 178, 167]])  # beige
    # color4 = np.array([i / 255. for i in [245, 245, 220]])  # wheat
    color4 = np.array([i / 255. for i in [245, 222, 179]])  # thistle
    # color5 = np.array([i / 255. for i in [216, 191, 216]])
    color5 = np.array([i / 255. for i in [183, 166, 173]])  # aqua marine
    color6 = np.array([i / 255. for i in [127, 255, 212]])  # turquoise
    color7 = np.array([i / 255. for i in [64, 224, 208]])  # medium turquoise
    color8 = np.array([i / 255. for i in [72, 209, 204]])  # honeydew
    color9 = np.array([i / 255. for i in [240, 255, 240]])  # burly wood
    color10 = np.array([i / 255. for i in [222, 184, 135]])  # sandy brown
    color11 = np.array([i / 255. for i in [244, 164, 96]])  # floral white Ours
    color12 = np.array([i / 255. for i in [255, 250, 240]])  # medium slate blue SPIN
    color13 = np.array([i / 255. for i in [72 * 2.5, 61 * 2.5, 255]])
    # color_list = [color1, color2, color3, color4, color5]
    color_list = [
        color6, color7, color8, color9, color10, color11, color12, color13
    ]
    # color_list = color_list + [color13]
    # color = color_list[int(len(color_list) * float(np.random.rand(1)))]
    # color = color_list[-1]

    # Pick mesh color plus the three light colors from self.color.
    # NOTE(review): any self.color other than 'white'/'blue' leaves
    # `color`/`color0..2` undefined and raises NameError below.
    if self.color in ['white']:
        color = color12
        color0 = np.array([1, 1, 1])
        color1 = np.array([1, 1, 1])
        color2 = np.array([0.7, 0.7, 0.7])
    elif self.color in ['blue']:
        color = color13
        color0 = color
        color1 = color
        color2 = color

    # rn.set(v=m*np.array([1,-1,1]), f=m.f, bgcolor=np.zeros(3))
    rn.set(v=verts, f=face, vc=color, bgcolor=np.zeros(3))
    # rn.set(v=rotateY(verts, np.radians(90)), f=self.m.f, bgcolor=np.zeros(3))

    ## Construct point light source
    # rn.vc = LambertianPointLight(
    #     f=m.f,
    #     v=rn.v,
    #     num_verts=len(m),
    #     light_pos=np.array([-1000,-1000,-2000]),
    #     vc=np.ones_like(m)*.9,
    #     light_color=np.array([1., 1., 1.]))

    # All three lights are rotated 120 degrees about Y.
    yrot = np.radians(120)
    '''
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([-200, -100, -100]),
        vc=albedo,
        light_color=color)
    # Construct Left Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([500, 10, -200]),
        vc=albedo,
        light_color=color)
    # Construct Right Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([-300, 100, 600]),
        vc=albedo,
        light_color=color)
    '''
    # 1. 1. 0.7
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=color0)
    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([800, 10, 300]), yrot),
                                  vc=albedo,
                                  light_color=color1)
    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=color2)
    # render_smpl = rn.r
    ## Construct point light source
    # rn.vc += SphericalHarmonics(light_color=np.array([1., 1., 1.]))

    # Bring the CHW input into HWC and scale it to the render size.
    img_orig = np.transpose(image, (1, 2, 0))
    img_resized = resize(
        img_orig,
        (img_orig.shape[0] * self.ratio, img_orig.shape[1] * self.ratio),
        anti_aliasing=True)

    # Composite: copy rendered pixels wherever a face is visible
    # (4294967295 = 2**32 - 1 presumably marks background pixels in
    # OpenDR's visibility_image -- confirm).
    img_smpl = img_resized.copy()
    img_smpl[rn.visibility_image != 4294967295] = rn.r[
        rn.visibility_image != 4294967295]
    '''
    ax_stack = plt.subplot(2, 2, 3)
    ax_stack.imshow(img_smpl)
    plt.axis('off')
    '''

    # Second render: the mesh rotated 90 degrees about Y, exported as
    # RGBA with the visibility mask in the alpha channel.
    rn.set(v=rotateY(verts, np.radians(90)), f=face, bgcolor=np.zeros(3))
    render_smpl = rn.r
    # rn.set(v=rotateY(verts, np.radians(90)), f=self.m.f, bgcolor=np.zeros(3))
    render_smpl_rgba = np.zeros(
        (render_smpl.shape[0], render_smpl.shape[1], 4))
    render_smpl_rgba[:, :, :3] = render_smpl
    render_smpl_rgba[:, :, 3][rn.visibility_image != 4294967295] = 255
    '''
    ax_img = plt.subplot(2, 2, 1)
    ax_img.imshow(np.transpose(image, (1, 2, 0)))
    plt.axis('off')
    ax_smpl = plt.subplot(2, 2, 2)
    ax_smpl.imshow(render_smpl_rgba)
    plt.axis('off')
    '''
    return img_orig, img_resized, img_smpl, render_smpl_rgba

    # NOTE(review): everything below is dead code (after the return).
    # img_uv = np.transpose(uvimage_front[0].cpu().numpy(), (1, 2, 0))
    # img_uv[img_uv == 0] = img_show[img_uv == 0]
    # plt.show()
    # save_path = './notebooks/output/upimgs/'
    save_path = './notebooks/output/demo_results-v2/'
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    matplotlib.image.imsave(save_path + 'img_' + draw_id + '.png', img_orig)
    matplotlib.image.imsave(save_path + 'img_smpl_' + draw_id + '.png', img_smpl)
    matplotlib.image.imsave(save_path + 'smpl_' + draw_id + '.png', render_smpl_rgba)
class Renderer(object):
    """Render a mesh using OpenDR for visualization.

    The constructor's `near`/`far` parameters are accepted for interface
    compatibility but the frustum is recomputed per render() call.
    """

    def __init__(self, width=800, height=600, near=0.5, far=1000, faces=None):
        # Named body colors; render() defaults to 'light_blue'.
        self.colors = {
            'pink': [.9, .7, .7],
            'light_blue': [0.65098039, 0.74117647, 0.85882353]
        }
        self.width = width
        self.height = height
        self.faces = faces
        self.renderer = ColoredRenderer()

    def render(self,
               vertices,
               faces=None,
               img=None,
               camera_t=np.zeros([3], dtype=np.float32),
               camera_rot=np.zeros([3], dtype=np.float32),
               camera_center=None,
               use_bg=False,
               bg_color=(0.0, 0.0, 0.0),
               body_color=None,
               focal_length=5000,
               disp_text=False,
               gt_keyp=None,
               pred_keyp=None,
               **kwargs):
        """Render `vertices` (optionally over `img`) and return the image.

        body_color -- key into self.colors; None falls back to 'light_blue'.
        disp_text, gt_keyp, pred_keyp are currently unused and kept only
        for interface compatibility.
        """
        if img is not None:
            height, width = img.shape[:2]
        else:
            height, width = self.height, self.width

        if faces is None:
            faces = self.faces

        if camera_center is None:
            camera_center = np.array([width * 0.5, height * 0.5])

        self.renderer.camera = ProjectPoints(rt=camera_rot,
                                             t=camera_t,
                                             f=focal_length * np.ones(2),
                                             c=camera_center,
                                             k=np.zeros(5))
        # Push the far plane 20 units past the mesh's mean depth.
        dist = np.abs(self.renderer.camera.t.r[2] -
                      np.mean(vertices, axis=0)[2])
        far = dist + 20

        self.renderer.frustum = {
            'near': 1.0,
            'far': far,
            'width': width,
            'height': height
        }

        if img is not None:
            if use_bg:
                self.renderer.background_image = img
            else:
                self.renderer.background_image = np.ones_like(img) * np.array(
                    bg_color)

        # BUGFIX: the default branch looked up self.colors['blue'], a key
        # that does not exist (only 'pink' and 'light_blue' are defined),
        # so every call without body_color raised KeyError.
        if body_color is None:
            color = self.colors['light_blue']
        else:
            color = self.colors[body_color]

        if isinstance(self.renderer, TexturedRenderer):
            color = [1., 1., 1.]

        self.renderer.set(v=vertices, f=faces, vc=color, bgcolor=np.ones(3))
        albedo = self.renderer.vc

        # Three Lambertian point lights rotated 120 degrees about Y:
        # back-right, left, and a dimmer (0.7) right fill light.
        yrot = np.radians(120)

        # Construct Back Light (on back right corner)
        self.renderer.vc = LambertianPointLight(
            f=self.renderer.f,
            v=self.renderer.v,
            num_verts=self.renderer.v.shape[0],
            light_pos=rotateY(np.array([-200, -100, -100]), yrot),
            vc=albedo,
            light_color=np.array([1, 1, 1]))

        # Construct Left Light
        self.renderer.vc += LambertianPointLight(
            f=self.renderer.f,
            v=self.renderer.v,
            num_verts=self.renderer.v.shape[0],
            light_pos=rotateY(np.array([800, 10, 300]), yrot),
            vc=albedo,
            light_color=np.array([1, 1, 1]))

        # Construct Right Light
        self.renderer.vc += LambertianPointLight(
            f=self.renderer.f,
            v=self.renderer.v,
            num_verts=self.renderer.v.shape[0],
            light_pos=rotateY(np.array([-500, 500, 1000]), yrot),
            vc=albedo,
            light_color=np.array([.7, .7, .7]))

        return self.renderer.r
def test_occlusion(self):
    """Check OpenDR's analytic image derivatives against finite differences
    in a scene with occlusion.

    Two copies of the earth mesh (one red, one green) are flattened to a
    common depth and overlapped; the rendered image's derivative w.r.t.
    each mesh's translation (dr_wrt) must agree with central finite
    differences, with the sum of absolute deviation under per-axis limits.
    """
    if visualize:
        import matplotlib.pyplot as plt
        plt.ion()

    # Create renderer
    import chumpy as ch
    import numpy as np
    from opendr.renderer import TexturedRenderer, ColoredRenderer
    #rn = TexturedRenderer()
    rn = ColoredRenderer()

    # Assign attributes to renderer
    from util_tests import get_earthmesh
    m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
    rn.texture_image = m.texture_image
    rn.ft = m.ft
    rn.vt = m.vt
    # Flatten the mesh to a single depth plane so the two copies below
    # genuinely occlude each other when translated in x.
    m.v[:,2] = np.mean(m.v[:,2])

    # red is front and zero
    # green is back and 1
    # t0/t1 are the differentiable translations of the two mesh copies.
    t0 = ch.array([1,0,.1])
    t1 = ch.array([-1,0,.1])
    v0 = ch.array(m.v) + t0

    if False:
        v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
    else:
        v1 = ch.array(m.v) + t1

    # Per-copy flat vertex colours: copy 0 red-ish, copy 1 green-ish.
    vc0 = v0*0 + np.array([[.4,0,0]])
    vc1 = v1*0 + np.array([[0,.4,0]])

    # Concatenate both copies into one scene; faces of the second copy are
    # offset by the first copy's vertex count.
    vc = ch.vstack((vc0, vc1))
    v = ch.vstack((v0, v1))
    f = np.vstack((m.f, m.f+len(v0)))

    w, h = (320, 240)
    rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.camera.t = ch.array([0,0,-2.5])
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    m.vc = v.r*0 + np.array([[1,0,0]])
    rn.set(v=v, f=f, vc=vc)

    # Move the copies so they overlap slightly in x and differ in depth,
    # creating an occlusion boundary.
    t0[:] = np.array([1.4, 0, .1-.02])
    t1[:] = np.array([-0.6, 0, .1+.02])

    target = rn.r
    if visualize:
        plt.figure()
        plt.imshow(target)
        plt.title('target')

        plt.figure()
        plt.show()

    im_orig = rn.r.copy()

    from cvwrap import cv2

    tr = t0
    # eps_emp: finite-difference step; eps_pred: scale applied to the
    # analytic derivative so both images are directly comparable.
    eps_emp = .02
    eps_pred = .02

    #blur = lambda x : cv2.blur(x, ksize=(5,5))
    blur = lambda x : x
    for tr in [t0, t1]:
        # Per-translation-axis limits on the total |empirical - predicted|.
        if tr is t0:
            sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
        else:
            sum_limits = [1., 5., 4.]

        if visualize:
            plt.figure()
        for i in range(3):
            # Analytic derivative of the image w.r.t. translation axis i.
            dr_pred = np.array(rn.dr_wrt(tr[i]).todense()).reshape(rn.shape) * eps_pred
            dr_pred = blur(dr_pred)

            # central differences: render at +eps/2 and -eps/2, then restore.
            tr[i] = tr[i].r + eps_emp/2.
            rn_greater = rn.r.copy()
            tr[i] = tr[i].r - eps_emp/1.
            rn_lesser = rn.r.copy()
            tr[i] = tr[i].r + eps_emp/2.

            dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

            # Clipped/offset copies only for display.
            dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
            dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

            if visualize:
                plt.subplot(3,3,i+1)
                plt.imshow(dr_pred_shown)
                plt.title('pred')
                plt.axis('off')

                plt.subplot(3,3,3+i+1)
                plt.imshow(dr_emp_shown)
                plt.title('empirical')
                plt.axis('off')

                plt.subplot(3,3,6+i+1)

            diff = np.abs(dr_emp - dr_pred)
            if visualize:
                plt.imshow(diff)
            diff = diff.ravel()
            if visualize:
                plt.title('diff (sum: %.2e)' % (np.sum(diff)))
                plt.axis('off')

            # print 'dr pred sum: %.2e' % (np.sum(np.abs(dr_pred.ravel())),)
            # print 'dr emp sum: %.2e' % (np.sum(np.abs(dr_emp.ravel())),)
            #import pdb; pdb.set_trace()
            self.assertTrue(np.sum(diff) < sum_limits[i])
def render(self, image, K, verts):
    """Render the mesh over the input image and from a 90-degree side view.

    Args:
        image: input image as a CHW array (transposed to HWC internally).
        K: camera intrinsics, indexed as a batched 3x3 matrix
           (K[0, 0, 0]=fx, K[0, 1, 1]=fy, K[0, 0, 2]=cx, K[0, 1, 2]=cy).
        verts: (N, 3) posed mesh vertices.

    Returns:
        Tuple (img_orig, img_resized, img_smpl, smpl_rgba):
        the HWC input, its upscaled copy, the composite with the rendered
        mesh pasted over it, and an RGBA side-view render.
    """
    ## Create OpenDR renderer
    rn = ColoredRenderer()

    ## Assign attributes to renderer; intrinsics are scaled by self.ratio.
    w, h = (224 * self.ratio, 224 * self.ratio)
    f = np.array([K[0, 0, 0], K[0, 1, 1]]) * float(self.ratio)
    c = np.array([K[0, 0, 2], K[0, 1, 2]]) * float(self.ratio)
    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 5.]),
                              f=f,
                              c=c,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}

    # NOTE(review): albedo is sized from self.m rather than verts; this
    # assumes they have the same shape — confirm against the caller.
    albedo = np.ones_like(self.m) * .9

    # Mesh colour: floral white. (Removed eleven unused colour locals
    # color1..color11 that were never referenced and carried misaligned
    # colour-name comments.)
    color = np.array([i / 255. for i in [255, 250, 240]])
    rn.set(v=verts, f=self.m.f, vc=color, bgcolor=np.zeros(3))

    # Three Lambertian point lights; positions rotated about Y.
    yrot = np.radians(120)
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=np.array([1, 1, 1]))
    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([800, 10, 300]), yrot),
                                  vc=albedo,
                                  light_color=np.array([1, 1, 1]))
    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=np.array([.7, .7, .7]))

    img_orig = np.transpose(image, (1, 2, 0))
    img_resized = resize(
        img_orig,
        (img_orig.shape[0] * self.ratio, img_orig.shape[1] * self.ratio),
        anti_aliasing=True)

    # Composite: visibility 4294967295 (0xFFFFFFFF) marks background, so
    # every other pixel belongs to a rendered face and is pasted over.
    img_smpl = img_resized.copy()
    img_smpl[rn.visibility_image != 4294967295] = rn.r[
        rn.visibility_image != 4294967295]

    # Side view: same mesh rotated 90 degrees about Y, rendered onto an
    # RGBA canvas with alpha set on foreground pixels only.
    rn.set(v=rotateY(verts, np.radians(90)), f=self.m.f, bgcolor=np.zeros(3))
    render_smpl = rn.r
    smpl_rgba = np.zeros((render_smpl.shape[0], render_smpl.shape[1], 4))
    smpl_rgba[:, :, :3] = render_smpl
    smpl_rgba[:, :, 3][rn.visibility_image != 4294967295] = 255

    return img_orig, img_resized, img_smpl, smpl_rgba
m = load_model('../../models/basicModel_f_lbs_10_207_0_v1.0.0.pkl') ## Assign random pose and shape parameters m.pose[:] = np.random.rand(m.pose.size) * .2 m.betas[:] = np.random.rand(m.betas.size) * .03 m.pose[0] = np.pi ## Create OpenDR renderer rn = ColoredRenderer() ## Assign attributes to renderer w, h = (640, 480) rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w,w])/2., c=np.array([w,h])/2., k=np.zeros(5)) rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn.set(v=m, f=m.f, bgcolor=np.zeros(3)) ## Construct point light source rn.vc = LambertianPointLight( f=m.f, v=rn.v, num_verts=len(m), light_pos=np.array([-1000,-1000,-2000]), vc=np.ones_like(m)*.9, light_color=np.array([1., 1., 1.])) ## Show it using OpenCV import cv2 cv2.imwrite( "./test.png", rn.r ) print 'image saved'
KKK = chf.cam_compute_intrinsic(res) TTT = RT[:3, 3] RRR = RT[:3, :3] RRR = RRR * opencv2opengl fff = model.f.copy() vvv = model.r.copy() rn.camera = ProjectPoints(f=ch.array([KKK[0, 0], KKK[1, 1]]), rt=cv2.Rodrigues(RRR)[0].flatten(), t=ch.array(TTT), k=ch.array([0, 0, 0, 0]), c=ch.array([KKK[0, 2], KKK[1, 2]])) rn.frustum = {'near': 0.1, 'far': 15., 'width': w, 'height': h} rn.set(v=vvv, f=fff, bgcolor=ch.zeros(3)) rn.background_image = img / 255. if img.max() > 1 else img # Construct point light source rn.vc = LambertianPointLight(f=model.f, v=rn.v, num_verts=len(model), light_pos=ch.array([0, 0, 0]), vc=np.ones_like(model) * .5, light_color=ch.array([-100., -100., -100.])) if toOpenGL: cv2.imshow('render_SMPL', rn.r) cv2.waitKey(0) cv2.destroyAllWindows() else:
def test_occlusion(self):
    """Check OpenDR's analytic image derivatives against finite differences
    in a scene with occlusion.

    Duplicate of the earlier test_occlusion except that the sparse
    derivative is densified with .toarray() instead of .todense().
    Two flattened earth meshes (red and green) overlap; the analytic
    derivative of the image w.r.t. each translation axis must match
    central finite differences within per-axis sum limits.
    """
    if visualize:
        import matplotlib.pyplot as plt
        plt.ion()

    # Create renderer
    import chumpy as ch
    import numpy as np
    from opendr.renderer import TexturedRenderer, ColoredRenderer
    #rn = TexturedRenderer()
    rn = ColoredRenderer()

    # Assign attributes to renderer
    from util_tests import get_earthmesh
    m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
    rn.texture_image = m.texture_image
    rn.ft = m.ft
    rn.vt = m.vt
    # Flatten to one depth plane so the two copies occlude each other.
    m.v[:,2] = np.mean(m.v[:,2])

    # red is front and zero
    # green is back and 1
    # t0/t1 are the differentiable translations of the two copies.
    t0 = ch.array([1,0,.1])
    t1 = ch.array([-1,0,.1])
    v0 = ch.array(m.v) + t0

    if False:
        v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
    else:
        v1 = ch.array(m.v) + t1

    # Flat per-copy vertex colours (red-ish / green-ish).
    vc0 = v0*0 + np.array([[.4,0,0]])
    vc1 = v1*0 + np.array([[0,.4,0]])

    # One combined scene; second copy's faces offset by len(v0).
    vc = ch.vstack((vc0, vc1))
    v = ch.vstack((v0, v1))
    f = np.vstack((m.f, m.f+len(v0)))

    w, h = (320, 240)
    rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
    rn.camera.t = ch.array([0,0,-2.5])
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    m.vc = v.r*0 + np.array([[1,0,0]])
    rn.set(v=v, f=f, vc=vc)

    # Overlap the copies slightly in x with a small depth difference to
    # produce an occlusion boundary.
    t0[:] = np.array([1.4, 0, .1-.02])
    t1[:] = np.array([-0.6, 0, .1+.02])

    target = rn.r
    if visualize:
        plt.figure()
        plt.imshow(target)
        plt.title('target')

        plt.figure()
        plt.show()

    im_orig = rn.r.copy()

    from cvwrap import cv2

    tr = t0
    # Finite-difference step and analytic-derivative scale.
    eps_emp = .02
    eps_pred = .02

    #blur = lambda x : cv2.blur(x, ksize=(5,5))
    blur = lambda x : x
    for tr in [t0, t1]:
        # Per-axis limits on the summed absolute deviation.
        if tr is t0:
            sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
        else:
            sum_limits = [1., 5., 4.]

        if visualize:
            plt.figure()
        for i in range(3):
            # Analytic derivative w.r.t. translation axis i (dense copy).
            dr_pred = np.array(rn.dr_wrt(tr[i]).toarray()).reshape(rn.shape) * eps_pred
            dr_pred = blur(dr_pred)

            # central differences: render at +eps/2 and -eps/2, then restore.
            tr[i] = tr[i].r + eps_emp/2.
            rn_greater = rn.r.copy()
            tr[i] = tr[i].r - eps_emp/1.
            rn_lesser = rn.r.copy()
            tr[i] = tr[i].r + eps_emp/2.

            dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

            # Display-only clipped/offset copies.
            dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
            dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

            if visualize:
                plt.subplot(3,3,i+1)
                plt.imshow(dr_pred_shown)
                plt.title('pred')
                plt.axis('off')

                plt.subplot(3,3,3+i+1)
                plt.imshow(dr_emp_shown)
                plt.title('empirical')
                plt.axis('off')

                plt.subplot(3,3,6+i+1)

            diff = np.abs(dr_emp - dr_pred)
            if visualize:
                plt.imshow(diff)
            diff = diff.ravel()
            if visualize:
                plt.title('diff (sum: %.2e)' % (np.sum(diff)))
                plt.axis('off')

            # print 'dr pred sum: %.2e' % (np.sum(np.abs(dr_pred.ravel())),)
            # print 'dr emp sum: %.2e' % (np.sum(np.abs(dr_emp.ravel())),)
            #import pdb; pdb.set_trace()
            self.assertTrue(np.sum(diff) < sum_limits[i])
def render(self, image, cam, K, verts, face): ## Create OpenDR renderer rn = ColoredRenderer() ## Assign attributes to renderer w, h = (224 * self.ratio, 224 * self.ratio) f = np.array([K[0, 0], K[1, 1]]) * float(self.ratio) c = np.array([K[0, 2], K[1, 2]]) * float(self.ratio) t = np.array([cam[1], cam[2], 2 * K[0, 0] / (224. * cam[0] + 1e-9)]) rn.camera = ProjectPoints(v=verts, rt=np.zeros(3), t=t, f=f, c=c, k=np.zeros(5)) rn.frustum = {'near': 1., 'far': 100., 'width': w, 'height': h} albedo = np.ones_like(verts)*.9 if self.color is not None: color0 = self.color color1 = self.color color2 = self.color else: # white color0 = np.array([1, 1, 1]) color1 = np.array([1, 1, 1]) color2 = np.array([0.7, 0.7, 0.7]) rn.set(v=verts, f=face, bgcolor=np.zeros(3)) yrot = np.radians(120) rn.vc = LambertianPointLight( f=rn.f, v=rn.v, num_verts=len(rn.v), light_pos=rotateY(np.array([-200, -100, -100]), yrot), vc=albedo, light_color=color0) # Construct Left Light rn.vc += LambertianPointLight( f=rn.f, v=rn.v, num_verts=len(rn.v), light_pos=rotateY(np.array([800, 10, 300]), yrot), vc=albedo, light_color=color1) # Construct Right Light rn.vc += LambertianPointLight( f=rn.f, v=rn.v, num_verts=len(rn.v), light_pos=rotateY(np.array([-500, 500, 1000]), yrot), vc=albedo, light_color=color2) img_orig = np.transpose(image, (1, 2, 0)) img_resized = resize(img_orig, (img_orig.shape[0] * self.ratio, img_orig.shape[1] * self.ratio), anti_aliasing=True) img_smpl = img_resized.copy() img_smpl[rn.visibility_image != 4294967295] = rn.r[rn.visibility_image != 4294967295] rn.set(v=rotateY(verts, np.radians(90)), f=face, bgcolor=np.zeros(3)) render_smpl = rn.r render_smpl_rgba = np.zeros((render_smpl.shape[0], render_smpl.shape[1], 4)) render_smpl_rgba[:, :, :3] = render_smpl render_smpl_rgba[:, :, 3][rn.visibility_image != 4294967295] = 255 return img_orig, img_resized, img_smpl, render_smpl_rgba
class MANOWrapper:
    """
    Custom wrapper to interact with a MANO hand model more easily.

    Wraps a loaded MANO model `m`, sets a randomized shape and a neutral
    pose, prepares an OpenDR renderer with two point lights, and opens a
    1x3 MeshViewers grid for display.
    """
    def __init__(self, m):
        self.m = m
        # Randomize the shape coefficients slightly.
        self.m.betas[:] = np.random.rand(m.betas.size) * .3
        # m.pose[:] = np.random.rand(m.pose.size) * .2
        self.m.pose[:3] = [0., 0., 0.]      # global wrist rotation
        self.m.pose[3:] = np.zeros(45)      # articulation coefficients
        # m.pose[3:] = [-0.42671473, -0.85829819, -0.50662164, +1.97374622, -0.84298473, -1.29958491]
        self.m.pose[0] = np.pi              # flip the hand upright for the camera

        # compute inverse components to map from fullpose spec to coefficients
        # (assumes hands_components is square/invertible — TODO confirm the
        # model was loaded with the full component set)
        hands_components = np.asarray(m.hands_components)
        self.hands_components_inv = np.linalg.inv(hands_components)

        # rendering components
        # Assign attributes to renderer
        w, h = (640, 480)
        # Create OpenDR renderer
        self.rn = ColoredRenderer()
        self.rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([-0.03, -0.04, 0.20]), f=np.array([w, w]) / 2., c=np.array([w, h]) / 2., k=np.zeros(5))
        self.rn.frustum = {'near': 0.01, 'far': 2., 'width': w, 'height': h}
        self.rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
        # Construct point light source (two lights on opposite sides, summed)
        self.rn.vc = LambertianPointLight(f=m.f, v=self.rn.v, num_verts=len(m), light_pos=np.array(
            [-1000, -1000, -2000]), vc=np.ones_like(m) * .9, light_color=np.array([1., 1., 1.]))
        self.rn.vc += LambertianPointLight(f=m.f, v=self.rn.v, num_verts=len(m), light_pos=np.array(
            [+2000, +2000, +2000]), vc=np.ones_like(m) * .9, light_color=np.array([1., 1., 1.]))

        # Three side-by-side viewer panes used by render().
        self.mvs = MeshViewers(window_width=2000, window_height=800, shape=[1, 3])

    def set_hand_rotation(self, hand_angles):
        """
        Set the global hand (wrist) rotation as a 3-vector of angles.
        """
        self.m.pose[:3] = hand_angles

    def set_joint_rotation(self, joint_angles):
        """
        Set the joint articulation from full per-joint angles by projecting
        them onto the model's pose coefficient space.
        """
        coeffs = joint_angles.reshape(-1, ) @ self.hands_components_inv
        self.m.pose[3:] = coeffs

    def get_hand_rotation(self):
        """
        Get the global hand (wrist) rotation from the full pose.
        """
        return self.m.fullpose[:3]

    def get_joint_rotation(self):
        """
        Get the joint articulation as (15, 3) per-joint angles from the
        full pose.
        """
        return self.m.fullpose[3:].reshape(15, 3)

    def render(self):
        """
        Display the current hand in the three viewer panes:
        mesh + joint spheres, mesh only, and joints over a point cloud.
        """
        radius = .01
        model_Mesh = Mesh(v=self.m.r, f=self.m.f)
        # One sphere per joint; the root joint gets a distinct colour.
        model_Joints = [
            Sphere(np.array(jointPos), radius).to_mesh(np.eye(3)[0 if jointID == 0 else 1])
            for jointID, jointPos in enumerate(self.m.J_transformed)
        ]
        self.mvs[0][0].set_static_meshes([model_Mesh] + model_Joints, blocking=True)
        self.mvs[0][1].set_static_meshes([model_Mesh], blocking=True)
        # Faceless mesh: renders the vertices only.
        model_Mesh = Mesh(v=self.m.r, f=[])
        self.mvs[0][2].set_static_meshes([model_Mesh] + model_Joints, blocking=True)
from opendr.camera import ProjectPoints from opendr.lighting import LambertianPointLight rn = ColoredRenderer() # Assign attributes to renderer w, h = (640, 480) rn.camera = ProjectPoints(v=smpl, rt=np.zeros(3), t=np.array([0, 0, 3.]), f=np.array([w, w]), c=np.array([w, h]) / 2., k=np.zeros(5)) rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn.set(v=smpl, f=smpl.f, bgcolor=np.zeros(3)) # Construct point light source rn.vc = LambertianPointLight(f=smpl.f, v=rn.v, num_verts=len(smpl), light_pos=np.array([-1000, -1000, -2000]), vc=np.ones_like(smpl) * .9, light_color=np.array([1., 1., 1.])) # Show it using OpenCV import cv2 cv2.imshow('render_SMPL', rn.r) print('..Print any key while on the display window') cv2.waitKey(0)
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## m.pose[3:] = input_passed[i, :] # Create OpenDR renderer rn = ColoredRenderer() ## Assign attributes to renderer w, h = (640, 480) rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2., c=np.array([w, h]) / 2., k=np.zeros(5)) rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h} rn.set(v=m, f=m.f, bgcolor=np.ones(3)) ## Construct point light source rn.vc = LambertianPointLight(f=m.f, v=rn.v, num_verts=len(m), light_pos=np.array([-1000, -1000, -6000]), vc=np.ones_like(m) * .9, light_color=np.array([1., 1., 1.])) ## Show it using Open12 INPUT = (rn.r).copy() cv2.imwrite('VISUALIZATION/' + expmtname + '/INPUT/' + str(i) + '.jpg', rn.r * 255) ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## m.pose[3:] = predictions[i, :]