Example #1
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(120),
                    color=colors['light_pink']):
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc

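    # Construct Back Light (on back right corner)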
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=_rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=np.array([1, 1, 1]))

    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=_rotateY(np.array([800, 10, 300]),
                                                     yrot),
                                  vc=albedo,
                                  light_color=np.array([1, 1, 1]))

    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=_rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=np.array([.7, .7, .7]))

    return rn.r
Example #2
def render_color_model_with_lighting(w, h, v, vn, vc, f, u,
                                      sh_comps=None, light_c=ch.ones(3),
                                      vlight_pos=None, vlight_color=None,
                                      bg_img=None):
    """renders colored model with lighting effect"""
    assert(sh_comps is not None or vlight_pos is not None)
    V = ch.array(v)
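    # A accumulates the per-vertex color contribution of each lighting term added below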
    A = np.zeros_like(v)

    # SH lighting
    if sh_comps is not None:
        A += vc * SphericalHarmonics(vn=vn, components=sh_comps, light_color=light_c)

    # single point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 1:
        A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlight_pos,
                                  light_color=vlight_color, vc=vc)

    # multiple point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 2:
        for vlp in vlight_pos:
            A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlp,
                                      light_color=vlight_color, vc=vc)

    black_img = np.array(np.zeros((w, h, 3)), dtype=np.float32)
    bg_img_ = bg_img if bg_img is not None else black_img

    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, background_image=bg_img_,
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
Example #3
    def _update_model(self, id):
        print(id)
        self.model = None
        self.model_type = model_type_list[id]

        self._update_canvas = False

        self.light = LambertianPointLight(vc=np.array([0.98, 0.98, 0.98]),
                                          light_color=np.array([1., 1., 1.]))

        self.rn.set(glMode='glfw',
                    bgcolor=np.ones(3),
                    frustum=self.frustum,
                    camera=self.camera,
                    vc=self.light,
                    overdraw=False)

        self._init_model()
        self.model.pose[0] = np.pi
        self._init_camera(update_camera=True)

        self._reset_shape()
        self._reset_expression()
        self._reset_pose()
        self._reset_position()

        self._update_canvas = True
        self.draw()
Example #4
    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)

        self._moving = False
        self._rotating = False
        self._mouse_begin_pos = None
        self._loaded_gender = None
        self._update_canvas = False

        self.camera = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.joints2d = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.frustum = {'near': 0.1, 'far': 1000., 'width': 100, 'height': 30}
        self.light = LambertianPointLight(vc=np.array([0.94, 0.94, 0.94]), light_color=np.array([1., 1., 1.]))
        self.rn = ColoredRenderer(bgcolor=np.ones(3), frustum=self.frustum, camera=self.camera, vc=self.light,
                                  overdraw=False)

        self.model = None
        self._init_model('f')
        self.model.pose[0] = np.pi

        self.camera_widget = Ui_CameraWidget(self.camera, self.frustum, self.draw)
        self.btn_camera.clicked.connect(lambda: self._show_camera_widget())

        for key, shape in self._shapes():
            shape.valueChanged[int].connect(lambda val, k=key: self._update_shape(k, val))

        for key, pose in self._poses():
            pose.valueChanged[int].connect(lambda val, k=key: self._update_pose(k, val))

        self.pos_0.valueChanged[float].connect(lambda val: self._update_position(0, val))
        self.pos_1.valueChanged[float].connect(lambda val: self._update_position(1, val))
        self.pos_2.valueChanged[float].connect(lambda val: self._update_position(2, val))

        self.radio_f.pressed.connect(lambda: self._init_model('f'))
        self.radio_m.pressed.connect(lambda: self._init_model('m'))

        self.reset_pose.clicked.connect(self._reset_pose)
        self.reset_shape.clicked.connect(self._reset_shape)
        self.reset_postion.clicked.connect(self._reset_position)

        self.canvas.wheelEvent = self._zoom
        self.canvas.mousePressEvent = self._mouse_begin
        self.canvas.mouseMoveEvent = self._move
        self.canvas.mouseReleaseEvent = self._mouse_end

        self.action_save.triggered.connect(self._save_config_dialog)
        self.action_open.triggered.connect(self._open_config_dialog)
        self.action_save_screenshot.triggered.connect(self._save_screenshot_dialog)
        self.action_save_mesh.triggered.connect(self._save_mesh_dialog)

        self.view_joints.triggered.connect(self.draw)
        self.view_joint_ids.triggered.connect(self.draw)
        self.view_bones.triggered.connect(self.draw)

        self._update_canvas = True
Example #5
def _simple_renderer(rn, meshes, yrot=0, texture=None):
    mesh = meshes[0]
    if texture is not None:
        if not hasattr(mesh, 'ft'):
            mesh.ft = copy(mesh.f)
            vt = copy(mesh.v[:, :2])
            vt -= np.min(vt, axis=0).reshape((1, -1))
            vt /= np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt
        mesh.texture_filepath = rn.texture_image

    # Set camera parameters
    if texture is not None:
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=np.ones(3))

    for next_mesh in meshes[1:]:
        _stack_with(rn, next_mesh, texture)

    # Construct Back Light (on back right corner)
    albedo = rn.vc

    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=np.array([1, 1, 1]))

    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(np.array([800, 10, 300]),
                                                    yrot),
                                  vc=albedo,
                                  light_color=np.array([1, 1, 1]))

    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=np.array([.7, .7, .7]))

    return rn.r
Example #6
def simple_renderer(rn, meshes, yrot=np.radians(120)):
    from opendr.lighting import LambertianPointLight
    mesh = meshes[0]
    if hasattr(rn, 'texture_image'):
        if not hasattr(mesh, 'ft'):
            mesh.ft = copy(mesh.f)
            vt = copy(mesh.v[:, :2])
            vt -= np.min(vt, axis=0).reshape((1, -1))
            vt /= np.max(vt, axis=0).reshape((1, -1))
            mesh.vt = vt
        # mesh.texture_filepath = rn.texture_image
        rn.set(v=mesh.v,
               f=mesh.f,
               vc=mesh.vc,
               ft=mesh.ft,
               vt=mesh.vt,
               bgcolor=np.ones(3))
    else:
        rn.set(v=mesh.v, f=mesh.f, vc=mesh.vc, bgcolor=np.ones(3))

    for next_mesh in meshes[1:]:
        stack_with(rn, next_mesh)

    albedo = rn.vc

    # Construct Back Light (on back right corner)
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=np.array([1, 1, 1]))

    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(np.array([800, 10, 300]),
                                                    yrot),
                                  vc=albedo,
                                  light_color=np.array([1, 1, 1]))

    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=np.array([.7, .7, .7]))

    return rn.r
Example #7
    def standard_render(self):

        ## Create OpenDR renderer
        rn = ColoredRenderer()

        ## Assign attributes to renderer
        w, h = (640, 480)

        rn.camera = ProjectPoints(v=self.m,
                                  rt=np.zeros(3),
                                  t=np.array([0, 0, 2.]),
                                  f=np.array([w, w]) / 2.,
                                  c=np.array([w, h]) / 2.,
                                  k=np.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=self.m, f=self.m.f, bgcolor=np.zeros(3))

        ## Construct point light source
        rn.vc = LambertianPointLight(f=self.m.f,
                                     v=rn.v,
                                     num_verts=len(self.m),
                                     light_pos=np.array([-1000, -1000, -2000]),
                                     vc=np.ones_like(self.m) * .9,
                                     light_color=np.array([1., 1., 1.]))

        ## Show it using OpenCV
        import cv2
        cv2.imshow('render_SMPL', rn.r)
        print('..Press any key while on the display window')
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #8
def render(verts, faces, w=640, h=480):
    # Frontal view
    verts[:, 1:3] = -verts[:, 1:3]

    # Create OpenDR renderer
    rn = ColoredRenderer()

    # Assign attributes to renderer
    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0., 0., 2.]),
                              f=np.array([w, h]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=verts, f=faces, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(verts),
                                 light_pos=np.array([1000, -1000, -2000]),
                                 vc=np.ones_like(verts) * .9,
                                 light_color=np.array([1., 1., 1.]))

    return rn.r
Example #9
def Render():

    verts = np.load('../../resault/verts.npy')
    faces = np.load('../../resault/faces.npy')

    rn = ColoredRenderer()
    w, h = (640, 480)

    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 0.8, 'far': 16., 'width': w, 'height': h}
    rn.set(v=verts, f=faces, bgcolor=np.array([255, 255, 255]))

    rn.vc = LambertianPointLight(f=faces,
                                 v=rn.v,
                                 num_verts=len(verts),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(verts) * .9,
                                 light_color=np.array([1., 1., 1.]))

    # import cv2
    #
    # cv2.imshow('render_SMPL', rn.r)
    # cv2.waitKey(0)
    import matplotlib.pyplot as plt
    plt.ion()
    plt.axis('off')
    plt.imshow(rn.r)
    raw_input()
    plt.show()
Example #10
def main(mesh_list, out_list, scale=1.0, move_scale=True):
    assert len(mesh_list) == len(out_list)
    for mesh_file, out_file in zip(mesh_list, out_list):
        mesh = load_obj_data_binary(mesh_file)
        if move_scale:  # move to center and scale to unit bounding box
            mesh['v'] = (mesh['v'] - np.array([128, -192, 128]) +
                         0.5) * voxel_size

        if not ('vn' in mesh and mesh['vn'] is not None):
            mesh['vn'] = np.array(VertNormals(f=mesh['f'], v=mesh['v']))

        V = ch.array(mesh['v']) * scale
        V -= trans

        C = np.ones_like(mesh['v'])
        C *= np.array([186, 212, 255], dtype=np.float32) / 255.0
        # C *= np.array([158, 180, 216], dtype=np.float32) / 250.0
        C = np.minimum(C, 1.0)
        A = np.zeros_like(mesh['v'])
        A += LambertianPointLight(f=mesh['f'],
                                  v=V,
                                  vn=-mesh['vn'],
                                  num_verts=len(V),
                                  light_pos=np.array([0, -50, -50]),
                                  light_color=np.array([1.0, 1.0, 1.0]),
                                  vc=C)

        cam_t, cam_r = ch.array((0, 0, 0)), ch.array((3.14, 0, 0))
        U = ProjectPoints(v=V,
                          f=[flength, flength],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=cam_t,
                          rt=cam_r)
        rn = ColoredRenderer(camera=U,
                             v=V,
                             f=mesh['f'],
                             vc=A,
                             bgcolor=np.array([1.0, 0.0, 0.0]),
                             frustum={
                                 'width': w,
                                 'height': h,
                                 'near': 0.1,
                                 'far': 20
                             })

        img = np.asarray(rn)[:, :, (2, 1, 0)]
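        # the background was rendered pure red (RGB 1,0,0); after the BGR channel swap it is
        # (0,0,1), so any pixel differing from that color belongs to the mesh and feeds the mask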
        msk = np.sum(np.abs(img - np.array([[[0, 0, 1.0]]], dtype=np.float32)),
                     axis=-1,
                     keepdims=True)
        msk[msk > 0] = 1
        img = cv.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        msk = cv.resize(msk, (msk.shape[1] // 2, msk.shape[0] // 2),
                        interpolation=cv.INTER_AREA)
        msk[msk < 1] = 0
        msk = msk[:, :, np.newaxis]
        img = np.concatenate([img, msk], axis=-1)
        cv.imshow('render3', img)
        cv.waitKey(3)
        cv.imwrite(out_file, np.uint8(img * 255))
Example #11
def renderBody(m):
    from opendr.camera import ProjectPoints
    from opendr.renderer import ColoredRenderer
    from opendr.lighting import LambertianPointLight
    # Create OpenDR renderer
    rn = ColoredRenderer()
    # Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
    # Construct point light source
    rn.vc = LambertianPointLight(f=m.f,
                                 v=rn.v,
                                 num_verts=len(m),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(m) * .9,
                                 light_color=np.array([1., 1., 1.]))
    plt.ion()
    plt.imshow(np.fliplr(rn.r))  # FLIPPED!
    plt.show()
    plt.xticks([])
    plt.yticks([])
Example #12
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(120),
                    color=colors['light_pink']):
    # Rendered model color
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc

    # Construct Back Light (on back right corner)
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(rn.v),
                                 light_pos=_rotateY(
                                     np.array([-200, -100, -100]), yrot),
                                 vc=albedo,
                                 light_color=np.array([1, 1, 1]))

    # Construct Left Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=_rotateY(np.array([800, 10, 300]),
                                                     yrot),
                                  vc=albedo,
                                  light_color=np.array([1, 1, 1]))

    # Construct Right Light
    rn.vc += LambertianPointLight(f=rn.f,
                                  v=rn.v,
                                  num_verts=len(rn.v),
                                  light_pos=_rotateY(
                                      np.array([-500, 500, 1000]), yrot),
                                  vc=albedo,
                                  light_color=np.array([.7, .7, .7]))
    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, -1.0, 0., 0.0],
                              [0.0, 0., -1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    rn.camera.openglMat = flipXRotation  # this is from setupcamera in utils
    rn.glMode = 'glfw'
    rn.sharedWin = None
    rn.overdraw = True
    rn.nsamples = 8
    rn.msaa = True  # Without anti-aliasing optimization often does not work.
    rn.initGL()
    rn.debug = False
    return rn.r
Example #13
    def __init__(self, m):
        self.m = m
        self.m.betas[:] = np.random.rand(m.betas.size) * .3
        # m.pose[:] = np.random.rand(m.pose.size) * .2
        self.m.pose[:3] = [0., 0., 0.]
        self.m.pose[3:] = np.zeros(45)
        # m.pose[3:] = [-0.42671473, -0.85829819, -0.50662164, +1.97374622, -0.84298473, -1.29958491]
        self.m.pose[0] = np.pi

        # compute inverse components to map from fullpose spec to coefficients
        hands_components = np.asarray(m.hands_components)
        self.hands_components_inv = np.linalg.inv(hands_components)

        # rendering components
        # Assign attributes to renderer
        w, h = (640, 480)

        # Create OpenDR renderer
        self.rn = ColoredRenderer()
        self.rn.camera = ProjectPoints(v=m,
                                       rt=np.zeros(3),
                                       t=np.array([-0.03, -0.04, 0.20]),
                                       f=np.array([w, w]) / 2.,
                                       c=np.array([w, h]) / 2.,
                                       k=np.zeros(5))
        self.rn.frustum = {'near': 0.01, 'far': 2., 'width': w, 'height': h}
        self.rn.set(v=m, f=m.f, bgcolor=np.zeros(3))

        # Construct point light source
        self.rn.vc = LambertianPointLight(f=m.f,
                                          v=self.rn.v,
                                          num_verts=len(m),
                                          light_pos=np.array(
                                              [-1000, -1000, -2000]),
                                          vc=np.ones_like(m) * .9,
                                          light_color=np.array([1., 1., 1.]))
        self.rn.vc += LambertianPointLight(f=m.f,
                                           v=self.rn.v,
                                           num_verts=len(m),
                                           light_pos=np.array(
                                               [+2000, +2000, +2000]),
                                           vc=np.ones_like(m) * .9,
                                           light_color=np.array([1., 1., 1.]))
        self.mvs = MeshViewers(window_width=2000,
                               window_height=800,
                               shape=[1, 3])
Example #14
def simple_renderer(rn,
                    verts,
                    faces,
                    vc=None,
                    yrot=np.radians(120),
                    lighting=False):

    # Rendered model color
    if vc is None:
        color = colors['neutral']
    else:
        color = vc
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.zeros(3))

    # Construct Back Light (on back right corner)
    # (no lighting is applied unless lighting=True)
    if lighting:
        albedo = rn.vc

        rn.vc = LambertianPointLight(f=rn.f,
                                     v=rn.v,
                                     num_verts=len(rn.v),
                                     light_pos=_rotateY(
                                         np.array([-200, -100, -100]), yrot),
                                     vc=albedo,
                                     light_color=np.array([1, 1, 1]))

        # Construct Left Light
        rn.vc += LambertianPointLight(f=rn.f,
                                      v=rn.v,
                                      num_verts=len(rn.v),
                                      light_pos=_rotateY(
                                          np.array([800, 10, 300]), yrot),
                                      vc=albedo,
                                      light_color=np.array([1, 1, 1]))

        # Construct Right Light
        rn.vc += LambertianPointLight(f=rn.f,
                                      v=rn.v,
                                      num_verts=len(rn.v),
                                      light_pos=_rotateY(
                                          np.array([-500, 500, 1000]), yrot),
                                      vc=albedo,
                                      light_color=np.array([.7, .7, .7]))

    return rn
Example #15
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(120),
                    color=colors["light_pink"]):
    # Rendered model color
    # print("faces-------------")
    # print(faces.type())
    # print(verts.shape)
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc

    # Construct Back Light (on back right corner)
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
        vc=albedo,
        light_color=np.array([0.5, 0.5, 0.5]),
    )

    # Construct Left Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([800, 10, 300]), yrot),
        vc=albedo,
        light_color=np.array([0.5, 0.5, 0.5]),
    )

    # Construct Right Light
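    # note: plain assignment here replaces the two lights constructed above instead of adding to them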
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=np.array([0, 0, 1000]),
        vc=albedo,
        light_color=np.array([1, 1, 1]),
    )
    return rn.r
Example #16
def simple_renderer(rn,
                    verts,
                    faces,
                    vert_colors = None,
                    yrot = np.radians(120),
                    color = colors['light_pink']):
    # Rendered model color
    if vert_colors is None:
        rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    else:
        rn.set(v=verts, f=faces, vc=vert_colors, bgcolor=np.ones(3))
    albedo = rn.vc
    
    
    # Construct Back Light (on back right corner)
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))
    
    # Construct Left Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([800, -200, 300]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))
    
    # Construct Right Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-500, -200, 1000]), yrot),
        vc=albedo,
        light_color=np.array([.7, .7, .7]))

    return rn.r
Example #17
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(120),
                    color=colors['light_pink']):
    t0 = time.time()
    # Rendered model color
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc

    # Construct Back Light (on back right corner)
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))

    # Construct Left Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([800, 10, 300]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))

    # Construct Right Light
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
        vc=albedo,
        light_color=np.array([.7, .7, .7]))
    #t0 = time.time()    
    #rend_img = rn.r
    #print('Render:', time.time() - t0)
    return rn.r
Example #18
def mesh2Image(vertices,
               faces,
               batch,
               path,
               name,
               height,
               width,
               vertices_num=6890):
    # Create OpenDR renderer
    rn = ColoredRenderer()

    rt_1 = np.zeros(3)

    rn.camera = ProjectPoints(
        v=vertices,  # vertices
        # v=m,
        rt=rt_1,
        # x, y, z translation of the camera, z>=0    0 0 2
        t=np.array([0, 0, 0]),
        # f=np.array([w,w])/2, # focal length? just scales the picture
        # c=np.array([w,h])/2, # just moves the picture along the top-left axis? not sure
        f=np.array([1, 1]),
        c=np.array([0, 0]),
        k=np.zeros(5))
    rn.frustum = {'near': 1, 'far': 15, 'width': width, 'height': height}
    rn.set(v=vertices, f=faces, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(
        f=faces,  # face
        v=vertices,
        # v=rn.v, #vertex?
        num_verts=len(vertices),
        light_pos=np.array([-1000, -1000, -2000]),  # point light position
        vc=np.ones_like(vertices) * .9,  # albedo per vertex
        light_color=np.array([1., 1.,
                              1.]))  # Blue, Green, Red; light intensity

    # make the image binary (black and white); these are actually magic steps
    rn.change_col(np.ones((vertices_num, 3)))
    #mask = rn.r.copy()  # takes lots of time

    mask = rn.r * 255
    import cv2
    if batch == 1:
        cv2.imwrite('%s/%s.png' % (path, name), mask)
    else:
        cv2.imwrite('%s/%s_%d.png' % (path, name, i), mask)
Example #19
def computeGlobalAndPointLighting(v, vn, vc, light_pos, globalConstant,
                                  light_color):
    # Construct point light source
    rangeMeshes = range(len(vn))
    vc_list = []
    for mesh in rangeMeshes:
        l1 = LambertianPointLight(v=v[mesh],
                                  vn=vn[mesh],
                                  num_verts=len(v[mesh]),
                                  light_pos=light_pos,
                                  vc=vc[mesh],
                                  light_color=light_color)

        vcmesh = vc[mesh] * (l1 + globalConstant)
        vc_list = vc_list + [vcmesh]
    return vc_list
Example #20
def render_smpl(m):
    # Create OpenDR renderer
    rn = ColoredRenderer()
    # Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array(
        [0, 0, 2.]), f=np.array([w, w])/2., c=np.array([w, h])/2., k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
    # Construct point light source
    rn.vc = LambertianPointLight(
        f=m.f,
        v=rn.v,
        num_verts=len(m),
        light_pos=np.array([-1000, -1000, -2000]),
        vc=np.ones_like(m)*.9,
        light_color=np.array([1., 1., 1.]))
    image = rn.r * 255
    return image
Example #21
def render_smpl(par, theta, beta, img_out_file, model_path, front_view=False):
    m = load_model(model_path)
    ## Assign the given pose
    m.pose[:] = theta
    m.betas[:] = beta
    # Define specific parameters for showing a front view of the rendering
    if front_view:
        m.pose[:3] = np.array([np.pi, 0, 0], dtype=np.float32)
        rt = np.zeros(3)
        light_source = np.array([-1000, -1000, -2000])
    else:
        rt = np.array([3.14, 0, 0])
        light_source = np.array([1000, 1000, 2000])

    ## Create OpenDR renderer
    rn = ColoredRenderer()
    ## Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m,
                              rt=rt,
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))

    ## Construct point light source
    rn.vc = LambertianPointLight(
        f=m.f,
        v=rn.v,
        num_verts=len(m),
        #light_pos=np.array([-1000,-1000,-2000]),
        light_pos=light_source,
        vc=np.ones_like(m) * .9,
        light_color=np.array([1., 1., 1.]))

    cv2.imwrite(img_out_file, rn.r * 255.0)
Example #22
def computeGlobalAndPointLighting(v, vn, vc, light_pos, globalConstant,
                                  light_color):
    # [In]: v   list(np.array)
    # [In]: vn  list(np.array)
    # [In]: vc  list(np.array)
    # [In]: light_pos np.array([x, y, y])
    # [In]: globalConstant np.array([r, g, b])
    # [In]: light_color np.array([r, g, b])

    # Construct point light source
    rangeMeshes = range(len(vn))
    vc_list = []
    for mesh in rangeMeshes:
        l1 = LambertianPointLight(v=v[mesh],
                                  vn=vn[mesh],
                                  num_verts=len(v[mesh]),
                                  light_pos=light_pos,
                                  vc=vc[mesh],
                                  light_color=light_color)

        vcmesh = vc[mesh] * (l1 + globalConstant)
        vc_list = vc_list + [vcmesh]
    return vc_list
Example #23
    rn = ColoredRenderer()

    # Assign attributes to renderer
    w, h = (640, 480)

    rn.camera = ProjectPoints(v=smpl,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 3.]),
                              f=np.array([w, w]),
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=smpl, f=smpl.f, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(f=smpl.f,
                                 v=rn.v,
                                 num_verts=len(smpl),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(smpl) * .9,
                                 light_color=np.array([1., 1., 1.]))

    # Show it using OpenCV
    import cv2

    cv2.imshow('render_SMPL', rn.r)
    print('..Press any key while on the display window')
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #24
    def render(self, cam_intrinsics, dist=None, M=None, img_shape=None, render_mask=False):
        from opendr.camera import ProjectPoints
        from utils.renderer import ColoredRenderer

        if dist is None:
            dist = np.zeros(5)
        dist = dist.flatten()
        if M is None:
            M = np.eye(4)

        # get R, t from M (has to be world2cam)
        R = M[:3, :3]
        ax, angle = t3d.axangles.mat2axangle(R)
        rt = ax*angle
        rt = rt.flatten()
        t = M[:3, 3]

        w, h = (320, 320)
        if img_shape is not None:
            w, h = img_shape[1], img_shape[0]

        pp = np.array([cam_intrinsics[0, 2], cam_intrinsics[1, 2]])
        f = np.array([cam_intrinsics[0, 0], cam_intrinsics[1, 1]])

        # Create OpenDR renderer
        rn = ColoredRenderer()

        # Assign attributes to renderer
        rn.camera = ProjectPoints(rt=rt,
                                  t=t, # camera translation
                                  f=f,  # focal lengths
                                  c=pp,  # camera center (principal point)
                                  k=dist)  # OpenCv distortion params
        rn.frustum = {'near': 0.1, 'far': 5., 'width': w, 'height': h}

        V, F = self._get_verts_faces()
        rn.set(v=V,
               f=F,
               bgcolor=np.zeros(3))

        if render_mask:
            rn.vc = np.ones_like(V)  #for segmentation mask like rendering
        else:
            colors = np.ones_like(V)

            # Construct point light sources
            rn.vc = LambertianPointLight(f=F,
                                         v=V,
                                         num_verts=V.shape[0],
                                         light_pos=np.array([-1000, -1000, -2000]),
                                         vc=0.8 * colors,
                                         light_color=np.array([1., 1., 1.]))

            rn.vc += LambertianPointLight(f=F,
                                          v=V,
                                          num_verts=V.shape[0],
                                          light_pos=np.array([1000, 1000, -2000]),
                                          vc=0.25 * colors,
                                          light_color=np.array([1., 1., 1.]))

            rn.vc += LambertianPointLight(f=F,
                                          v=V,
                                          num_verts=V.shape[0],
                                          light_pos=np.array([2000, 2000, 2000]),
                                          vc=0.1 * colors,
                                          light_color=np.array([1., 1., 1.]))

            rn.vc += LambertianPointLight(f=F,
                                          v=V,
                                          num_verts=V.shape[0],
                                          light_pos=np.array([-2000, -2000, 2000]),
                                          vc=0.1 * colors,
                                          light_color=np.array([1., 1., 1.]))

        # render
        img = (np.array(rn.r) * 255).astype(np.uint8)
        return img
Example #25
    def render(self,
               vertices,
               faces=None,
               img=None,
               camera_t=np.zeros([3], dtype=np.float32),
               camera_rot=np.zeros([3], dtype=np.float32),
               camera_center=None,
               use_bg=False,
               bg_color=(0.0, 0.0, 0.0),
               body_color=None,
               focal_length=5000,
               disp_text=False,
               gt_keyp=None,
               pred_keyp=None,
               **kwargs):
        if img is not None:
            height, width = img.shape[:2]
        else:
            height, width = self.height, self.width

        if faces is None:
            faces = self.faces

        if camera_center is None:
            camera_center = np.array([width * 0.5, height * 0.5])

        self.renderer.camera = ProjectPoints(rt=camera_rot,
                                             t=camera_t,
                                             f=focal_length * np.ones(2),
                                             c=camera_center,
                                             k=np.zeros(5))
        dist = np.abs(self.renderer.camera.t.r[2] -
                      np.mean(vertices, axis=0)[2])
        far = dist + 20

        self.renderer.frustum = {
            'near': 1.0,
            'far': far,
            'width': width,
            'height': height
        }

        if img is not None:
            if use_bg:
                self.renderer.background_image = img
            else:
                self.renderer.background_image = np.ones_like(img) * np.array(
                    bg_color)

        if body_color is None:
            color = self.colors['blue']
        else:
            color = self.colors[body_color]

        if isinstance(self.renderer, TexturedRenderer):
            color = [1., 1., 1.]

        self.renderer.set(v=vertices, f=faces, vc=color, bgcolor=np.ones(3))
        albedo = self.renderer.vc
        # Construct Back Light (on back right corner)
        yrot = np.radians(120)

        self.renderer.vc = LambertianPointLight(
            f=self.renderer.f,
            v=self.renderer.v,
            num_verts=self.renderer.v.shape[0],
            light_pos=rotateY(np.array([-200, -100, -100]), yrot),
            vc=albedo,
            light_color=np.array([1, 1, 1]))

        # Construct Left Light
        self.renderer.vc += LambertianPointLight(
            f=self.renderer.f,
            v=self.renderer.v,
            num_verts=self.renderer.v.shape[0],
            light_pos=rotateY(np.array([800, 10, 300]), yrot),
            vc=albedo,
            light_color=np.array([1, 1, 1]))

        #  Construct Right Light
        self.renderer.vc += LambertianPointLight(
            f=self.renderer.f,
            v=self.renderer.v,
            num_verts=self.renderer.v.shape[0],
            light_pos=rotateY(np.array([-500, 500, 1000]), yrot),
            vc=albedo,
            light_color=np.array([.7, .7, .7]))

        return self.renderer.r
Example #26
def main(keypoint_file, masks_file, camera_file, out, model_file, prior_file,
         resize, body_height, nohands, display):

    # load data
    with open(model_file, 'rb') as fp:
        model_data = pkl.load(fp, encoding='latin1')

    with open(camera_file, 'rb') as fp:
        camera_data = pkl.load(fp, encoding='latin1')

    with open(prior_file, 'rb') as fp:
        prior_data = pkl.load(fp, encoding='latin1')

    if 'basicModel_f' in model_file:
        regs = np.load(
            'vendor/smplify/models/regressors_locked_normalized_female.npz')
        b2m = np.load('assets/b2m_f.npy')
    else:
        regs = np.load(
            'vendor/smplify/models/regressors_locked_normalized_male.npz')
        b2m = np.load('assets/b2m_m.npy')

    keypoints = h5py.File(keypoint_file, 'r')['keypoints']
    masks = h5py.File(masks_file, 'r')['masks']
    num_frames = masks.shape[0]

    # init
    base_smpl = Smpl(model_data)
    base_smpl.trans[:] = np.array([0, 0, 3])
    base_smpl.pose[0] = np.pi
    base_smpl.pose[3:] = prior_data['mean']

    camera = ProjectPoints(t=np.zeros(3),
                           rt=np.zeros(3),
                           c=camera_data['camera_c'] * resize,
                           f=camera_data['camera_f'] * resize,
                           k=camera_data['camera_k'],
                           v=base_smpl)
    frustum = {
        'near': 0.1,
        'far': 1000.,
        'width': int(camera_data['width'] * resize),
        'height': int(camera_data['height'] * resize)
    }

    if display:
        debug_cam = ProjectPoints(v=base_smpl,
                                  t=camera.t,
                                  rt=camera.rt,
                                  c=camera.c,
                                  f=camera.f,
                                  k=camera.k)
        debug_light = LambertianPointLight(f=base_smpl.f,
                                           v=base_smpl,
                                           num_verts=len(base_smpl),
                                           light_pos=np.zeros(3),
                                           vc=np.ones(3),
                                           light_color=np.ones(3))
        debug_rn = ColoredRenderer(camera=debug_cam,
                                   v=base_smpl,
                                   f=base_smpl.f,
                                   vc=debug_light,
                                   frustum=frustum)
    else:
        debug_rn = None

    # generic frame loading function
    def create_frame(i, smpl, copy=True):
        f = FrameData()

        f.smpl = copy_smpl(smpl, model_data) if copy else smpl
        f.camera = ProjectPoints(v=f.smpl,
                                 t=camera.t,
                                 rt=camera.rt,
                                 c=camera.c,
                                 f=camera.f,
                                 k=camera.k)

        f.keypoints = np.array(keypoints[i]).reshape(-1, 3) * np.array(
            [resize, resize, 1])
        f.J = joints_coco(f.smpl)
        f.J_proj = ProjectPoints(v=f.J,
                                 t=camera.t,
                                 rt=camera.rt,
                                 c=camera.c,
                                 f=camera.f,
                                 k=camera.k)
        f.mask = cv2.resize(np.array(masks[i], dtype=np.float32), (0, 0),
                            fx=resize,
                            fy=resize,
                            interpolation=cv2.INTER_NEAREST)

        f.collision_obj = collision_obj(f.smpl, regs)
        f.pose_prior_obj = pose_prior_obj(f.smpl, prior_data)
        f.pose_obj = (f.J_proj -
                      f.keypoints[:, :2]) * f.keypoints[:, 2].reshape(-1, 1)

        return f

    base_frame = create_frame(0, base_smpl, copy=False)

    # get betas from 5 frames
    log.info('Initial fit')

    num_init = 5
    indices_init = np.ceil(np.arange(num_init) * num_frames * 1. /
                           num_init).astype(np.int)

    init_frames = [base_frame]
    for i in indices_init[1:]:
        init_frames.append(create_frame(i, base_smpl))

    init(init_frames, body_height, b2m, debug_rn)

    # get pose frame by frame
    with h5py.File(out, 'w') as fp:
        last_smpl = None
        poses_dset = fp.create_dataset("pose", (num_frames, 72),
                                       'f',
                                       chunks=True,
                                       compression="lzf")
        trans_dset = fp.create_dataset("trans", (num_frames, 3),
                                       'f',
                                       chunks=True,
                                       compression="lzf")
        betas_dset = fp.create_dataset("betas", (10, ),
                                       'f',
                                       chunks=True,
                                       compression="lzf")

        for i in xrange(num_frames):
            if i == 0:
                current_frame = base_frame
            else:
                current_frame = create_frame(i, last_smpl)

            log.info('Fit frame {}'.format(i))
            # re-init if necessary
            reinit_frame(current_frame, prior_data['mean'], nohands, debug_rn)
            # final fit
            fit_pose(current_frame, last_smpl, frustum, nohands, debug_rn)

            poses_dset[i] = current_frame.smpl.pose.r
            trans_dset[i] = current_frame.smpl.trans.r

            if i == 0:
                betas_dset[:] = current_frame.smpl.betas.r

            last_smpl = current_frame.smpl

    log.info('Done.')
Example #27
    def render(self, image, cam, K, verts, face):
        ## Create OpenDR renderer
        rn = ColoredRenderer()

        ## Assign attributes to renderer
        w, h = (224 * self.ratio, 224 * self.ratio)

        f = np.array([K[0, 0], K[1, 1]]) * float(self.ratio)
        c = np.array([K[0, 2], K[1, 2]]) * float(self.ratio)
        t = np.array([cam[1], cam[2], 2 * K[0, 0] / (224. * cam[0] + 1e-9)])
        rn.camera = ProjectPoints(v=verts, rt=np.zeros(3), t=t, f=f, c=c, k=np.zeros(5))

        rn.frustum = {'near': 1., 'far': 100., 'width': w, 'height': h}

        albedo = np.ones_like(verts)*.9

        if self.color is not None:
            color0 = self.color
            color1 = self.color
            color2 = self.color
        else:
            # white
            color0 = np.array([1, 1, 1])
            color1 = np.array([1, 1, 1])
            color2 = np.array([0.7, 0.7, 0.7])

        rn.set(v=verts, f=face, bgcolor=np.zeros(3))

        yrot = np.radians(120)

        rn.vc = LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=rotateY(np.array([-200, -100, -100]), yrot),
            vc=albedo,
            light_color=color0)

        # Construct Left Light
        rn.vc += LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=rotateY(np.array([800, 10, 300]), yrot),
            vc=albedo,
            light_color=color1)

        # Construct Right Light
        rn.vc += LambertianPointLight(
            f=rn.f,
            v=rn.v,
            num_verts=len(rn.v),
            light_pos=rotateY(np.array([-500, 500, 1000]), yrot),
            vc=albedo,
            light_color=color2)

        img_orig = np.transpose(image, (1, 2, 0))
        img_resized = resize(img_orig, (img_orig.shape[0] * self.ratio, img_orig.shape[1] * self.ratio), anti_aliasing=True)

        img_smpl = img_resized.copy()
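        # visibility_image holds the face id per pixel; 4294967295 (0xFFFFFFFF) marks background,
        # so only pixels actually covered by the mesh are overwritten with the rendered color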
        img_smpl[rn.visibility_image != 4294967295] = rn.r[rn.visibility_image != 4294967295]

        rn.set(v=rotateY(verts, np.radians(90)), f=face, bgcolor=np.zeros(3))
        render_smpl = rn.r

        render_smpl_rgba = np.zeros((render_smpl.shape[0], render_smpl.shape[1], 4))
        render_smpl_rgba[:, :, :3] = render_smpl
        render_smpl_rgba[:, :, 3][rn.visibility_image != 4294967295] = 255

        return img_orig, img_resized, img_smpl, render_smpl_rgba
Example #28
    mesh[:, 2] = 10.0 + (mesh[:, 2] - np.mean(mesh[:, 2]))
    mesh[:, :2] = mesh[:, :2] * np.expand_dims(mesh[:, 2], 1)

    rn.camera = ProjectPoints(v=mesh,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 0]),
                              f=np.array([1, 1]),
                              c=np.array([0, 0]),
                              k=np.zeros(5))

    rn.frustum = {'near': 1., 'far': 20., 'width': w, 'height': h}
    rn.set(v=mesh, f=m.f, bgcolor=np.zeros(3))
    rn.vc = LambertianPointLight(f=m.f,
                                 v=mesh,
                                 num_verts=len(m),
                                 light_pos=np.array([0, 0, 0]),
                                 vc=np.ones_like(m) * .9,
                                 light_color=np.array([1., 1., 1.]))

    mod = i % bg_number
    bg = misc.imread(os.path.join(bg_pth, '%d.png' % (mod)))

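    # color all 778 vertices (presumably a MANO hand mesh) white so rn.r yields a binary silhouette for the mask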
    rn.change_col(np.ones((778, 3)))

    mask = rn.r.copy()
    mask = mask[:, :, 0].astype(np.uint8)

    rn.change_col(colors[random.randint(0, 26)])

    hand = rn.r.copy() * 255.
    image = (1 - np.expand_dims(mask, 2)) * bg + np.expand_dims(mask, 2) * hand
Example #29
rn1.camera = ProjectPoints(v=m1, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2.,
                          c=np.array([w, h]) / 2., k=np.zeros(5))
rn1.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn1.set(v=m1, f=m1.f, bgcolor=np.zeros(3))

rn2.camera = ProjectPoints(v=m2, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w, w]) / 2.,
                          c=np.array([w, h]) / 2., k=np.zeros(5))
rn2.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn2.set(v=m2, f=m2.f, bgcolor=np.zeros(3))

## Construct point light source
rn1.vc = LambertianPointLight(
    f=m1.f,
    v=rn1.v,
    num_verts=len(m1),
    light_pos=np.array([-1000, -1000, -2000]),
    vc=np.ones_like(m1) * .9,
    light_color=np.array([1., 1., 1.]))

rn2.vc = LambertianPointLight(
    f=m2.f,
    v=rn2.v,
    num_verts=len(m2),
    light_pos=np.array([-1000, -1000, -2000]),
    vc=np.ones_like(m2) * .9,
    light_color=np.array([1., 1., 1.]))
###################### End of SMPL body model initialization #############

########## path of the input file
Result_path = '/data/Guha/GR/Output/TestSet/13/'
Example #30
class Ui_MainWindow(QtWidgets.QMainWindow, Ui_MainWindow_Base):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)

        self._moving = False
        self._rotating = False
        self._mouse_begin_pos = None
        self._loaded_gender = None
        self._update_canvas = False

        self.camera = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.joints2d = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.frustum = {'near': 0.1, 'far': 1000., 'width': 100, 'height': 30}
        self.light = LambertianPointLight(vc=np.array([0.94, 0.94, 0.94]), light_color=np.array([1., 1., 1.]))
        self.rn = ColoredRenderer(bgcolor=np.ones(3), frustum=self.frustum, camera=self.camera, vc=self.light,
                                  overdraw=False)

        self.model = None
        self._init_model('f')
        self.model.pose[0] = np.pi

        self.camera_widget = Ui_CameraWidget(self.camera, self.frustum, self.draw)
        self.btn_camera.clicked.connect(lambda: self._show_camera_widget())

        for key, shape in self._shapes():
            shape.valueChanged[int].connect(lambda val, k=key: self._update_shape(k, val))

        for key, pose in self._poses():
            pose.valueChanged[int].connect(lambda val, k=key: self._update_pose(k, val))

        self.pos_0.valueChanged[float].connect(lambda val: self._update_position(0, val))
        self.pos_1.valueChanged[float].connect(lambda val: self._update_position(1, val))
        self.pos_2.valueChanged[float].connect(lambda val: self._update_position(2, val))

        self.radio_f.pressed.connect(lambda: self._init_model('f'))
        self.radio_m.pressed.connect(lambda: self._init_model('m'))

        self.reset_pose.clicked.connect(self._reset_pose)
        self.reset_shape.clicked.connect(self._reset_shape)
        self.reset_postion.clicked.connect(self._reset_position)

        self.canvas.wheelEvent = self._zoom
        self.canvas.mousePressEvent = self._mouse_begin
        self.canvas.mouseMoveEvent = self._move
        self.canvas.mouseReleaseEvent = self._mouse_end

        self.action_save.triggered.connect(self._save_config_dialog)
        self.action_open.triggered.connect(self._open_config_dialog)
        self.action_save_screenshot.triggered.connect(self._save_screenshot_dialog)
        self.action_save_mesh.triggered.connect(self._save_mesh_dialog)

        self.view_joints.triggered.connect(self.draw)
        self.view_joint_ids.triggered.connect(self.draw)
        self.view_bones.triggered.connect(self.draw)

        self._update_canvas = True

    def showEvent(self, event):
        self._init_camera()
        super(self.__class__, self).showEvent(event)

    def resizeEvent(self, event):
        self._init_camera()
        super(self.__class__, self).resizeEvent(event)

    def closeEvent(self, event):
        self.camera_widget.close()
        super(self.__class__, self).closeEvent(event)

    def draw(self):
        if self._update_canvas:
            img = np.array(self.rn.r)

            if self.view_joints.isChecked() or self.view_joint_ids.isChecked() or self.view_bones.isChecked():
                img = self._draw_annotations(img)

            self.canvas.setScaledContents(False)
            self.canvas.setPixmap(self._to_pixmap(img))

    def _draw_annotations(self, img):
        self.joints2d.set(t=self.camera.t, rt=self.camera.rt, f=self.camera.f, c=self.camera.c, k=self.camera.k)

        if self.view_bones.isChecked():
            kintree = self.model.kintree_table[:, 1:]
            for k in range(kintree.shape[1]):
                cv2.line(img, (int(self.joints2d.r[kintree[0, k], 0]), int(self.joints2d.r[kintree[0, k], 1])),
                         (int(self.joints2d.r[kintree[1, k], 0]), int(self.joints2d.r[kintree[1, k], 1])),
                         (0.98, 0.98, 0.98), 3)

        if self.view_joints.isChecked():
            for j in self.joints2d.r:
                cv2.circle(img, (int(j[0]), int(j[1])), 5, (0.38, 0.68, 0.15), -1)

        if self.view_joint_ids.isChecked():
            for k, j in enumerate(self.joints2d.r):
                cv2.putText(img, str(k), (int(j[0]), int(j[1])), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0.3, 0.23, 0.9), 2)

        return img

    def _init_model(self, g):
        pose = None
        betas = None
        trans = None

        if self.model is not None:
            pose = self.model.pose.r
            betas = self.model.betas.r
            trans = self.model.trans.r

        if g == 'f':
            self.model = load_model('smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
        else:
            self.model = load_model('smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
        self._loaded_gender = g

        if pose is not None:
            self.model.pose[:] = pose
            self.model.betas[:] = betas
            self.model.trans[:] = trans

        self.light.set(v=self.model, f=self.model.f, num_verts=len(self.model))
        self.rn.set(v=self.model, f=self.model.f)
        self.camera.set(v=self.model)
        self.joints2d.set(v=self.model.J_transformed)

        self.draw()

    def _init_camera(self):
        w = self.canvas.width()
        h = self.canvas.height()

        if w != self.frustum['width'] and h != self.frustum['height']:
            self.camera.set(rt=np.array([self.camera_widget.rot_0.value(), self.camera_widget.rot_1.value(),
                                         self.camera_widget.rot_2.value()]),
                            t=np.array([self.camera_widget.pos_0.value(), self.camera_widget.pos_1.value(),
                                        self.camera_widget.pos_2.value()]),
                            f=np.array([w, w]) * self.camera_widget.focal_len.value(),
                            c=np.array([w, h]) / 2.,
                            k=np.array([self.camera_widget.dist_0.value(), self.camera_widget.dist_1.value(),
                                        self.camera_widget.dist_2.value(), self.camera_widget.dist_3.value(),
                                        self.camera_widget.dist_4.value()]))

            self.frustum['width'] = w
            self.frustum['height'] = h

            self.light.set(light_pos=Rodrigues(self.camera.rt).T.dot(self.camera.t) * -10.)
            self.rn.set(frustum=self.frustum, camera=self.camera)

            self.draw()

    def _save_config_dialog(self):
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save config', None, 'Config File (*.ini)')
        if filename:
            with open(str(filename), 'w') as fp:
                config = ConfigParser.ConfigParser()
                config.add_section('Model')
                config.set('Model', 'gender', self._loaded_gender)
                config.set('Model', 'shape', ','.join(str(s) for s in self.model.betas.r))
                config.set('Model', 'pose', ','.join(str(p) for p in self.model.pose.r))
                config.set('Model', 'translation', ','.join(str(p) for p in self.model.trans.r))

                config.add_section('Camera')
                config.set('Camera', 'translation', ','.join(str(t) for t in self.camera.t.r))
                config.set('Camera', 'rotation', ','.join(str(r) for r in self.camera.rt.r))
                config.set('Camera', 'focal_length', str(self.camera_widget.focal_len.value()))
                config.set('Camera', 'center', '{},{}'.format(self.camera_widget.center_0.value(),
                                                              self.camera_widget.center_1.value()))
                config.set('Camera', 'distortion', ','.join(str(r) for r in self.camera.k.r))

                config.write(fp)

    def _open_config_dialog(self):
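        """Restore a previously saved INI config: reload the model with the stored
        gender, convert the stored betas and pose back into slider positions (the
        inverse of _update_shape/_update_pose), and update the camera widget."""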
        filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Load config', None, 'Config File (*.ini)')
        if filename:
            config = ConfigParser.ConfigParser()
            config.read(str(filename))

            self._update_canvas = False
            self._init_model(config.get('Model', 'gender'))

            shapes = np.fromstring(config.get('Model', 'shape'), dtype=np.float64, sep=',')
            poses = np.fromstring(config.get('Model', 'pose'), dtype=np.float64, sep=',')
            position = np.fromstring(config.get('Model', 'translation'), dtype=np.float64, sep=',')

            for key, shape in self._shapes():
                val = shapes[key] / 5.0 * 50.0 + 50.0
                shape.setValue(val)
            for key, pose in self._poses():
                if key == 0:
                    val = (poses[key] - np.pi) / np.pi * 50.0 + 50.0
                else:
                    val = poses[key] / np.pi * 50.0 + 50.0
                pose.setValue(val)

            self.pos_0.setValue(position[0])
            self.pos_1.setValue(position[1])
            self.pos_2.setValue(position[2])

            cam_pos = np.fromstring(config.get('Camera', 'translation'), dtype=np.float64, sep=',')
            cam_rot = np.fromstring(config.get('Camera', 'rotation'), dtype=np.float64, sep=',')
            cam_dist = np.fromstring(config.get('Camera', 'distortion'), dtype=np.float64, sep=',')
            cam_c = np.fromstring(config.get('Camera', 'center'), dtype=np.float64, sep=',')
            cam_f = config.getfloat('Camera', 'focal_length')
            print(cam_c)
            self.camera_widget.set_values(cam_pos, cam_rot, cam_f, cam_c, cam_dist)

            self._update_canvas = True
            self.draw()

    def _save_screenshot_dialog(self):
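        """Render the current frame (with annotations, if enabled), scale it from
        [0, 1] floats to 8-bit values and write it with cv2.imwrite."""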
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save screenshot', None, 'Images (*.png *.jpg *.ppm)')
        if filename:
            img = np.array(self.rn.r)
            if self.view_joints.isChecked() or self.view_joint_ids.isChecked() or self.view_bones.isChecked():
                img = self._draw_annotations(img)
            cv2.imwrite(str(filename), np.uint8(img * 255))

    def _save_mesh_dialog(self):
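        """Export the posed mesh as a Wavefront OBJ file: one 'v x y z' line per vertex
        and one 'f i j k' line per face, using OBJ's 1-based vertex indices."""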
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save mesh', None, 'Mesh (*.obj)')
        if filename:
            with open(filename, 'w') as fp:
                for v in self.model.r:
                    fp.write('v %f %f %f\n' % (v[0], v[1], v[2]))

                for f in self.model.f + 1:
                    fp.write('f %d %d %d\n' % (f[0], f[1], f[2]))

    def _zoom(self, event):
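        """Map mouse-wheel steps to the camera's z translation (one 120-unit wheel
        notch moves the camera by 0.1)."""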
        delta = -event.angleDelta().y() / 1200.0
        self.camera_widget.pos_2.setValue(self.camera_widget.pos_2.value() + delta)

    def _mouse_begin(self, event):
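        """Start a drag and remember the cursor position: the left button pans the
        camera, the right button rotates it (see _move)."""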
        if event.button() == 1:
            self._moving = True
        elif event.button() == 2:
            self._rotating = True
        self._mouse_begin_pos = event.pos()

    def _mouse_end(self, event):
        self._moving = False
        self._rotating = False

    def _move(self, event):
        if self._moving:
            delta = event.pos() - self._mouse_begin_pos
            self.camera_widget.pos_0.setValue(self.camera_widget.pos_0.value() + delta.x() / 500.)
            self.camera_widget.pos_1.setValue(self.camera_widget.pos_1.value() + delta.y() / 500.)
            self._mouse_begin_pos = event.pos()
        elif self._rotating:
            delta = event.pos() - self._mouse_begin_pos
            self.camera_widget.rot_0.setValue(self.camera_widget.rot_0.value() + delta.y() / 300.)
            self.camera_widget.rot_1.setValue(self.camera_widget.rot_1.value() - delta.x() / 300.)
            self._mouse_begin_pos = event.pos()

    def _show_camera_widget(self):
        self.camera_widget.show()
        self.camera_widget.raise_()

    def _update_shape(self, id, val):
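        """Map the 0-100 slider value to a shape coefficient (beta) in [-5, 5] and
        update the model."""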
        val = (val - 50) / 50.0 * 5.0
        self.model.betas[id] = val
        self.draw()

    def _reset_shape(self):
        self._update_canvas = False
        for key, shape in self._shapes():
            shape.setValue(50)
        self._update_canvas = True
        self.draw()

    def _update_pose(self, id, val):
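        """Map the 0-100 slider value to an axis-angle component in [-pi, pi]; channel 0
        is offset by pi, presumably so the centred slider yields an upright model."""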
        val = (val - 50) / 50.0 * np.pi

        if id == 0:
            val += np.pi

        self.model.pose[id] = val
        self.draw()

    def _reset_pose(self):
        self._update_canvas = False
        for key, pose in self._poses():
            pose.setValue(50)
        self._update_canvas = True
        self.draw()

    def _update_position(self, id, val):
        self.model.trans[id] = val
        self.draw()

    def _reset_position(self):
        self._update_canvas = False
        self.pos_0.setValue(0)
        self.pos_1.setValue(0)
        self.pos_2.setValue(0)
        self._update_canvas = True
        self.draw()

    def _poses(self):
        return enumerate([getattr(self, 'pose_%d' % i) for i in range(72)])

    def _shapes(self):
        return enumerate([getattr(self, 'shape_%d' % i) for i in range(10)])

    @staticmethod
    def _to_pixmap(im):
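        """Convert a rendered image to a QPixmap: scale float images in [0, 1] to uint8,
        expand grayscale to three channels, swap BGR to RGB and wrap it in a QImage."""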
        if im.dtype == np.float32 or im.dtype == np.float64:
            im = np.uint8(im * 255)

        if len(im.shape) < 3 or im.shape[-1] == 1:
            im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
        else:
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

        qimg = QtGui.QImage(im, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_RGB888)

        return QtGui.QPixmap(qimg)