Example #1
    def __init__(self, width=800, height=600, near=0.5, far=1000, faces=None):
        self.colors = {
            'pink': [.9, .7, .7],
            'light_blue': [0.65098039, 0.74117647, 0.85882353]
        }
        self.width = width
        self.height = height
        self.faces = faces
        self.renderer = ColoredRenderer()
Example #2
    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)

        self._moving = False
        self._rotating = False
        self._mouse_begin_pos = None
        self._loaded_gender = None
        self._update_canvas = False

        self.camera = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.joints2d = ProjectPoints(rt=np.zeros(3), t=np.zeros(3))
        self.frustum = {'near': 0.1, 'far': 1000., 'width': 100, 'height': 30}
        self.light = LambertianPointLight(vc=np.array([0.94, 0.94, 0.94]), light_color=np.array([1., 1., 1.]))
        self.rn = ColoredRenderer(bgcolor=np.ones(3), frustum=self.frustum, camera=self.camera, vc=self.light,
                                  overdraw=False)

        self.model = None
        self._init_model('f')
        self.model.pose[0] = np.pi

        self.camera_widget = Ui_CameraWidget(self.camera, self.frustum, self.draw)
        self.btn_camera.clicked.connect(lambda: self._show_camera_widget())

        for key, shape in self._shapes():
            shape.valueChanged[int].connect(lambda val, k=key: self._update_shape(k, val))

        for key, pose in self._poses():
            pose.valueChanged[int].connect(lambda val, k=key: self._update_pose(k, val))

        self.pos_0.valueChanged[float].connect(lambda val: self._update_position(0, val))
        self.pos_1.valueChanged[float].connect(lambda val: self._update_position(1, val))
        self.pos_2.valueChanged[float].connect(lambda val: self._update_position(2, val))

        self.radio_f.pressed.connect(lambda: self._init_model('f'))
        self.radio_m.pressed.connect(lambda: self._init_model('m'))

        self.reset_pose.clicked.connect(self._reset_pose)
        self.reset_shape.clicked.connect(self._reset_shape)
        self.reset_postion.clicked.connect(self._reset_position)

        self.canvas.wheelEvent = self._zoom
        self.canvas.mousePressEvent = self._mouse_begin
        self.canvas.mouseMoveEvent = self._move
        self.canvas.mouseReleaseEvent = self._mouse_end

        self.action_save.triggered.connect(self._save_config_dialog)
        self.action_open.triggered.connect(self._open_config_dialog)
        self.action_save_screenshot.triggered.connect(self._save_screenshot_dialog)
        self.action_save_mesh.triggered.connect(self._save_mesh_dialog)

        self.view_joints.triggered.connect(self.draw)
        self.view_joint_ids.triggered.connect(self.draw)
        self.view_bones.triggered.connect(self.draw)

        self._update_canvas = True
Example #3
def _create_renderer(w=640,
                     h=480,
                     rt=np.zeros(3),
                     t=np.zeros(3),
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}

    flipXRotation = np.array([[1.0, 0.0, 0.0, 0.0],
                              [0.0, -1.0, 0., 0.0],
                              [0.0, 0., -1.0, 0.0],
                              [0.0, 0.0, 0.0, 1.0]])
    rn.camera.openglMat = flipXRotation  # this is from setupcamera in utils
    rn.glMode = 'glfw'
    rn.sharedWin = None
    rn.overdraw = True
    rn.nsamples = 8
    rn.msaa = True  # without anti-aliasing, the optimization often does not work

    return rn
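A minimal usage sketch for the helper above: it only wires up the camera and frustum, so geometry and per-vertex colors still have to be attached before reading the render. The triangle below is a placeholder, not part of the original project.

import numpy as np

# placeholder geometry: one triangle placed in front of the camera
verts = np.array([[0., 0., 3.], [0.5, 0., 3.], [0., 0.5, 3.]])
faces = np.array([[0, 1, 2]])

rn = _create_renderer(w=640, h=480)
rn.set(v=verts, f=faces, vc=np.ones_like(verts), bgcolor=np.zeros(3))

img = rn.r  # float image in [0, 1] with shape (480, 640, 3)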
Example #4
    def __init__(self, width=800, height=600, near=0.5, far=1000, faces=None):
        self.colors = {
            'pink': [.9, .7, .7],
            'light_blue': [0.65098039, 0.74117647, 0.85882353],
            'blue': [0.65098039, 0.74117647, 0.85882353],
            'green': [180.0 / 255.0, 238.0 / 255.0, 180.0 / 255],
            'tan': [1.0, 218.0 / 255, 185.0 / 255]
        }
        self.width = width
        self.height = height
        self.faces = faces
        self.renderer = ColoredRenderer()
Example #5
def render_color(mesh, f, Rt, img_size=(224, 224), bg_img=None):

    # apply Rt
    verts = mesh.points().copy()
    for i in range(len(verts)):
        verts[i] = np.dot(Rt[:3, :3], verts[i]) + Rt[:, 3]

    rn = ColoredRenderer()
    if bg_img is not None:
        rn.background_image = bg_img.astype(
            np.float64) / 300. if bg_img.max() > 1 else bg_img

    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.zeros(3),
                              f=np.array([f, f]),
                              c=np.array([img_size[1], img_size[0]]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {
        'near': .5,
        'far': 10.,
        'width': img_size[1],
        'height': img_size[0]
    }
    rn.v = verts
    rn.f = mesh.face_vertex_indices()
    rn.bgcolor = np.zeros(3)

    rn.vc = mesh.vertex_colors()[:, :3]
    return rn.r
Example #6
def main(mesh_list, out_list, scale=1.0, move_scale=True):
    assert len(mesh_list) == len(out_list)
    for mesh_file, out_file in zip(mesh_list, out_list):
        mesh = load_obj_data_binary(mesh_file)
        if move_scale:  # move to center and scale to unit bounding box
            mesh['v'] = (mesh['v'] - np.array([128, -192, 128]) +
                         0.5) * voxel_size

        if not ('vn' in mesh and mesh['vn'] is not None):
            mesh['vn'] = np.array(VertNormals(f=mesh['f'], v=mesh['v']))

        V = ch.array(mesh['v']) * scale
        V -= trans

        C = np.ones_like(mesh['v'])
        C *= np.array([186, 212, 255], dtype=np.float32) / 255.0
        # C *= np.array([158, 180, 216], dtype=np.float32) / 250.0
        C = np.minimum(C, 1.0)
        A = np.zeros_like(mesh['v'])
        A += LambertianPointLight(f=mesh['f'],
                                  v=V,
                                  vn=-mesh['vn'],
                                  num_verts=len(V),
                                  light_pos=np.array([0, -50, -50]),
                                  light_color=np.array([1.0, 1.0, 1.0]),
                                  vc=C)

        cam_t, cam_r = ch.array((0, 0, 0)), ch.array((3.14, 0, 0))
        U = ProjectPoints(v=V,
                          f=[flength, flength],
                          c=[w / 2., h / 2.],
                          k=ch.zeros(5),
                          t=cam_t,
                          rt=cam_r)
        rn = ColoredRenderer(camera=U,
                             v=V,
                             f=mesh['f'],
                             vc=A,
                             bgcolor=np.array([1.0, 0.0, 0.0]),
                             frustum={
                                 'width': w,
                                 'height': h,
                                 'near': 0.1,
                                 'far': 20
                             })

        img = np.asarray(rn)[:, :, (2, 1, 0)]
        msk = np.sum(np.abs(img - np.array([[[0, 0, 1.0]]], dtype=np.float32)),
                     axis=-1,
                     keepdims=True)
        msk[msk > 0] = 1
        img = cv.resize(img, (img.shape[1] // 2, img.shape[0] // 2))
        msk = cv.resize(msk, (msk.shape[1] // 2, msk.shape[0] // 2),
                        interpolation=cv.INTER_AREA)
        msk[msk < 1] = 0
        msk = msk[:, :, np.newaxis]
        img = np.concatenate([img, msk], axis=-1)
        cv.imshow('render3', img)
        cv.waitKey(3)
        cv.imwrite(out_file, np.uint8(img * 255))
Example #7
def render_mask(w, h, v, f, u):
    """renders silhouette"""
    V = ch.array(v)
    A = np.ones(v.shape)
    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, bgcolor=ch.zeros(3),
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
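A possible call site for render_mask, using a simple pinhole camera; the geometry and camera values below are illustrative placeholders, not taken from the original project.

import numpy as np
from opendr.camera import ProjectPoints

w, h = 640, 480
verts = np.array([[0., 0., 3.], [0.5, 0., 3.], [0., 0.5, 3.]])  # placeholder mesh
faces = np.array([[0, 1, 2]])

u = ProjectPoints(v=verts, rt=np.zeros(3), t=np.zeros(3),
                  f=np.array([w, w]) / 2., c=np.array([w, h]) / 2.,
                  k=np.zeros(5))

sil = render_mask(w, h, verts, faces, u)  # white silhouette on a black background, shape (h, w, 3)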
Example #8
def _create_renderer(  # pylint: disable=too-many-arguments
        w=640,
        h=480,
        rt=np.zeros(3),
        t=np.zeros(3),
        f=None,
        c=None,
        k=None,
        near=1.,
        far=10.,
        texture=None):
    """Create a renderer for the specified parameters."""
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    if texture is not None:
        rn = TexturedRenderer()
    else:
        rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    if texture is not None:
        rn.texture_image = np.asarray(cv2.imread(texture), np.float64) / 255.
    return rn
Example #9
def create_renderer(w=640,
                    h=480,
                    rt=np.zeros(3),
                    t=np.zeros(3),
                    f=None,
                    c=None,
                    k=None,
                    near=.5,
                    far=10.,
                    mesh=None):
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    if mesh is not None and hasattr(mesh, 'texture_image'):
        from opendr.renderer import TexturedRenderer
        rn = TexturedRenderer()
        rn.texture_image = mesh.texture_image
        if rn.texture_image.max() > 1:
            rn.texture_image[:] = rn.texture_image[:].r / 255.
        rn.ft = mesh.ft
        rn.vt = mesh.vt
    else:
        from opendr.renderer import ColoredRenderer
        rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
Example #10
def render_color_model_with_lighting(w, h, v, vn, vc, f, u,
                                      sh_comps=None, light_c=ch.ones(3),
                                      vlight_pos=None, vlight_color=None,
                                      bg_img=None):
    """renders colored model with lighting effect"""
    assert(sh_comps is not None or vlight_pos is not None)
    V = ch.array(v)
    A = np.zeros_like(v)

    # SH lighting
    if sh_comps is not None:
        A += vc * SphericalHarmonics(vn=vn, components=sh_comps, light_color=light_c)

    # single point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 1:
        A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlight_pos,
                                  light_color=vlight_color, vc=vc)

    # multiple point lighting (grey light)
    if vlight_color is not None and vlight_pos is not None \
            and len(vlight_pos.shape) == 2:
        for vlp in vlight_pos:
            A += LambertianPointLight(f=f, v=v, num_verts=len(v), light_pos=vlp,
                                      light_color=vlight_color, vc=vc)

    black_img = np.array(np.zeros((w, h, 3)), dtype=np.float32)
    bg_img_ = bg_img if bg_img is not None else black_img

    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, background_image=bg_img_,
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
Example #11
def fit_pose(frame, last_smpl, frustum, nohands, viz_rn):

    if nohands:
        faces = faces_no_hands(frame.smpl.f)
    else:
        faces = frame.smpl.f

    dst_type = cv2.cv.CV_DIST_L2 if cv2.__version__[0] == '2' else cv2.DIST_L2

    dist_i = cv2.distanceTransform(np.uint8(frame.mask * 255), dst_type, 5) - 1
    dist_i[dist_i < 0] = 0
    dist_i[dist_i > 50] = 50
    dist_o = cv2.distanceTransform(255 - np.uint8(frame.mask * 255), dst_type,
                                   5)
    dist_o[dist_o > 50] = 50

    rn_m = ColoredRenderer(camera=frame.camera,
                           v=frame.smpl,
                           f=faces,
                           vc=np.ones_like(frame.smpl),
                           frustum=frustum,
                           bgcolor=0,
                           num_channels=1)

    E = {
        'mask':
        gaussian_pyramid(rn_m * dist_o * 100. + (1 - rn_m) * dist_i,
                         n_levels=4,
                         normalization='size') * 80.,
        '2dpose':
        GMOf(frame.pose_obj, 100),
        'prior':
        frame.pose_prior_obj * 4.,
        'sp':
        frame.collision_obj * 1e3,
    }

    if last_smpl is not None:
        E['last_pose'] = GMOf(frame.smpl.pose - last_smpl.pose, 0.05) * 50.
        E['last_trans'] = GMOf(frame.smpl.trans - last_smpl.trans, 0.05) * 50.

    if nohands:
        x0 = [
            frame.smpl.pose[list(range(21)) + list(range(27, 30)) + list(range(36, 60))],
            frame.smpl.trans
        ]
    else:
        x0 = [
            frame.smpl.pose[list(range(21)) + list(range(27, 30)) + list(range(36, 72))],
            frame.smpl.trans
        ]

    ch.minimize(E,
                x0,
                method='dogleg',
                options={
                    'e_3': .01,
                },
                callback=get_cb(viz_rn, frame))
Example #12
    def __init__(self, m):
        self.m = m
        self.m.betas[:] = np.random.rand(m.betas.size) * .3
        # m.pose[:] = np.random.rand(m.pose.size) * .2
        self.m.pose[:3] = [0., 0., 0.]
        self.m.pose[3:] = np.zeros(45)
        # m.pose[3:] = [-0.42671473, -0.85829819, -0.50662164, +1.97374622, -0.84298473, -1.29958491]
        self.m.pose[0] = np.pi

        # compute inverse components to map from fullpose spec to coefficients
        hands_components = np.asarray(m.hands_components)
        self.hands_components_inv = np.linalg.inv(hands_components)

        # rendering components
        # Assign attributes to renderer
        w, h = (640, 480)

        # Create OpenDR renderer
        self.rn = ColoredRenderer()
        self.rn.camera = ProjectPoints(v=m,
                                       rt=np.zeros(3),
                                       t=np.array([-0.03, -0.04, 0.20]),
                                       f=np.array([w, w]) / 2.,
                                       c=np.array([w, h]) / 2.,
                                       k=np.zeros(5))
        self.rn.frustum = {'near': 0.01, 'far': 2., 'width': w, 'height': h}
        self.rn.set(v=m, f=m.f, bgcolor=np.zeros(3))

        # Construct point light source
        self.rn.vc = LambertianPointLight(f=m.f,
                                          v=self.rn.v,
                                          num_verts=len(m),
                                          light_pos=np.array(
                                              [-1000, -1000, -2000]),
                                          vc=np.ones_like(m) * .9,
                                          light_color=np.array([1., 1., 1.]))
        self.rn.vc += LambertianPointLight(f=m.f,
                                           v=self.rn.v,
                                           num_verts=len(m),
                                           light_pos=np.array(
                                               [+2000, +2000, +2000]),
                                           vc=np.ones_like(m) * .9,
                                           light_color=np.array([1., 1., 1.]))
        self.mvs = MeshViewers(window_width=2000,
                               window_height=800,
                               shape=[1, 3])
Example #13
    def standard_render(self):

        ## Create OpenDR renderer
        rn = ColoredRenderer()

        ## Assign attributes to renderer
        w, h = (640, 480)

        rn.camera = ProjectPoints(v=self.m,
                                  rt=np.zeros(3),
                                  t=np.array([0, 0, 2.]),
                                  f=np.array([w, w]) / 2.,
                                  c=np.array([w, h]) / 2.,
                                  k=np.zeros(5))
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        rn.set(v=self.m, f=self.m.f, bgcolor=np.zeros(3))

        ## Construct point light source
        rn.vc = LambertianPointLight(f=self.m.f,
                                     v=rn.v,
                                     num_verts=len(self.m),
                                     light_pos=np.array([-1000, -1000, -2000]),
                                     vc=np.ones_like(self.m) * .9,
                                     light_color=np.array([1., 1., 1.]))

        ## Show it using OpenCV
        import cv2
        cv2.imshow('render_SMPL', rn.r)
        print('..Press any key while on the display window')
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #14
def render(verts, faces, w=640, h=480):
    # Frontal view
    verts[:, 1:3] = -verts[:, 1:3]

    # Create OpenDR renderer
    rn = ColoredRenderer()

    # Assign attributes to renderer
    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0., 0., 2.]),
                              f=np.array([w, h]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=verts, f=faces, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(f=rn.f,
                                 v=rn.v,
                                 num_verts=len(verts),
                                 light_pos=np.array([1000, -1000, -2000]),
                                 vc=np.ones_like(verts) * .9,
                                 light_color=np.array([1., 1., 1.]))

    return rn.r
Example #15
def Render():

    verts = np.load('../../resault/verts.npy')
    faces = np.load('../../resault/faces.npy')

    rn = ColoredRenderer()
    w, h = (640, 480)

    rn.camera = ProjectPoints(v=verts,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 0.8, 'far': 16., 'width': w, 'height': h}
    rn.set(v=verts, f=faces, bgcolor=np.array([255, 255, 255]))

    rn.vc = LambertianPointLight(f=faces,
                                 v=rn.v,
                                 num_verts=len(verts),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(verts) * .9,
                                 light_color=np.array([1., 1., 1.]))

    # import cv2
    #
    # cv2.imshow('render_SMPL', rn.r)
    # cv2.waitKey(0)
    import matplotlib.pyplot as plt
    plt.ion()
    plt.axis('off')
    plt.imshow(rn.r)
    input()  # block until Enter is pressed so the figure stays visible
    plt.show()
Example #16
def renderBody(m):
    from opendr.camera import ProjectPoints
    from opendr.renderer import ColoredRenderer
    from opendr.lighting import LambertianPointLight
    # Create OpenDR renderer
    rn = ColoredRenderer()
    # Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
    # Construct point light source
    rn.vc = LambertianPointLight(f=m.f,
                                 v=rn.v,
                                 num_verts=len(m),
                                 light_pos=np.array([-1000, -1000, -2000]),
                                 vc=np.ones_like(m) * .9,
                                 light_color=np.array([1., 1., 1.]))
    plt.ion()
    plt.imshow(np.fliplr(rn.r))  # FLIPPED!
    plt.show()
    plt.xticks([])
    plt.yticks([])
Example #17
def render_color_model_without_lighting(w, h, v, vc, f, u, bg_img=None):
    """renders colored model without lighting effect"""
    V = ch.array(v)
    A = vc
    black_img = np.array(np.zeros((w, h, 3)), dtype=np.float32)
    bg_img_ = bg_img if bg_img is not None else black_img
    rn = ColoredRenderer(camera=u, v=V, f=f, vc=A, background_image=bg_img_,
                         frustum={'width': w, 'height': h, 'near': 0.1, 'far': 20})
    return rn.r
Example #18
File: renderer.py  Project: lixiny/bihand
def _create_renderer(w=640,
                     h=480,
                     rt=np.zeros(3),
                     t=np.zeros(3),
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
Example #19
def mesh2Image(vertices,
               faces,
               batch,
               path,
               name,
               height,
               width,
               vertices_num=6890):
    # Create OpenDR renderer
    rn = ColoredRenderer()

    rt_1 = np.zeros(3)

    rn.camera = ProjectPoints(
        v=vertices,  # vertices
        # v=m,
        rt=rt_1,
        # x, y, z translation of the camera, z >= 0 (e.g. [0, 0, 2])
        t=np.array([0, 0, 0]),
        # f=np.array([w, w]) / 2,  # focal length: scales the picture
        # c=np.array([w, h]) / 2,  # principal point: shifts the image from the top-left corner
        f=np.array([1, 1]),
        c=np.array([0, 0]),
        k=np.zeros(5))
    rn.frustum = {'near': 1, 'far': 15, 'width': width, 'height': height}
    rn.set(v=vertices, f=faces, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(
        f=faces,  # face
        v=vertices,
        # v=rn.v, #vertex?
        num_verts=len(vertices),
        light_pos=np.array([-1000, -1000, -2000]),  # point light position
        vc=np.ones_like(vertices) * .9,  # albedo per vertex
        light_color=np.array([1., 1.,
                              1.]))  # Blue, Green, Red; light intensity

    # make the image binary (black and white); these steps are somewhat magical
    rn.change_col(np.ones((vertices_num, 3)))
    #mask = rn.r.copy()  # takes lots of time

    mask = rn.r * 255
    import cv2
    if batch == 1:
        cv2.imwrite('%s/%s.png' % (path, name), mask)
    else:
        cv2.imwrite('%s/%s_%d.png' % (path, name, batch), mask)
Example #20
def _create_renderer(w=640,
                     h=480,
                     rt=np.zeros(3),
                     t=np.zeros(3),
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):

    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k

    rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
Example #21
def fit_consensus(frames, base_smpl, camera, frustum, model_data, nohands, icp_count, naked, display):
    if nohands:
        faces = faces_no_hands(base_smpl.f)
    else:
        faces = base_smpl.f

    vis_rn_b = BoundaryRenderer(camera=camera, frustum=frustum, f=faces, num_channels=1)
    vis_rn_m = ColoredRenderer(camera=camera, frustum=frustum, f=faces, vc=np.zeros_like(base_smpl), bgcolor=1,
                               num_channels=1)

    model_template = Smpl(model_data)
    model_template.betas[:] = base_smpl.betas.r

    g_laplace = regularize_laplace()
    g_model = regularize_model()
    g_symmetry = regularize_symmetry()

    face_ids = get_face_vertex_ids()

    for step, (w_laplace, w_model, w_symmetry, sigma) in enumerate(zip(
            np.linspace(6.5, 4.0, icp_count) if naked else np.linspace(4.0, 2.0, icp_count),
            np.linspace(0.9, 0.6, icp_count) if naked else np.linspace(0.6, 0.3, icp_count),
            np.linspace(3.6, 1.8, icp_count),
            np.linspace(0.06, 0.003, icp_count),
    )):
        log.info('# Step {}'.format(step))

        L = laplacian(model_template.r, base_smpl.f)
        delta = L.dot(model_template.r)

        w_laplace *= g_laplace.reshape(-1, 1)
        w_model *= g_model.reshape(-1, 1)
        w_symmetry *= g_symmetry.reshape(-1, 1)

        E = {
            'laplace': (sp_dot(L, base_smpl.v_shaped_personal) - delta) * w_laplace,
            'model': (base_smpl.v_shaped_personal - model_template) * w_model,
            'symmetry': (base_smpl.v_personal + np.array([1, -1, -1])
                         * base_smpl.v_personal[model_data['vert_sym_idxs']]) * w_symmetry,
        }

        log.info('## Matching rays with contours')
        for current, f in enumerate(tqdm(frames)):
            E['silh_{}'.format(current)] = ray_objective(f, sigma, base_smpl, camera, vis_rn_b, vis_rn_m)            
            #paper 2
            E['face_{}'.format(current)] = ray_face(f, sigma, base_smpl, camera, face_ids) 

        log.info('## Run optimization')
        ch.minimize(
            E,
            [base_smpl.v_personal, model_template.betas],
            method='dogleg',
            options={'maxiter': 15, 'e_3': 0.001},
            callback=get_cb(frames[0], base_smpl, camera, frustum) if display else None
        )
Example #22
def get_cam_rend(verts, faces, cam_y, cam_z):

    frustum = {'near': 0.1, 'far': 1000., 'width': 1000, 'height': 1000}

    camera = ProjectPoints(v=verts, t=np.array([0, cam_y, cam_z]), rt=np.zeros(3), f=[1000, 1000],
                           c=[1000 / 2., 1000 / 2.], k=np.zeros(5))

    rn_m = ColoredRenderer(camera=camera, v=verts, f=faces, vc=np.ones_like(verts),
                           frustum=frustum, bgcolor=0, num_channels=1)

    return camera, rn_m
Example #23
def get_cb(frame, base_smpl, camera, frustum):
    viz_mask = frame.mask / 255.
    base_smpl.pose[:] = frame.pose
    camera.t[:] = frame.trans
    camera.rt[:] = 0

    rn = ColoredRenderer(camera=camera, v=base_smpl, f=base_smpl.f, vc=np.ones_like(base_smpl),
                         frustum=frustum, bgcolor=0, num_channels=1)

    def cb(_):
        silh_diff = (rn.r - viz_mask + 1) / 2.
        im.show(silh_diff, waittime=1)

    return cb
Example #24
def _create_renderer(
        w=640,
        h=480,
        rt=np.zeros(3),
        t=np.zeros(3),
        f=None,
        c=None,
        k=None,
        near=0.5,
        far=10.0,
):

    f = np.array([w, w]) / 2.0 if f is None else f
    c = np.array([w, h]) / 2.0 if c is None else c
    k = np.zeros(5) if k is None else k
    # far = 1000
    rn = ColoredRenderer()

    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    # print(rn.camera)
    rn.frustum = {"near": near, "far": far, "height": h, "width": w}
    # print(rn.frustum)
    return rn
Example #25
    def generate(self, img_bgr, texture_bgr):
        img = img_bgr
        self.set_texture(texture_bgr)
        vert_shifted, theta, cam_for_render = self.hmr.predict(img)
        pose = theta[self.num_cam:(self.num_cam + self.num_theta)]
        beta = theta[(self.num_cam + self.num_theta):]

        self.body.pose[:] = pose
        self.body.betas[:] = beta

        rn_vis = TexturedRenderer()
        rn_vis.camera = ProjectPoints(t=np.zeros(3),
                                      rt=np.zeros(3),
                                      c=cam_for_render[1:],
                                      f=np.ones(2) * cam_for_render[0],
                                      k=np.zeros(5),
                                      v=vert_shifted)
        rn_vis.frustum = {
            'near': 0.1,
            'far': 1000.,
            'width': self.width,
            'height': self.height
        }
        rn_vis.set(v=vert_shifted,
                   f=self.m.f,
                   vc=self.m.vc,
                   texture_image=self.m.texture_image,
                   ft=self.m.ft,
                   vt=self.m.vt,
                   bgcolor=np.zeros(3))
        # rn_vis.background_image = img_bgr / 255. if img_bgr.max() > 1 else img_bgr

        out_img = rn_vis.r
        out_img = (out_img * 255).astype(np.uint8)
        out_img = cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR)

        silhouette_rn = ColoredRenderer()
        silhouette_rn.camera = ProjectPoints(v=self.body,
                                             rt=np.zeros(3),
                                             t=np.zeros(3),
                                             f=np.ones(2) * cam_for_render[0],
                                             c=cam_for_render[1:],
                                             k=np.zeros(5))
        silhouette_rn.frustum = {
            'near': 0.1,
            'far': 1000.,
            'width': self.width,
            'height': self.height
        }
        silhouette_rn.set(v=vert_shifted,
                          f=self.m.f,
                          vc=self.m.vc,
                          bgcolor=np.zeros(3))

        return out_img, texture_dr_wrt(rn_vis,
                                       silhouette_rn.r), silhouette_rn.r
Example #26
def create_synth(verts, joints, skin_color, f, ss, tu, tv, rot, w, h, bg):
       rn = ColoredRenderer()
       R = cv2.Rodrigues(rot)[0]
       verts = np.transpose(np.matmul(R, np.transpose(verts)))
       joints = np.transpose(np.matmul(R, np.transpose(joints)))
       verts_3d = verts
       joints_3d = joints

       verts = np.array([[ss, ss, 1], ] * 778) * verts
       joints = np.array([[ss, ss, 1], ] * 21) * joints

       verts = verts + np.array([[tu, tv, 0], ] * 778)
       joints = joints + np.array([[tu, tv, 0], ] * 21)

       umax = np.max(verts[:, 0])
       umin = np.min(verts[:, 0])
       vmax = np.max(verts[:, 1])
       vmin = np.min(verts[:, 1])
       if ((umin < 0.) or (vmin < 0.) or (umax > w) or (vmax > h)):
              print('mesh outside')

       verts[:, 2] = 10. + (verts[:, 2] - np.mean(verts[:, 2]))
       verts[:, :2] = verts[:, :2] * np.expand_dims(verts[:, 2], 1)

       rn.camera = ProjectPoints(v=verts, rt=np.zeros(3), t=np.array([0, 0, 0]), f=np.array([1, 1]),
                                 c=np.array([0, 0]), k=np.zeros(5))

       rn.frustum = {'near': 1., 'far': 20., 'width': w, 'height': h}
       rn.set(v=verts, f=f, bgcolor=np.zeros(3))
       rn.vc = np.ones((778, 3))

       mask = rn.r.copy()
       mask = mask[:, :, 0].astype(np.uint8)

       rn.vc = skin_color
       hand = rn.r.copy() * 255.

       image = (1 - np.expand_dims(mask, 2)) * bg + np.expand_dims(mask, 2) * hand
       image = image.astype(np.uint8)

       image = Image.fromarray(image).resize((224, 224), Image.LANCZOS)
       return image, mask, verts_3d, joints_3d, verts, joints
Example #27
    def __init__(self, resolution=(224, 224), ratio=1):
        self.resolution = (resolution[0] * ratio, resolution[1] * ratio)
        self.ratio = ratio
        self.focal_length = 5000.
        self.K = np.array([[self.focal_length, 0., self.resolution[1] / 2.],
                           [0., self.focal_length, self.resolution[0] / 2.],
                           [0., 0., 1.]])
        self.colors_dict = {
            'red': np.array([0.5, 0.2, 0.2]),
            'pink': np.array([0.7, 0.5, 0.5]),
            'neutral': np.array([0.7, 0.7, 0.6]),
            'purple': np.array([0.5, 0.5, 0.7]),
            'green': np.array([0.5, 0.55, 0.3]),
            'sky': np.array([0.3, 0.5, 0.55]),
            'white': np.array([1.0, 0.98, 0.94]),
        }
        self.renderer = ColoredRenderer()
        self.faces = get_smpl_faces()
Example #28
def render_smpl(m):
    # Create OpenDR renderer
    rn = ColoredRenderer()
    # Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array(
        [0, 0, 2.]), f=np.array([w, w])/2., c=np.array([w, h])/2., k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))
    # Construct point light source
    rn.vc = LambertianPointLight(
        f=m.f,
        v=rn.v,
        num_verts=len(m),
        light_pos=np.array([-1000, -1000, -2000]),
        vc=np.ones_like(m)*.9,
        light_color=np.array([1., 1., 1.]))
    image = rn.r * 255
    return image
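A hedged usage sketch for render_smpl above; the model path is a placeholder and assumes the SMPL loader used elsewhere in these examples.

import cv2
import numpy as np
from smpl_webuser.serialization import load_model

m = load_model('models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')  # placeholder path
m.pose[:] = np.random.rand(m.pose.size) * .2
m.pose[0] = np.pi  # rotate the body to face the camera, as in the other examples

image = render_smpl(m)  # float image scaled to [0, 255], shape (480, 640, 3), RGB order
cv2.imwrite('render_smpl.png', image)  # note: OpenCV saves in BGR order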
Example #29
    def render(self, thetas, texture_bgr, rotate=np.array([0, 0, 0]), background_img=None):
        """
        get the rendered image and rendered silhouette
        :param thetas: model parameters, 3 * camera parameter + 72 * body pose + 10 * body shape
        :param texture_bgr: texture image in bgr format
        :return: the rendered image and deviation of rendered image to texture image
        (rendered image, deviation of rendered image, silhouette)
        """
        self.set_texture(texture_bgr)
        thetas = thetas.reshape(-1)
        cams = thetas[:self.num_cam]
        theta = thetas[self.num_cam: (self.num_cam + self.num_theta)]
        beta = thetas[(self.num_cam + self.num_theta):]

        self.body.pose[:] = theta
        self.body.betas[:] = beta

        #
        # size = cams[0] * min(self.w, self.h)
        # position = cams[1:3] * min(self.w, self.h) / 2 + min(self.w, self.h) / 2
        """
        ####################################################################
        ATTENTION!
        I do not know why the flength is 500.
        But it worked
        ####################################################################
        """

        texture_rn = TexturedRenderer()
        texture_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                          f=np.ones(2) * self.img_size * 0.62,
                                          c=np.array([self.w / 2, self.h / 2]),
                                          k=ch.zeros(5))
        texture_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        texture_rn.set(v=self.body, f=self.m.f, vc=self.m.vc, texture_image=self.m.texture_image, ft=self.m.ft,
                       vt=self.m.vt)
        if background_img is not None:
            texture_rn.background_image = background_img / 255. if background_img.max() > 1 else background_img

        silhouette_rn = ColoredRenderer()
        silhouette_rn.camera = ProjectPoints(v=self.body, rt=rotate, t=ch.array([0, 0, 2]),
                                             f=np.ones(2) * self.img_size * 0.62,
                                             c=np.array([self.w / 2, self.h / 2]),
                                             k=ch.zeros(5))
        silhouette_rn.frustum = {'near': 1., 'far': 10., 'width': self.w, 'height': self.h}
        silhouette_rn.set(v=self.body, f=self.m.f, vc=np.ones_like(self.body), bgcolor=np.zeros(3))

        return texture_rn.r, texture_dr_wrt(texture_rn, silhouette_rn.r), silhouette_rn.r
Example #30
def render_smpl(par, theta, beta, img_out_file, model_path, front_view=False):
    m = load_model(model_path)
    ## Assign the given pose
    m.pose[:] = theta
    m.betas[:] = beta
    # Define specific parameters for showing a front view of the rendering
    if front_view:
        m.pose[:3] = np.array([np.pi, 0, 0], dtype=np.float32)
        rt = np.zeros(3)
        light_source = np.array([-1000, -1000, -2000])
    else:
        rt = np.array([3.14, 0, 0])
        light_source = np.array([1000, 1000, 2000])

    ## Create OpenDR renderer
    rn = ColoredRenderer()
    ## Assign attributes to renderer
    w, h = (640, 480)
    rn.camera = ProjectPoints(v=m,
                              rt=rt,
                              t=np.array([0, 0, 2.]),
                              f=np.array([w, w]) / 2.,
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=m, f=m.f, bgcolor=np.zeros(3))

    ## Construct point light source
    rn.vc = LambertianPointLight(
        f=m.f,
        v=rn.v,
        num_verts=len(m),
        #light_pos=np.array([-1000,-1000,-2000]),
        light_pos=light_source,
        vc=np.ones_like(m) * .9,
        light_color=np.array([1., 1., 1.]))

    cv2.imwrite(img_out_file, rn.r * 255.0)
Example #31
    return trans, pose


if __name__ == '__main__':
    smpl = Smpl(
        model='../vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
    smpl.pose[:] = np.random.randn(72) * .2
    smpl.pose[0] = np.pi
    # smpl.v_personal[:] = np.random.randn(*smpl.shape) / 500.

    # render test
    from opendr.renderer import ColoredRenderer
    from opendr.camera import ProjectPoints
    from opendr.lighting import LambertianPointLight

    rn = ColoredRenderer()

    # Assign attributes to renderer
    w, h = (640, 480)

    rn.camera = ProjectPoints(v=smpl,
                              rt=np.zeros(3),
                              t=np.array([0, 0, 3.]),
                              f=np.array([w, w]),
                              c=np.array([w, h]) / 2.,
                              k=np.zeros(5))
    rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
    rn.set(v=smpl, f=smpl.f, bgcolor=np.zeros(3))

    # Construct point light source
    rn.vc = LambertianPointLight(f=smpl.f,
Example #32
    def test_occlusion(self):
        if visualize:
            import matplotlib.pyplot as plt
            plt.ion()

        # Create renderer
        import chumpy as ch
        import numpy as np
        from opendr.renderer import TexturedRenderer, ColoredRenderer
        #rn = TexturedRenderer()
        rn = ColoredRenderer()

        # Assign attributes to renderer
        from util_tests import get_earthmesh
        m = get_earthmesh(trans=ch.array([0,0,4]), rotation=ch.zeros(3))
        rn.texture_image = m.texture_image
        rn.ft = m.ft
        rn.vt = m.vt
        m.v[:,2] = np.mean(m.v[:,2])

        # red is front and zero
        # green is back and 1
        t0 = ch.array([1,0,.1])
        t1 = ch.array([-1,0,.1])
        v0 = ch.array(m.v) + t0

        if False:
            v1 = ch.array(m.v*.4 + np.array([0,0,3.8])) + t1
        else:
            v1 = ch.array(m.v) + t1
        vc0 = v0*0 + np.array([[.4,0,0]])
        vc1 = v1*0 + np.array([[0,.4,0]])
        vc = ch.vstack((vc0, vc1))

        v = ch.vstack((v0, v1))
        f = np.vstack((m.f, m.f+len(v0)))

        w, h = (320, 240)
        rn.camera = ProjectPoints(v=v, rt=ch.zeros(3), t=ch.zeros(3), f=ch.array([w,w])/2., c=ch.array([w,h])/2., k=ch.zeros(5))
        rn.camera.t = ch.array([0,0,-2.5])
        rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
        m.vc = v.r*0 + np.array([[1,0,0]])
        rn.set(v=v, f=f, vc=vc)

        t0[:] = np.array([1.4, 0, .1-.02])
        t1[:] = np.array([-0.6, 0, .1+.02])

        target = rn.r

        if visualize:
            plt.figure()
            plt.imshow(target)
            plt.title('target')

            plt.figure()
            plt.show()

        im_orig = rn.r.copy()

        from cvwrap import cv2

        tr = t0
        eps_emp = .02
        eps_pred = .02

        #blur = lambda x : cv2.blur(x, ksize=(5,5))
        blur = lambda x : x
        for tr in [t0, t1]:
            if tr is t0:
                sum_limits = np.array([2.1e+2, 6.9e+1, 1.6e+2])
            else:
                sum_limits = [1., 5., 4.]

            if visualize:
                plt.figure()
            for i in range(3):
                dr_pred = np.array(rn.dr_wrt(tr[i]).todense()).reshape(rn.shape) * eps_pred
                dr_pred = blur(dr_pred)

                # central differences
                tr[i] = tr[i].r + eps_emp/2.
                rn_greater = rn.r.copy()
                tr[i] = tr[i].r - eps_emp/1.
                rn_lesser = rn.r.copy()
                tr[i] = tr[i].r + eps_emp/2.

                dr_emp = blur((rn_greater - rn_lesser) * eps_pred / eps_emp)

                dr_pred_shown = np.clip(dr_pred, -.5, .5) + .5
                dr_emp_shown = np.clip(dr_emp, -.5, .5) + .5

                if visualize:
                    plt.subplot(3,3,i+1)
                    plt.imshow(dr_pred_shown)
                    plt.title('pred')
                    plt.axis('off')

                    plt.subplot(3,3,3+i+1)
                    plt.imshow(dr_emp_shown)
                    plt.title('empirical')
                    plt.axis('off')

                    plt.subplot(3,3,6+i+1)

                diff = np.abs(dr_emp - dr_pred)
                if visualize:
                    plt.imshow(diff)
                diff = diff.ravel()
                if visualize:
                    plt.title('diff (sum: %.2e)'  % (np.sum(diff)))
                    plt.axis('off')

                # print 'dr pred sum: %.2e' % (np.sum(np.abs(dr_pred.ravel())),)
                # print 'dr emp sum: %.2e' % (np.sum(np.abs(dr_emp.ravel())),)

                #import pdb; pdb.set_trace()
                self.assertTrue(np.sum(diff) < sum_limits[i])
Example #33
import numpy as np
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
from opendr.camera import ProjectPoints
from smpl_webuser.serialization import load_model

## Load SMPL model (here we load the female model)
m = load_model('../../models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')

## Assign random pose and shape parameters
m.pose[:] = np.random.rand(m.pose.size) * .2
m.betas[:] = np.random.rand(m.betas.size) * .03
m.pose[0] = np.pi

## Create OpenDR renderer
rn = ColoredRenderer()

## Assign attributes to renderer
w, h = (640, 480)

rn.camera = ProjectPoints(v=m, rt=np.zeros(3), t=np.array([0, 0, 2.]), f=np.array([w,w])/2., c=np.array([w,h])/2., k=np.zeros(5))
rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
rn.set(v=m, f=m.f, bgcolor=np.zeros(3))

## Construct point light source
rn.vc = LambertianPointLight(
    f=m.f,
    v=rn.v,
    num_verts=len(m),
    light_pos=np.array([-1000,-1000,-2000]),
    vc=np.ones_like(m)*.9,