Example 1
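These snippets are standalone main() functions from a DensePose/SMPL texture-mapping project, and none of them carries its imports. Judging by the names they use, all five examples assume roughly the following; the project-local helpers (Render, match_faces, GetDensePoseMask) are guesses as to origin, not confirmed by the source:

import os
import pickle

import cv2
import numpy as np
import pyrender
import torch
import trimesh
from pycocotools.coco import COCO
from scipy import io
from smplx import body_models
from tqdm import tqdm

# Project-local helpers; their module paths are assumptions:
# from render import Render
# from utils import GetDensePoseMask, match_faces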
def main(opt):
    # ASCII PLY header for a coloured point cloud; the vertex count is filled
    # in below. Header lines must start at column 0, so the string is built
    # without the leading whitespace a triple-quoted literal would pick up.
    ply_start = ('ply\n'
                 'format ascii 1.0\n'
                 'element vertex {}\n'
                 'property float x\n'
                 'property float y\n'
                 'property float z\n'
                 'property uchar red\n'
                 'property uchar green\n'
                 'property uchar blue\n'
                 'end_header\n')

    model = body_models.create(model_path='../3d_data/models',
                               model_type='smpl',
                               gender='male',
                               ext='pkl')
    with open('../3d_data/densepose_uv.pkl', 'rb') as smpl_file:
        smpl = pickle.load(smpl_file)
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
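    # 'f_extended' / 'v_extended' appear to be DensePose's cut version of the
    # SMPL topology: vertices are duplicated along the UV-atlas seams so that
    # each of the 24 body charts carries its own texture coordinates.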
    uv_faceid = io.loadmat(
        '../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']

    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()

    smpl_vertices = np.array([vertices[i] for i in smpl['v_extended']])

    smpl_uv_visual = trimesh.visual.TextureVisuals(uv=uv, image=texture)

    # smpl_render = Render(width=opt.image_width, height=opt.image_height,
    #                      camera_distance=opt.camera_distance, pose_y=opt.global_y,
    #                      focal_length=opt.focal_length)
    smpl_render = Render(width=opt.image_width,
                         height=opt.image_height,
                         pose_y=opt.global_y)

    smpl_render.set_render(vertices=smpl_vertices,
                           faces=faces,
                           visual=smpl_uv_visual)

    smpl_norm_vertices = smpl_render.vertices

    smpl_render_uv = smpl_render.render_visual(
        flags=pyrender.RenderFlags.UV_RENDERING, face_id=uv_faceid)
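    # Note: UV_RENDERING is not a flag in stock pyrender (nor are the
    # BARYCENTRIC_COORDINATES and TRIANGLE_ID_RENDERING flags used in later
    # examples); these snippets appear to rely on a patched fork.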

    smpl_render.set_render(vertices=smpl_vertices, faces=faces)

    norm_vertices = smpl_render.vertices
    color = (norm_vertices * 255).astype('uint8')
    concatenated_smpl = np.concatenate((norm_vertices, color), axis=1)
    ply_start = ply_start.format(norm_vertices.shape[0])

    with open(opt.save_loc, 'w') as write_file:
        write_file.write(ply_start)
        np.savetxt(write_file,
                   concatenated_smpl,
                   fmt=' '.join(['%0.8f'] * 3 + ['%d'] * 3))
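The function writes the normalized vertices as an ASCII PLY point cloud: three float coordinates plus an 8-bit RGB colour per row, with the colour directly encoding position. A quick sanity check of the output (a sketch, assuming trimesh is installed; 'out.ply' stands in for opt.save_loc):

import trimesh

cloud = trimesh.load('out.ply')  # parses to a trimesh.PointCloud
assert len(cloud.vertices) == len(cloud.colors)
print(cloud.vertices.min(axis=0), cloud.vertices.max(axis=0))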
Example 2
def main(opt):
    model = body_models.create(model_path='../3d_data/models',
                               model_type='smpl',
                               gender='male',
                               ext='pkl')
    with open('../3d_data/densepose_uv.pkl', 'rb') as smpl_file:
        smpl = pickle.load(smpl_file)
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
    uv_faceid = io.loadmat(
        '../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']

    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    global_tr = np.eye(4)  # identity pose for the UV-square mesh

    # set up the rendering objects
    focal_length = opt.focal_length * opt.image_height
    # mesh_camera = pyrender.IntrinsicsCamera(focal_length, focal_length, opt.image_width / 2, opt.image_height / 2,
    #                                    opt.znear, opt.zfar)
    mesh_camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    camera = pyrender.OrthographicCamera(xmag=1.0, ymag=1.0, znear=0.05)
    camera_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.05], [0.0, 0.0, 0.0, 1.0]])

    mesh_tr = np.array([[1.0, 0.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, opt.global_y + 0.11],
                        [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])

    mesh_camera_pose = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0],
                                 [0.0, 0.0, 1.0, opt.camera_distance],
                                 [0.0, 0.0, 0.0, 1.0]])

    render = pyrender.OffscreenRenderer(opt.image_width, opt.image_height)

    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()

    mesh_verts = np.array([vertices[i] for i in smpl['v_extended']])
    visual_check = trimesh.visual.TextureVisuals(uv=uv, image=texture)

    tri_mesh_scene = trimesh.Trimesh(vertices=mesh_verts,
                                     faces=faces,
                                     visual=visual_check)

    mesh_body = pyrender.Mesh.from_trimesh(tri_mesh_scene)
    mesh_scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5],
                                bg_color=[-1.0, -1.0, -1.0])

    mesh_scene.add(mesh_body, pose=mesh_tr)
    mesh_scene.add(mesh_camera, pose=mesh_camera_pose)

    rendered_uv, depth = render.render(scene=mesh_scene,
                                       flags=pyrender.RenderFlags.UV_RENDERING)
    rendered_uv = rendered_uv.copy()

    # Channel 2 of the UV render holds a face index (background is -1); remap
    # each face index to its DensePose part id (1-24) via All_FaceIndices.
    mask = rendered_uv[:, :, 2] != -1.
    temp_2 = rendered_uv[:, :, 2]
    temp_2[mask] = np.take(uv_faceid, temp_2[mask].astype('int'))
    rendered_uv[:, :, 2] = temp_2

    cv2.imshow('UV', rendered_uv)
    bounds = tri_mesh_scene.bounding_box_oriented.extents

    # Normalise into the unit cube: centre, scale by the oriented bounding-box
    # extents (giving roughly [-0.5, 0.5]), then shift into [0, 1].
    mesh_verts -= mesh_scene.centroid
    mesh_verts /= bounds
    # mesh_verts *= 2
    mesh_verts = mesh_verts + 0.5

    face_select = faces[uv_faceid[:, 0] == 1]

    # verts = np.concatenate((uv, np.ones(uv.shape[:2] + (1,))), axis=2)

    # uv[:, 2] = 1
    verts = (uv * 2) - 1
    visual = trimesh.visual.ColorVisuals(vertex_colors=uv)
    tri_mesh = trimesh.Trimesh(vertices=verts,
                               faces=face_select,
                               visual=visual)
    # tri_mesh

    mesh = pyrender.Mesh.from_trimesh(tri_mesh)

    # tri_mesh.show()

    scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5],
                           bg_color=[-1.0, -1.0, -1.0])
    scene.add(mesh, pose=global_tr)
    scene.add(camera, pose=camera_pose)

    rendered_color_visual, depth = render.render(
        scene=scene, flags=pyrender.RenderFlags.SKIP_CULL_FACES)
    # pyrender.Viewer(scene, render_flags={'cull_faces': False})
    cv2.imshow('Part UV', rendered_color_visual)
    # cv2.waitKey(0)

    rendered_interp, _ = render.render(
        scene=scene,
        flags=pyrender.RenderFlags.BARYCENTRIC_COORDINATES
        | pyrender.RenderFlags.SKIP_CULL_FACES)
    tri_id, _ = render.render(scene=scene,
                              flags=pyrender.RenderFlags.TRIANGLE_ID_RENDERING
                              | pyrender.RenderFlags.SKIP_CULL_FACES)

    # Blend per-vertex attributes with the per-pixel barycentric weights:
    # out[y, x] = sum_k bary_k(y, x) * attr(face(y, x), vertex k).
    vertex_stream = np.take(mesh_verts, face_select, axis=0)
    tri_id = tri_id[:, :, 0]

    rendered_interp = rendered_interp.reshape(rendered_interp.shape +
                                              (1, )).repeat([3], axis=-1)
    out_view = vertex_stream[tri_id.astype('int')] * rendered_interp
    out_view = out_view.sum(axis=-2)

    # rendered_uv[rendered_uv == -1] = 0
    # rendered_uv[:, :, 2] /= 255
    out_view[rendered_color_visual < 0] = 0

    # cv2.imwrite('../saves/checks/mesh_normalized_uv.jpg', (rendered_uv * 255).astype('uint8'))
    cv2.imshow('Coords', out_view)
    cv2.imwrite('../saves/checks/mesh_uv_render.jpg',
                (out_view * 255).astype('uint8'))
    cv2.waitKey(0)
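The closing block re-implements the rasteriser's interpolation stage by hand: the triangle-id pass tells each pixel which face it sees, the barycentric pass supplies the three blending weights, and the output attribute is their weighted sum over the face's vertices. The same gather on toy data (all names here are illustrative):

import numpy as np

attr = np.array([[0.0], [1.0], [2.0], [3.0]])    # one scalar per vertex
tris = np.array([[0, 1, 2], [1, 2, 3]])          # two triangles
stream = np.take(attr, tris, axis=0)             # (2, 3, 1) per-face vertex attrs
bary = np.array([0.2, 0.3, 0.5])                 # weights for one pixel on face 0
pixel = (stream[0] * bary[:, None]).sum(axis=0)  # 0.2*0 + 0.3*1 + 0.5*2 = 1.3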
Example 3
def main(opt):

    model = body_models.create(model_path='../3d_data/models',
                               model_type='smpl',
                               gender='male',
                               ext='pkl')
    with open('../3d_data/densepose_uv.pkl', 'rb') as smpl_file:
        smpl = pickle.load(smpl_file)
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
    uv_faceid = io.loadmat(
        '../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']
    uv_vertices = (uv * 2) - 1
    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()

    smpl_vertices = np.array([vertices[i] for i in smpl['v_extended']])

    smpl_uv_visual = trimesh.visual.TextureVisuals(uv=uv, image=texture)

    # smpl_render = Render(width=opt.image_width, height=opt.image_height, pose_y=opt.global_y)

    smpl_render = Render(width=opt.image_width,
                         height=opt.image_height,
                         camera_distance=opt.camera_distance,
                         pose_y=opt.global_y,
                         focal_length=opt.focal_length)

    smpl_render.set_render(vertices=smpl_vertices, faces=faces)

    smpl_norm_vertices = smpl_render.vertices

    smpl_uv = smpl_render.render_interpolate(vertices=uv, skip_cull=False)
    smpl_noc = smpl_render.render_interpolate(
        vertices=smpl_norm_vertices).interpolated

    smpl_class_id = match_faces(smpl_uv.triangle, uv_faceid)
    smpl_class_id = smpl_class_id.reshape(smpl_class_id.shape + (1, ))
    smpl_class_id[smpl_class_id == -1] = 0

    # Accumulators: one (H, W, 2) body-UV map and one (3, H, W) NOC texture
    # per DensePose part (24 parts in total).
    smpl_uv_stack = np.array([]).reshape(
        (0, opt.image_height, opt.image_width, 2))
    aggregate_textures = np.array([], dtype='float64').reshape(
        (0, 3, opt.image_height, opt.image_width))

    uv_render = Render(width=opt.image_width, height=opt.image_height)

    for idx in range(1, 25):
        # face_select = faces[uv_faceid[:, 0] == idx, :]
        # Select every face that shares at least one vertex with part `idx`,
        # slightly dilating the part so neighbouring charts overlap at seams.
        # np.hstack needs a sequence, not a generator, in recent NumPy.
        id_select = np.unique(
            np.hstack([
                np.where(faces == vert)[0]
                for face in faces[uv_faceid[:, 0] == idx, :] for vert in face
            ]))

        face_select = faces[id_select, :]

        uv_render.set_render(vertices=uv_vertices,
                             faces=face_select,
                             normalize=False)
        # out_view = uv_render.render_visual(flags=pyrender.RenderFlags.SKIP_CULL_FACES)
        out_view = np.flip(uv_render.render_interpolate(
            vertices=smpl_norm_vertices).interpolated.transpose([2, 0, 1]),
                           axis=1)
        aggregate_textures = np.concatenate(
            [aggregate_textures,
             out_view.reshape((1, ) + out_view.shape)])
        smpl_uv_stack = np.concatenate([
            smpl_uv_stack,
            (smpl_uv.interpolated[:, :, :-1] *
             (smpl_class_id.repeat([2], axis=-1)
              == idx)).reshape((1, ) + smpl_uv.interpolated[:, :, :-1].shape)
        ])

        # cv2.imshow("Part Texture", aggregate_textures[idx - 1].transpose([1, 2, 0]))
        # cv2.waitKey(0)

    texture_map = torch.from_numpy(aggregate_textures)

    smpl_uv_stack = torch.from_numpy((smpl_uv_stack * 2) - 1)

    output_textured_uv = 0

    for idx in range(0, 24):
        output_textured_uv += torch.nn.functional.grid_sample(
            texture_map[idx:idx + 1],
            smpl_uv_stack[idx:idx + 1],
            mode='bilinear',
            padding_mode='border')

    output_textured_uv = output_textured_uv[0].cpu().numpy().transpose(
        [1, 2, 0])
    cv2.imshow("Resampled UV", output_textured_uv)
    cv2.imwrite('../saves/checks/sampled_NOC_render.jpg',
                (output_textured_uv * 255).astype('uint8'))
    cv2.imshow("NOC", smpl_noc)
    cv2.imwrite('../saves/checks/NOC_render.jpg',
                (smpl_noc * 255).astype('uint8'))
    cv2.imshow("Rendered UV", smpl_uv.interpolated)
    cv2.imwrite('../saves/checks/UV_render.jpg',
                (smpl_uv.interpolated * 255).astype('uint8'))
    cv2.imshow("Rendered Class", smpl_class_id.astype('uint8'))
    print(
        "Image mean: ",
        np.mean(
            cv2.subtract(output_textured_uv.astype('float64'),
                         smpl_noc.astype('float64'))))
    cv2.waitKey(0)
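grid_sample does the texture lookup here: each of the 24 part textures is sampled at that part's UV coordinates and the partial images are summed, which amounts to a union because every pixel belongs to exactly one part. The function expects sampling locations normalised to [-1, 1], hence the (uv * 2) - 1 mapping. A minimal, self-contained illustration (note the examples above leave align_corners unset, and its default changed around PyTorch 1.3):

import torch
import torch.nn.functional as F

tex = torch.arange(8.).reshape(1, 2, 2, 2)  # (N, C, H, W)
grid = torch.zeros(1, 1, 1, 2)              # (N, H_out, W_out, 2), image centre
# A bilinear sample at the centre averages all four texels per channel.
print(F.grid_sample(tex, grid, mode='bilinear',
                    padding_mode='border', align_corners=False))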
Example 4
def main(opt):

    model = body_models.create(model_path='../3d_data/models',
                               model_type='smpl',
                               gender='male',
                               ext='pkl')
    with open('../3d_data/densepose_uv.pkl', 'rb') as smpl_file:
        smpl = pickle.load(smpl_file)
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
    uv_faceid = io.loadmat(
        '../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']

    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()

    smpl_vertices = np.array([vertices[i] for i in smpl['v_extended']])

    smpl_uv_visual = trimesh.visual.TextureVisuals(uv=uv, image=texture)

    # smpl_render = Render(width=opt.image_width, height=opt.image_height,
    #                      camera_distance=opt.camera_distance, pose_y=opt.global_y,
    #                      focal_length=opt.focal_length)
    smpl_render = Render(width=opt.image_width,
                         height=opt.image_height,
                         pose_y=opt.global_y)

    smpl_render.set_render(vertices=smpl_vertices,
                           faces=faces,
                           visual=smpl_uv_visual)

    smpl_norm_vertices = smpl_render.vertices

    smpl_render_uv = smpl_render.render_visual(
        flags=pyrender.RenderFlags.UV_RENDERING, face_id=uv_faceid)

    smpl_render.set_render(vertices=smpl_vertices, faces=faces)

    norm_vertices = smpl_render.vertices

    smpl_render_norm = smpl_render.render_interpolate(vertices=uv).interpolated

    smpl_body_uv = smpl_render_uv[:, :, :2]

    smpl_body_class = smpl_render_uv[:, :, 2:3]

    smpl_uv_stack = np.array([]).reshape(
        (0, opt.image_height, opt.image_width, 2))

    uv_vertices = (uv * 2) - 1
    uv_render = Render(width=opt.image_width, height=opt.image_height)

    aggregate_textures = np.array([], dtype='float64').reshape(
        (0, 3, opt.image_height, opt.image_width))

    for idx in range(1, 4):  # demo over the first 3 of the 24 DensePose parts
        face_select = faces[uv_faceid[:, 0] == idx]
        uv_visual = trimesh.visual.ColorVisuals(vertex_colors=uv)
        uv_render.set_render(vertices=uv_vertices,
                             faces=face_select,
                             visual=uv_visual,
                             normalize=False)
        # out_view = uv_render.render_visual(flags=pyrender.RenderFlags.SKIP_CULL_FACES)
        out_view = uv_render.render_interpolate(
            vertices=smpl_norm_vertices).interpolated.transpose([2, 0, 1])
        aggregate_textures = np.concatenate(
            [aggregate_textures,
             out_view.reshape((1, ) + out_view.shape)])
        smpl_uv_stack = np.concatenate([
            smpl_uv_stack,
            (smpl_body_uv * (smpl_body_class.repeat([2], axis=-1)
                             == idx)).reshape((1, ) + smpl_body_uv.shape)
        ])

        cv2.imshow(
            "Part body UV",
            np.concatenate(
                [smpl_uv_stack[idx - 1],
                 np.zeros(smpl_body_class.shape)],
                axis=-1))
        cv2.imshow("Part Texture",
                   aggregate_textures[idx - 1].transpose([1, 2, 0]))
        cv2.waitKey(0)

    texture_map = torch.from_numpy(aggregate_textures)

    smpl_uv_stack = torch.from_numpy((smpl_uv_stack * 2) - 1)

    output_textured_uv = 0

    for idx in range(0, 3):
        output_textured_uv += torch.nn.functional.grid_sample(
            texture_map[idx:idx + 1],
            smpl_uv_stack[idx:idx + 1],
            mode='bilinear',
            padding_mode='border')

    output_textured_uv = output_textured_uv[0].cpu().numpy().transpose(
        [1, 2, 0])
    cv2.imshow("Resampled UV", output_textured_uv)
    cv2.imshow("Real Norm", smpl_render_norm)
    cv2.waitKey(0)
Example 5
def main(opt):
    coco_folder = os.environ['COCO']
    save_annotation_file = opt.output

    if not os.path.exists(save_annotation_file):
        os.makedirs(save_annotation_file)
    annotation_dict = {
        'minival': [
            COCO(coco_folder +
                 '/annotations/densepose_coco_2014_minival.json'),
            COCO(coco_folder + '/annotations/person_keypoints_val2014.json'),
            'val2014'
        ],
        'valminusminival': [
            COCO(coco_folder +
                 '/annotations/densepose_coco_2014_valminusminival.json'),
            COCO(coco_folder + '/annotations/person_keypoints_val2014.json'),
            'val2014'
        ],
        'train': [
            COCO(coco_folder + '/annotations/densepose_coco_2014_train.json'),
            COCO(coco_folder + '/annotations/person_keypoints_train2014.json'),
            'train2014'
        ],
        'test': [
            COCO(coco_folder + '/annotations/densepose_coco_2014_test.json'),
            'test2014'
        ]
    }
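    # NOTE: the 'test' entry above has only two fields (there is no
    # person-keypoints file for the test split), so the per-key loop below,
    # which reads indices 0..2, will raise an IndexError when it reaches
    # 'test' unless that key is skipped or handled separately.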

    #################################

    # SMPL prep

    model = body_models.create(model_path='../3d_data/models',
                               model_type='smpl',
                               gender='male',
                               ext='pkl')
    with open('../3d_data/densepose_uv.pkl', 'rb') as smpl_file:
        smpl = pickle.load(smpl_file)
    faces = np.array(smpl['f_extended'], dtype=np.int64).reshape((-1, 3))
    uv_faceid = io.loadmat(
        '../3d_data/DensePoseData/UV_data/UV_Processed.mat')['All_FaceIndices']
    uv = smpl['uv']
    uv_vertices = (uv * 2) - 1
    # with open('../3d_data/nongrey_male_0110.jpg', 'rb') as file:
    # texture = cv2.imread('../3d_data/nongrey_male_0110.jpg')

    seg_path = os.path.join(coco_folder, 'background')
    if not os.path.exists(seg_path):
        os.mkdir(seg_path)

    output = model(return_verts=True)
    vertices = output.vertices.detach().cpu().numpy().squeeze()

    smpl_vertices = np.array([vertices[i] for i in smpl['v_extended']])

    smpl_render = Render(width=opt.image_width, height=opt.image_height)

    smpl_render.set_render(vertices=smpl_vertices, faces=faces)

    smpl_norm_vertices = smpl_render.vertices

    # smpl_uv = smpl_render.render_interpolate(vertices=uv, skip_cull=False)
    # smpl_noc = smpl_render.render_interpolate(vertices=smpl_norm_vertices).interpolated

    aggregate_textures = np.array([], dtype='float64').reshape(
        (0, 3, opt.image_height, opt.image_width))

    uv_render = Render(width=opt.image_width, height=opt.image_height)

    kernel = np.ones((3, 3), np.uint8)

    for idx in range(1, 25):
        # face_select = faces[uv_faceid[:, 0] == idx, :]
        # As in Example 3: take every face sharing a vertex with part `idx`
        # (np.hstack needs a sequence, not a generator, in recent NumPy).
        id_select = np.unique(
            np.hstack([
                np.where(faces == vert)[0]
                for face in faces[uv_faceid[:, 0] == idx, :] for vert in face
            ]))

        face_select = faces[id_select, :]

        uv_render.set_render(vertices=uv_vertices,
                             faces=face_select,
                             normalize=False)

        out_view = np.flip(uv_render.render_interpolate(
            vertices=smpl_norm_vertices).interpolated.transpose([2, 0, 1]),
                           axis=1)
        aggregate_textures = np.concatenate(
            [aggregate_textures,
             out_view.reshape((1, ) + out_view.shape)])

    texture_map = torch.from_numpy(aggregate_textures)

    print("SMPL textures loaded in memory.\n")

    #################################

    for key in annotation_dict:
        dp_coco = annotation_dict[key][0]
        person_coco = annotation_dict[key][1]
        parent_dir = annotation_dict[key][2]

        seg_key_path = os.path.join(seg_path, parent_dir)
        if not os.path.exists(seg_key_path):
            os.mkdir(seg_key_path)

        im_ids = dp_coco.getImgIds()
        # len_ids = len(im_ids)
        key_list = []
        for im_id in tqdm(im_ids,
                          desc="Key [{}] Progress".format(key),
                          ncols=100):
            im_dict = {}
            im = dp_coco.loadImgs(im_id)[0]
            person_im = person_coco.loadImgs(im_id)[0]
            im_name = os.path.join(coco_folder, parent_dir, im['file_name'])
            image = cv2.imread(im_name)
            # im_dict['image'] = image

            im_dict['file_name'] = os.path.join(parent_dir, im['file_name'])

            ann_ids = dp_coco.getAnnIds(imgIds=im['id'])
            anns = dp_coco.loadAnns(ann_ids)
            person_anns = person_coco.loadAnns(ann_ids)

            im_dict['points'] = {}
            zero_im = np.zeros((image.shape[0], image.shape[1]))

            person_seg = np.zeros((image.shape[0], image.shape[1]))
            point_dict = im_dict['points']
            point_dict['yx'] = np.array([], dtype='int').reshape((0, 2))
            point_dict['iuv'] = np.array([]).reshape((0, 3))

            xy_mask = np.zeros((image.shape[0], image.shape[1], 1))
            zero_point_iuv = np.zeros_like(image)
            # One (u, v) pair per annotated pixel, bucketed into 24 per-part
            # planes (part ids 1-24 map to planes 0-23 below).
            zero_point_uv = np.zeros((24, image.shape[0], image.shape[1], 2))

            for person_ann in person_anns:
                person_seg += person_coco.annToMask(person_ann)

            for ann in anns:

                if 'dp_masks' in ann.keys():

                    bbr = np.array(ann['bbox']).astype('int')
                    mask = GetDensePoseMask(ann['dp_masks'])
                    x1, y1 = bbr[0], bbr[1]
                    x2 = min(x1 + bbr[2], image.shape[1])
                    y2 = min(y1 + bbr[3], image.shape[0])

                    mask_im = cv2.resize(mask, (int(x2 - x1), int(y2 - y1)),
                                         interpolation=cv2.INTER_NEAREST)
                    # mask_bool = np.tile((mask_im == 0)[:, :, np.newaxis], [1, 1, 3])
                    zero_im[y1:y2, x1:x2] += mask_im

                    # Stretch the annotated points from patch coordinates
                    # (0-255) to the current box, then clamp to the image.
                    img_x = np.array(ann['dp_x']) / 255. * bbr[2] + x1
                    img_y = np.array(ann['dp_y']) / 255. * bbr[3] + y1
                    img_x = img_x.astype('int') - 1 * (img_x >= image.shape[1])
                    img_y = img_y.astype('int') - 1 * (img_y >= image.shape[0])

                    point_dict['yx'] = np.concatenate(
                        [point_dict['yx'],
                         np.array([img_y, img_x]).T])

                    point_i = np.array(ann['dp_I']).astype('int')
                    point_u = np.array(ann['dp_U'])
                    point_v = np.array(ann['dp_V'])
                    point_dict['iuv'] = np.concatenate(
                        (point_dict['iuv'],
                         np.array([point_i, point_u, point_v]).T))

                    zero_point_iuv[img_y, img_x, :] = np.array(
                        [point_i, point_u, point_v]).T

                    xy_mask[img_y, img_x, 0] = 1

                    zero_point_uv[point_i - 1, img_y,
                                  img_x] = np.array([point_u, point_v]).T

            # Sample each part's NOC texture at that part's annotated UV
            # points; grid_sample expects coordinates in [-1, 1].
            uv_stack = torch.from_numpy((zero_point_uv * 2) - 1)

            output_noc = 0
            for part_idx in range(0, 24):
                output_noc += torch.nn.functional.grid_sample(
                    texture_map[part_idx:part_idx + 1],
                    uv_stack[part_idx:part_idx + 1],
                    mode='bilinear',
                    padding_mode='border')

            output_noc = output_noc[0].cpu().numpy().transpose([1, 2, 0])

            # Union of the DensePose part masks and person segmentations,
            # dilated by one 3x3 step; the saved image marks background as 1.
            zero_im = zero_im + person_seg
            zero_im = (zero_im > 0).astype('float32')
            zero_im = cv2.dilate(zero_im, kernel, iterations=1)

            cv2.imwrite(os.path.join(seg_key_path, im['file_name']),
                        (zero_im == 0.0).astype('uint8'))

            point_dict['noc'] = output_noc[point_dict['yx'][:, 0],
                                           point_dict['yx'][:, 1], :]
            # print(np.min(point_dict['yx']))
            key_list.append(im_dict)

            # cv2.imshow("Image", image)
            # cv2.imshow("Background Image", (zero_im == 0.0).astype('uint8') * 255)
            # cv2.imshow("IUV", (zero_point_iuv * 30).astype('uint8'))
            # cv2.imshow("NOC sampled", (output_noc * 255).astype('uint8'))
            #
            # cv2.waitKey(0)

            # progress_bar(idx + 1, len_ids, prefix="Progress for {}:".format(key), suffix="Complete")

        save_file = os.path.join(save_annotation_file, '{}.pkl'.format(key))
        with open(save_file, 'wb') as write_file:
            pickle.dump(key_list, write_file, protocol=pickle.HIGHEST_PROTOCOL)
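Each split is saved as a pickled list of per-image dicts: 'file_name' plus a 'points' dict holding the annotated pixel coordinates ('yx'), the raw DensePose labels ('iuv'), and the sampled normalised object coordinates ('noc'). A read-back sketch (the path is illustrative; use the directory passed as opt.output):

import pickle

with open('annotations/train.pkl', 'rb') as f:  # hypothetical path
    records = pickle.load(f)
first = records[0]
print(first['file_name'],
      first['points']['yx'].shape,   # (num_points, 2), int
      first['points']['noc'].shape)  # (num_points, 3), float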