def infer_fg(self, img):
        """
        img: BGR image of shape (H, W, C)
        returns: binary mask image of shape (H, W), 255 for fg, 0 for bg
        """
        ori_h, ori_w = img.shape[0:2]
        new_h, new_w = self.get_working_size(ori_h, ori_w)
        img = cv2.resize(img, (new_w, new_h))

        # Get results of original image
        multiplier = get_multiplier(img)

        with torch.no_grad():
            orig_paf, orig_heat = get_outputs(multiplier, img, self.model,
                                              'rtpose')

            # Get results of flipped image
            swapped_img = img[:, ::-1, :]
            flipped_paf, flipped_heat = get_outputs(multiplier, swapped_img,
                                                    self.model, 'rtpose')

            # compute averaged heatmap and paf
            paf, heatmap = handle_paf_and_heat(orig_heat, flipped_heat,
                                               orig_paf, flipped_paf)

        param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
        to_plot, canvas, candidate, subset = decode_pose_fg(
            img, param, heatmap, paf)

        canvas = cv2.resize(canvas, (ori_w, ori_h))
        fg_map = canvas > 128
        canvas[fg_map] = 255
        canvas[~fg_map] = 0
        return canvas[:, :, 0]
Example 2
def skeleton_frame(idx):
    img_path = img_dir.joinpath('{:05d}.png'.format(idx))

    img = cv2.imread(str(img_path))

    shape_dst = np.min(img.shape[:2])
    oh = (img.shape[0] - shape_dst) // 2
    ow = (img.shape[1] - shape_dst) // 2

    img = img[oh:oh + shape_dst, ow:ow + shape_dst]
    img = cv2.resize(img, (512, 512))
    multiplier = get_multiplier(img)
    with torch.no_grad():
        paf, heatmap = get_outputs(multiplier, img, model, 'rtpose')
    # denoise each joint heatmap channel (the last channel is background)
    r_heatmap = np.array([remove_noise(ht)
                          for ht in heatmap.transpose(2, 0, 1)[:-1]]
                         ).transpose(1, 2, 0)
    heatmap[:, :, :-1] = r_heatmap
    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    label, cord = get_pose(param, heatmap, paf)

    mask = label > 0

    intensity = .80
    img[mask, :] = int(255 * intensity)

    fig.clear()
    plt.axis('off')

    plt.imshow(img[:, :, ::-1])  # BGR (OpenCV) -> RGB for matplotlib
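# --- Added usage sketch (assumption, not part of the original example):
# skeleton_frame redraws the global `fig` for a given frame index, so it
# plugs straight into matplotlib's FuncAnimation; `img_dir` and `model`
# must exist globally, and the frame count 100 is a placeholder. ---
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

fig = plt.figure()
anim = FuncAnimation(fig, skeleton_frame, frames=100, interval=40)
anim.save('skeleton.mp4')  # requires ffmpeg on the PATH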
def save(idx):
    global pose_cords
    if not os.path.exists(str(train_img_dir.joinpath(
            '{:05}.png'.format(idx)))):
        try:
            img_path = img_dir.joinpath('{:05}.png'.format(idx))
            img = cv2.imread(str(img_path))
            shape_dst = np.min(img.shape[:2])
            oh = (img.shape[0] - shape_dst) // 2
            ow = (img.shape[1] - shape_dst) // 2

            img = img[oh:oh + shape_dst, ow:ow + shape_dst]
            img = cv2.resize(img, (512, 512))
            multiplier = get_multiplier(img)
            with torch.no_grad():
                paf, heatmap = get_outputs(multiplier, img, model, 'rtpose')
            r_heatmap = np.array([
                remove_noise(ht) for ht in heatmap.transpose(2, 0, 1)[:-1]
            ]).transpose(1, 2, 0)
            heatmap[:, :, :-1] = r_heatmap
            param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
            label, cord = get_pose(param, heatmap, paf)
            index = 13
            crop_size = 25
            try:
                head_cord = cord[index]
            except IndexError:
                # no head keypoint in this frame: fall back to the last frame's
                try:
                    head_cord = pose_cords[-1]
                except IndexError:
                    head_cord = None

            pose_cords.append(head_cord)
            if head_cord is not None:
                head = img[int(head_cord[1] - crop_size):int(head_cord[1] + crop_size),
                           int(head_cord[0] - crop_size):int(head_cord[0] + crop_size), :]
                plt.imshow(head[:, :, ::-1])  # BGR -> RGB for matplotlib
                plt.savefig(str(train_head_dir.joinpath(
                    'pose_{}.jpg'.format(idx))))
                plt.clf()
            cv2.imwrite(str(train_img_dir.joinpath('{:05}.png'.format(idx))),
                        img)
            cv2.imwrite(str(train_label_dir.joinpath('{:05}.png'.format(idx))),
                        label)
            return True
        except Exception:
            return False

    else:
        return False
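# --- Added driver sketch (assumption, not part of the original example):
# the directory globals used by save() are set up elsewhere, and the frame
# count 1000 is a placeholder. ---
pose_cords = []
for i in tqdm(range(1000)):
    save(i)
# drop frames where no head was ever found before saving the coordinates
valid = np.array([c for c in pose_cords if c is not None], dtype=np.int32)
np.save('pose_source.npy', valid)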
Example 4
def extract_poses(model, save_dir):
    '''make label images for pix2pix'''
    test_img_dir = os.path.join(save_dir, 'test_img')
    os.makedirs(test_img_dir, exist_ok=True)
    test_label_dir = os.path.join(save_dir, 'test_label_ori')
    os.makedirs(test_label_dir, exist_ok=True)
    test_head_dir = os.path.join(save_dir, 'test_head_ori')
    os.makedirs(test_head_dir, exist_ok=True)

    img_dir = os.path.join(save_dir, 'images')

    pose_cords = []
    for idx in tqdm(range(len(os.listdir(img_dir)))):
        img_path = os.path.join(img_dir, '{:05}.png'.format(idx))
        img = cv2.imread(img_path)

        shape_dst = np.min(img.shape[:2])
        oh = (img.shape[0] - shape_dst) // 2
        ow = (img.shape[1] - shape_dst) // 2

        img = img[oh:oh + shape_dst, ow:ow + shape_dst]
        img = cv2.resize(img, (512, 512))
        multiplier = get_multiplier(img)
        with torch.no_grad():
            paf, heatmap = get_outputs(multiplier, img, model, 'rtpose', device)
        r_heatmap = np.array([remove_noise(ht)
                              for ht in heatmap.transpose(2, 0, 1)[:-1]]
                             ).transpose(1, 2, 0)
        heatmap[:, :, :-1] = r_heatmap
        param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
        label, cord = get_pose(param, heatmap, paf)
        index = 13
        crop_size = 25
        try:
            head_cord = cord[index]
        except IndexError:
            # no head keypoint in this frame: fall back to the last frame's
            head_cord = pose_cords[-1]

        pose_cords.append(head_cord)
        head = img[int(head_cord[1] - crop_size): int(head_cord[1] + crop_size),
                   int(head_cord[0] - crop_size): int(head_cord[0] + crop_size), :]
        plt.imshow(head[:, :, ::-1])  # BGR -> RGB for matplotlib
        plt.savefig(os.path.join(test_head_dir, 'pose_{}.jpg'.format(idx)))
        plt.clf()
        cv2.imwrite(os.path.join(test_img_dir, '{:05}.png'.format(idx)), img)
        cv2.imwrite(os.path.join(test_label_dir, '{:05}.png'.format(idx)), label)
        if idx % 100 == 0 and idx != 0:
            pose_cords_arr = np.array(pose_cords, dtype=int)  # np.int is removed in NumPy >= 1.24
            np.save(os.path.join(save_dir, 'pose_source.npy'), pose_cords_arr)

    pose_cords_arr = np.array(pose_cords, dtype=int)
    np.save(os.path.join(save_dir, 'pose_source.npy'), pose_cords_arr)
    torch.cuda.empty_cache()
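# --- Added usage sketch (assumption, not part of the original example):
# model construction mirrors the __main__ block in the last example below;
# the weight file and save_dir paths are placeholders, and `device` is the
# global that the get_outputs call inside extract_poses expects. ---
device = torch.device('cuda')
model = get_model(trunk='vgg19')
model = torch.nn.DataParallel(model).cuda()
model.load_state_dict(torch.load('pose_model_scratch.pth'))
model.eval()
extract_poses(model, save_dir='./data/target')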
def process(model, oriImg, process_speed):
    # Get results of original image
    multiplier = get_multiplier(oriImg, process_speed)
    with torch.no_grad():
        orig_paf, orig_heat = get_outputs(multiplier, oriImg, model, 'rtpose')

        # Get results of flipped image
        swapped_img = oriImg[:, ::-1, :]
        flipped_paf, flipped_heat = get_outputs(multiplier, swapped_img, model,
                                                'rtpose')

        # compute averaged heatmap and paf
        paf, heatmap = handle_paf_and_heat(orig_heat, flipped_heat, orig_paf,
                                           flipped_paf)
    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    to_plot, canvas, joint_list, person_to_joint_assoc = decode_pose(
        oriImg, param, heatmap, paf)
    return to_plot, canvas, joint_list, person_to_joint_assoc
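# --- Added usage sketch (assumption, not part of the original example):
# `model` is an rtpose network loaded as in the other examples, and
# process_speed is simply forwarded to get_multiplier (the value 2 here
# is a guess at a mid-range setting). ---
import cv2

img = cv2.imread('input.png')                      # placeholder path
to_plot, canvas, joint_list, person_to_joint_assoc = process(model, img, 2)
cv2.imwrite('pose_overlay.png', to_plot)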
Example 6
model.cuda()
model.float()
model.eval()

if __name__ == "__main__":

    video_capture = cv2.VideoCapture(0)

    while True:
        # Capture frame-by-frame
        ret, oriImg = video_capture.read()
        if not ret:
            break

        # Get results of the original image
        multiplier = get_multiplier(oriImg)

        with torch.no_grad():
            paf, heatmap = get_outputs(multiplier, oriImg, model, 'rtpose')

        heatmap_peaks = np.zeros_like(heatmap)
        for i in range(19):  # 18 body-part channels + background
            heatmap_peaks[:, :, i] = find_peaks(heatmap[:, :, i])
        heatmap_peaks = heatmap_peaks.astype(np.float32)
        heatmap = heatmap.astype(np.float32)
        paf = paf.astype(np.float32)

        # C++ post-processing via the pafprocess extension
        pafprocess.process_paf(heatmap_peaks, heatmap, paf)
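        # --- Added completion sketch (assumption, not in the original):
        # as written, the loop never displays a frame and has no exit
        # condition; a typical ending shows the image and quits on 'q'. ---
        cv2.imshow('rtpose', oriImg)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video_capture.release()
    cv2.destroyAllWindows()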
Example 7
def generate(origin_img, img_dir, label_dir, size_dst, size_crop, crop_from, pose_transform=False):
    # Pose estimation (OpenPose)
    openpose_dir = Path('../src/pytorch_Realtime_Multi-Person_Pose_Estimation/')

    sys.path.append(str(openpose_dir))
    sys.path.append('../src/utils')
    # from Pose estimation
    from evaluate.coco_eval import get_multiplier, get_outputs
    # utils
    from openpose_utils import remove_noise, get_pose, get_pose_coord, get_pose_new


    model = pose_model()

    total = len(list(origin_img.iterdir()))
    img_idx = range(total)

    if pose_transform:
        ratio_src, ratio_tar = '../data/source/ratio_a.png', '../data/target/ratio_b.png'
        if not os.path.isfile(ratio_src):
            raise FileNotFoundError('File does not exist: {}'.format(ratio_src))
        if not os.path.isfile(ratio_tar):
            raise FileNotFoundError('File does not exist: {}'.format(ratio_tar))

        imgset = [ratio_src, ratio_tar]
        origin = []
        height = []
        ratio = {'0-1': None, '1-2': None, '2-3': None, '3-4': None, '1-8': None, '8-9': None,
                 '9-10': None, '0-14': None, '14-16': None}  # target / source length ratios
        coord = {'0-1': [], '1-2': [], '2-3': [], '3-4': [], '1-8': [], '8-9': [],
                 '9-10': [], '0-14': [], '14-16': []}  # joint segment lengths

        for img_path in imgset:
            img = cv2.imread(str(img_path))
            if not img.shape[:2] == size_dst[::-1]:  # format: (h, w)
                img = img_resize(img, size_crop, crop_from, size_dst)  # size_dst format: (W, H)
            multiplier = get_multiplier(img)
            with torch.no_grad():
                paf, heatmap = get_outputs(multiplier, img, model, 'rtpose')
            r_heatmap = np.array([remove_noise(ht)
                                  for ht in heatmap.transpose(2, 0, 1)[:-1]]
                                 ).transpose(1, 2, 0)
            heatmap[:, :, :-1] = r_heatmap
            param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}  # only 'thre2' matters

            label, joint_list = get_pose_coord(img, param, heatmap, paf)
            # print ('joint list: \n',joint_list)

            origin.append(joint_list[1][0][:2])  # use joint no. 1 (the neck) as the reference origin
            height_max = max(joint_list, key=lambda x: x[0][1])[0][1]
            height_min = min(joint_list, key=lambda x: x[0][1])[0][1]
            height.append(height_max - height_min)

            for k in ratio.keys():
                klist = k.split('-')
                j_1, j_2 = int(klist[0]), int(klist[-1])
                # assert j_1 == int(joint_list[j_1][0][-1]) and j_2 == int(
                #    joint_list[j_2][0][-1])  # may cause issue if empty array exists
                co_1, co_2 = list(joint_list[j_1][0][:2]), list(joint_list[j_2][0][:2])
                j_len = ((co_1[0] - co_2[0]) ** 2 + (co_1[1] - co_2[1]) ** 2) ** 0.5
                coord[k].append(j_len)

        for k, v in coord.items():
            src_len, tar_len = v[0], v[1]
            ratio[k] = tar_len / src_len

        ratio_body = height[1] / height[0]  # target / source height
        print('ratio:\n', ratio, '\nratio_body:', ratio_body)  # test only

    for idx in tqdm(img_idx):
        img_path = origin_img.joinpath('img_{:04d}.png'.format(idx))
        img = cv2.imread(str(img_path))

        if not img.shape[:2] == size_dst[::-1]:
            # set crop size and resize
            img = img_resize(img, size_crop, crop_from, size_dst)  # size format: (W, H)

        multiplier = get_multiplier(img)
        with torch.no_grad():
            paf, heatmap = get_outputs(multiplier, img, model, 'rtpose')
        r_heatmap = np.array([remove_noise(ht)
                              for ht in heatmap.transpose(2, 0, 1)[:-1]]
                             ).transpose(1, 2, 0)
        heatmap[:, :, :-1] = r_heatmap
        param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}  # only 'thre2' has an effect here

        if pose_transform:
            _, joint_list = get_pose_coord(img, param, heatmap, paf)
            #print('joint_list', '\n', joint_list)  # test only
            new_joint = translate(joint_list, ratio, origin, ratio_body)
            new_joint_list = new_joint.run()
            #print('joint_list new', '\n', new_joint_list)  # test only
            """
            with open('joint_list.txt','a') as f:
                f.write('joint_list_{}\n'.format(idx)+str(joint_list)+'\nnew_joint_list_{}\n'.format(idx)+str(new_joint_list)+'\n')
            """
            label = get_pose_new(img, param, heatmap, paf, new_joint_list)
        else:
            label = get_pose(img, param, heatmap, paf)  # note: the label size may differ from the input

        cv2.imwrite(str(img_dir.joinpath('img_{:04d}.png'.format(idx))), img)
        cv2.imwrite(str(label_dir.joinpath('label_{:04d}.png'.format(idx))), label)

    torch.cuda.empty_cache()
    print('{} {} images generated'.format(total, origin_img.parent.name))
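# --- Added usage sketch (assumption, not part of the original example):
# every argument value below is a placeholder; size_dst and size_crop are
# (W, H) per the comments above, and 'center' is a guess at the anchor
# string img_resize expects for crop_from. ---
from pathlib import Path

generate(origin_img=Path('../data/source/images'),
         img_dir=Path('../data/source/train_img'),
         label_dir=Path('../data/source/train_label'),
         size_dst=(512, 512), size_crop=(512, 512),
         crop_from='center', pose_transform=False)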
Example 8
test_label_dir = save_dir.joinpath('test_label_ori')
test_label_dir.mkdir(exist_ok=True)
test_head_dir = save_dir.joinpath('test_head_ori')
test_head_dir.mkdir(exist_ok=True)

pose_cords = []
for idx in tqdm(range(len(os.listdir(str(img_dir))))):
    img_path = img_dir.joinpath('{:05}.png'.format(idx))
    img = cv2.imread(str(img_path))
    shape_dst = np.min(img.shape[:2])
    oh = (img.shape[0] - shape_dst) // 2
    ow = (img.shape[1] - shape_dst) // 2

    img = img[oh:oh + shape_dst, ow:ow + shape_dst]
    img = cv2.resize(img, (512, 512))
    multiplier = get_multiplier(img)
    with torch.no_grad():
        paf, heatmap = get_outputs(multiplier, img, model, 'rtpose')
    r_heatmap = np.array([remove_noise(ht)
                          for ht in heatmap.transpose(2, 0, 1)[:-1]]
                         ).transpose(1, 2, 0)
    heatmap[:, :, :-1] = r_heatmap
    param = {'thre1': 0.1, 'thre2': 0.05, 'thre3': 0.5}
    label, cord = get_pose(param, heatmap, paf)
    index = 13
    crop_size = 25
    try:
        head_cord = cord[index]
    except IndexError:
        # no head keypoint in this frame: fall back to the last frame's
        head_cord = pose_cords[-1]
if __name__ == '__main__':

    train_pose_dir = train.joinpath('train_label')
    test_pose_dir = test.joinpath('test_label')
    model = get_model(trunk='vgg19')
    model_path = 'pose_model_scratch.pth'
    model = torch.nn.DataParallel(model).cuda()
    model.load_state_dict(torch.load(model_path))
    model.eval()

    for idx in range(200, 210):
        train_img_path = train.joinpath('train_set')
        train_img_name = "image%d.jpg" % idx
        train_img_path = train_img_path.joinpath(train_img_name)
        train_image = cv2.resize(cv2.imread(str(train_img_path)), (512, 512))
        train_multiplier = get_multiplier(train_image)

        test_img_path = test.joinpath('test_set')
        test_img_name = "image%d.jpg" % idx
        test_img_path = test_img_path.joinpath(test_img_name)
        test_image = cv2.resize(cv2.imread(str(test_img_path)), (512, 512))
        test_multiplier = get_multiplier(test_image)

        with torch.no_grad():
            train_paf, train_heatmap = get_outputs(train_multiplier, train_image, model, 'rtpose')
            test_paf, test_heatmap = get_outputs(test_multiplier, test_image, model, 'rtpose')

            # flip the images horizontally with [::-1]
            train_swapped_img = train_image[:, ::-1, :]
            test_swapped_img = test_image[:, ::-1, :]