def get_idx_hands_up():
    from src.pose_augment import set_network_input_wh
    set_network_input_wh(368, 368)

    show_sample = True
    db = CocoPoseLMDB('/data/public/rw/coco-pose-estimation-lmdb/', is_train=True, decode_img=show_sample)
    db.reset_state()
    total_cnt = 0
    handup_cnt = 0
    for idx, metas in enumerate(db.get_data()):
        meta = metas[0]
        if len(meta.joint_list) <= 0:
            continue
        body = meta.joint_list[0]
        # skip samples where the neck or either wrist is not annotated (y <= 0 marks a missing joint)
        if body[CocoPart.Neck.value][1] <= 0:
            continue
        if body[CocoPart.LWrist.value][1] <= 0:
            continue
        if body[CocoPart.RWrist.value][1] <= 0:
            continue

        # image y grows downward, so a wrist with a smaller y than the neck is raised above it
        if body[CocoPart.Neck.value][1] > body[CocoPart.LWrist.value][1] or body[CocoPart.Neck.value][1] > body[CocoPart.RWrist.value][1]:
            print(meta.idx)
            handup_cnt += 1

            if show_sample:
                l1, l2, l3 = pose_to_img(metas)
                CocoPose.display_image(l1, l2, l3)

        total_cnt += 1

    print('%d / %d' % (handup_cnt, total_cnt))
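
The wrist-above-neck test can be factored into a small reusable predicate. The sketch below is illustrative rather than part of the repo; it assumes the same CocoPart enum and (x, y) joint layout used above, where a y value of 0 or less marks an unlabeled joint.

def is_hands_up(body):
    # y grows downward in image coordinates, so a smaller y means higher in the frame
    neck_y = body[CocoPart.Neck.value][1]
    lwrist_y = body[CocoPart.LWrist.value][1]
    rwrist_y = body[CocoPart.RWrist.value][1]
    if min(neck_y, lwrist_y, rwrist_y) <= 0:
        return False    # at least one of the three joints is not annotated
    return neck_y > lwrist_y or neck_y > rwrist_y
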
Example #3
                    average_loss_ll_heat += lss_ll_heat * len(images_test)
                    total_cnt += len(images_test)

                logger.info('validation(%d) %s loss=%f, loss_ll=%f, loss_ll_paf=%f, loss_ll_heat=%f' % (total_cnt, args.tag, average_loss / total_cnt, average_loss_ll / total_cnt, average_loss_ll_paf / total_cnt, average_loss_ll_heat / total_cnt))
                last_gs_num2 = gs_num

                sample_image = [enqueuer.last_dp[0][i] for i in range(4)]
                outputMat = sess.run(
                    outputs,
                    feed_dict={q_inp: np.array((sample_image + val_image) * max(1, (args.batchsize // 16)))}
                )
                pafMat, heatMat = outputMat[:, :, :, 19:], outputMat[:, :, :, :19]  # first 19 channels are part heatmaps, the rest are part affinity fields

                sample_results = []
                for i in range(len(sample_image)):
                    test_result = CocoPose.display_image(sample_image[i], heatMat[i], pafMat[i], as_numpy=True)
                    test_result = cv2.resize(test_result, (640, 640))
                    test_result = test_result.reshape([640, 640, 3]).astype(float)
                    sample_results.append(test_result)

                test_results = []
                for i in range(len(val_image)):
                    test_result = CocoPose.display_image(val_image[i], heatMat[len(sample_image) + i], pafMat[len(sample_image) + i], as_numpy=True)
                    test_result = cv2.resize(test_result, (640, 640))
                    test_result = test_result.reshape([640, 640, 3]).astype(float)
                    test_results.append(test_result)

                # save summary
                summary = sess.run(merged_validate_op, feed_dict={
                    valid_loss: average_loss / total_cnt,
                    valid_loss_ll: average_loss_ll / total_cnt,
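
The bookkeeping in this snippet is a running, sample-weighted average: each validation batch contributes loss * batch_size to an accumulator, and the sums are divided by total_cnt once at the end. A minimal self-contained sketch of that pattern (the helper name and inputs are illustrative, not from the repo):

def weighted_average(batches):
    """batches yields (loss_value, batch_size) pairs."""
    total_loss, total_cnt = 0.0, 0
    for loss_value, batch_size in batches:
        total_loss += loss_value * batch_size   # weight each batch by its number of samples
        total_cnt += batch_size
    return total_loss / max(total_cnt, 1)       # guard against an empty validation pass

# e.g. weighted_average([(0.5, 16), (0.7, 16), (0.6, 8)]) == 0.6
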
Example #4
                ### Changed to a variable batch size
                # only 16 images are visualized: 4 training samples + 12 validation images
                sample_image = [enqueuer.last_dp[0][i] for i in range(4)]
                test_image = np.array((sample_image + val_image) * args.gpus)
                outputMat = sess.run(outputs,
                                     feed_dict={input_node: test_image})
                pafMat, heatMat = outputMat[:, :, :, 19:], outputMat[:, :, :, :19]

                # The batch-size/sample-image size mismatch (previously handled by taking
                # the minimum of the two) was resolved by switching to a variable batch size.
                test_results = []
                for image, heat, paf in zip(test_image, heatMat, pafMat):
                    test_result = CocoPose.display_image(image,
                                                         heat,
                                                         paf,
                                                         as_numpy=True)
                    test_result = cv2.resize(test_result, (640, 640))
                    test_result = test_result.reshape([640, 640, 3]).astype(float)
                    test_results.append(test_result)

                train_results = test_results[:4]
                val_results = test_results[4:16]

                # save summary
                # The summary size mismatch was likewise resolved by the variable batch size.
                summary = sess.run(merged_validate_op,
                                   feed_dict={
                                       valid_loss: average_loss / total_cnt,
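
The 19-channel split that appears in these snippets reflects the network's output layout: the first 19 channels are the body-part heatmaps (18 parts plus background), and the remaining channels are the part affinity fields. Below is a minimal sketch of that split on a dummy tensor; the total of 57 channels is an assumption based on the standard COCO configuration (38 PAF channels), not taken from this code:

import numpy as np

NUM_HEATMAPS = 19                              # 18 body parts + background, matching the slices above
outputMat = np.zeros((4, 46, 46, 57))          # illustrative batch of network outputs (assumed shape)
heatMat = outputMat[:, :, :, :NUM_HEATMAPS]    # per-part confidence maps
pafMat = outputMat[:, :, :, NUM_HEATMAPS:]     # part affinity fields (the remaining channels)
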
def sample_augmentations():
    ds = CocoPose('/data/public/rw/coco-pose-estimation-lmdb/', is_train=False, only_idx=0)
    ds = MapDataComponent(ds, pose_random_scale)
    ds = MapDataComponent(ds, pose_rotation)
    ds = MapDataComponent(ds, pose_flip)
    ds = MapDataComponent(ds, pose_resize_shortestedge_random)
    ds = MapDataComponent(ds, pose_crop_random)
    ds = MapData(ds, pose_to_img)
    augs = [
        imgaug.RandomApplyAug(imgaug.RandomChooseAug([
            imgaug.GaussianBlur(3),
            imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.8, 1.2), clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
                # imgaug.Saturation(0.4, rgb=True),
            ]),
        ]), 0.7),
    ]
    ds = AugmentImageComponent(ds, augs)

    ds.reset_state()
    for l1, l2, l3 in ds.get_data():
        CocoPose.display_image(l1, l2, l3)
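
For training, a dataflow like this is usually batched and prefetched rather than displayed sample by sample. The sketch below is illustrative, not part of the repo; it assumes tensorpack's BatchData and PrefetchDataZMQ (older tensorpack releases; newer ones rename the latter MultiProcessRunnerZMQ):

from tensorpack.dataflow import BatchData, PrefetchDataZMQ

def batched_augmentations(batch_size=16, nr_proc=4):
    ds = CocoPose('/data/public/rw/coco-pose-estimation-lmdb/', is_train=True)
    ds = MapDataComponent(ds, pose_random_scale)
    ds = MapDataComponent(ds, pose_crop_random)
    ds = MapData(ds, pose_to_img)
    ds = PrefetchDataZMQ(ds, nr_proc=nr_proc)   # run augmentation in worker processes
    ds = BatchData(ds, batch_size)              # group samples into fixed-size batches
    ds.reset_state()
    return ds
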
logging.basicConfig(level=logging.DEBUG, format='[lmdb_dataset] %(asctime)s %(levelname)s %(message)s')

if __name__ == '__main__':
    """
    Speed Test for Getting Input batches from other nodes
    """
    parser = argparse.ArgumentParser(
        description='Worker for preparing input batches.')
    parser.add_argument('--listen', type=str, default='tcp://0.0.0.0:1027')
    parser.add_argument('--show', action='store_true', help='display each received batch')
    args = parser.parse_args()

    df = RemoteDataZMQ(args.listen)

    logging.info('tcp queue start')
    df.reset_state()
    t = time.time()
    for i, dp in enumerate(df.get_data()):
        if i == 100:
            break
        logging.info('Input batch %d received.' % i)
        if i == 0:
            for d in dp:
                logging.info('dp shape={}'.format(d.shape))

        if args.show:
            CocoPose.display_image(dp[0][0], dp[1][0], dp[2][0])

    logging.info('Speed Test Done for 100 Batches in %f seconds.' %
                 (time.time() - t))
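
The other end of this benchmark is a worker process that streams batches into the listener over ZMQ. A hedged sketch of that side using tensorpack's send_dataflow_zmq; the address is illustrative, and batched_augmentations refers to the illustrative helper sketched earlier:

from tensorpack.dataflow import send_dataflow_zmq

def run_worker(master_addr='tcp://127.0.0.1:1027'):
    ds = batched_augmentations()         # any dataflow yielding (image, heatmap, paf) batches works
    send_dataflow_zmq(ds, master_addr)   # blocks, pushing each batch to the listening master
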
Example #7
            logging.info('inference- elapsed_time={}'.format(time.time() - a))
            avg += time.time() - a
        logging.info('prediction avg= %f' % (avg / 10))

        logging.info('pose+')
        a = time.time()
        humans = PoseEstimator.estimate(heatMat, pafMat)
        logging.info('pose- elapsed_time={}'.format(time.time() - a))
        for human in humans:
            res = write_coco_json(human, args.input_width, args.input_height)
            print(res)

        logging.info('image={} heatMap={} pafMat={}'.format(
            image.shape, heatMat.shape, pafMat.shape))
        process_img = CocoPose.display_image(image,
                                             heatMat,
                                             pafMat,
                                             as_numpy=True)

        # display
        image = cv2.imread(args.imgpath)
        image_h, image_w = image.shape[:2]
        image = TfPoseEstimator.draw_humans(image, humans)

        scale = 480.0 / image_h
        newh, neww = 480, int(scale * image_w + 0.5)

        image = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)

        canvas = np.zeros([480, 640 + neww, 3], dtype=np.uint8)
        canvas[:, :640] = process_img     # left: heatmap/PAF visualization (expects 480x640)
        canvas[:, 640:] = image           # right: input image with the skeletons drawn on it
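
A short, hedged follow-up for viewing or saving the composed canvas; the window title and output path below are illustrative:

cv2.imshow('pose estimation result', canvas)   # left half: heatmap/PAF view, right half: skeleton overlay
cv2.waitKey(0)
cv2.destroyAllWindows()
# or write it to disk instead of opening a window:
# cv2.imwrite('result.png', canvas)
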
Example #8
def sample_augmentations():
    ds = CocoPose('/data/public/rw/coco-pose-estimation-lmdb/',
                  is_train=False,
                  only_idx=0)
    ds = MapDataComponent(ds, pose_random_scale)
    ds = MapDataComponent(ds, pose_rotation)
    ds = MapDataComponent(ds, pose_flip)
    ds = MapDataComponent(ds, pose_resize_shortestedge_random)
    ds = MapDataComponent(ds, pose_crop_random)
    ds = MapData(ds, pose_to_img)
    augs = [
        imgaug.RandomApplyAug(
            imgaug.RandomChooseAug([
                imgaug.GaussianBlur(3),
                imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
                imgaug.RandomOrderAug([
                    imgaug.BrightnessScale((0.8, 1.2), clip=False),
                    imgaug.Contrast((0.8, 1.2), clip=False),
                    # imgaug.Saturation(0.4, rgb=True),
                ]),
            ]),
            0.7),
    ]
    ds = AugmentImageComponent(ds, augs)

    ds.reset_state()
    for l1, l2, l3 in ds.get_data():
        CocoPose.display_image(l1, l2, l3)
from pose_dataset import CocoPose

logging.basicConfig(level=logging.DEBUG, format='[lmdb_dataset] %(asctime)s %(levelname)s %(message)s')

if __name__ == '__main__':
    """
    Speed Test for Getting Input batches from other nodes
    """
    parser = argparse.ArgumentParser(description='Worker for preparing input batches.')
    parser.add_argument('--listen', type=str, default='tcp://0.0.0.0:1027')
    parser.add_argument('--show', action='store_true', help='display each received batch')
    args = parser.parse_args()

    df = RemoteDataZMQ(args.listen)

    logging.info('tcp queue start')
    df.reset_state()
    t = time.time()
    for i, dp in enumerate(df.get_data()):
        if i == 100:
            break
        logging.info('Input batch %d received.' % i)
        if i == 0:
            for d in dp:
                logging.info('dp shape={}'.format(d.shape))

        if args.show:
            CocoPose.display_image(dp[0][0], dp[1][0], dp[2][0])

    logging.info('Speed Test Done for 100 Batches in %f seconds.' % (time.time() - t))
Example #10
                    average_loss_ll_heat += lss_ll_heat * len(images_test)
                    total_cnt += len(images_test)

                logger.info('validation(%d) %s loss=%f, loss_ll=%f, loss_ll_paf=%f, loss_ll_heat=%f' % (total_cnt, training_name, average_loss / total_cnt, average_loss_ll / total_cnt, average_loss_ll_paf / total_cnt, average_loss_ll_heat / total_cnt))
                last_gs_num2 = gs_num

                sample_image = [enqueuer.last_dp[0][i] for i in range(4)]
                outputMat = sess.run(
                    outputs,
                    feed_dict={q_inp: np.array((sample_image + val_image)*(args.batchsize // 16))}
                )
                pafMat, heatMat = outputMat[:, :, :, 19:], outputMat[:, :, :, :19]

                sample_results = []
                for i in range(len(sample_image)):
                    test_result = CocoPose.display_image(sample_image[i], heatMat[i], pafMat[i], as_numpy=True)
                    test_result = cv2.resize(test_result, (640, 640))
                    test_result = test_result.reshape([640, 640, 3]).astype(float)
                    sample_results.append(test_result)

                test_results = []
                for i in range(len(val_image)):
                    test_result = CocoPose.display_image(val_image[i], heatMat[len(sample_image) + i], pafMat[len(sample_image) + i], as_numpy=True)
                    test_result = cv2.resize(test_result, (640, 640))
                    test_result = test_result.reshape([640, 640, 3]).astype(float)
                    test_results.append(test_result)

                # save summary
                summary = sess.run(merged_validate_op, feed_dict={
                    valid_loss: average_loss / total_cnt,
                    valid_loss_ll: average_loss_ll / total_cnt,