Code Example #1
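This test exercises the hand-localization stage: HandSegmentationNet is wrapped between pre- and post-processing with pr.Predict, and the returned hand crop, segmentation map, hand center, bounding box, and crop size are checked for the expected shapes and values.
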
# Imports assumed for a self-contained run: SequentialProcessor and the pr
# processors are part of paz's public API; HandSegmentationNet and
# PostProcessSegmentation are assumed to come from the local HandPoseEstimation
# and pipelines modules, following Code Example #5.
from paz import processors as pr
from paz.abstract import SequentialProcessor
from paz.backend.image.opencv_image import load_image

from HandPoseEstimation import HandSegmentationNet
from pipelines import PostProcessSegmentation


def test_image_cropping():
    handsegnet = HandSegmentationNet()
    preprocess_image = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])

    postprocess_segmentation = PostProcessSegmentation(320, 320)

    # pr.Predict runs the model between the pre- and post-processing pipelines
    localize_hand = pr.Predict(handsegnet, preprocess_image,
                               postprocess_segmentation)
    image = load_image('./sample.jpg')
    hand_crop, segmentation_map, center, boxes, crop_sizes = localize_hand(
        image)
    box = boxes[0]
    xmin, ymin, xmax, ymax = box
    crop_size = crop_sizes[0]

    # The crop is a single 256x256 RGB patch; the segmentation map keeps the
    # 320x320 input resolution.
    assert len(hand_crop.shape) == 4
    assert hand_crop.shape == (1, 256, 256, 3)
    assert len(segmentation_map.shape) == 4
    assert segmentation_map.shape == (1, 320, 320, 1)
    assert center == [[191.5, 194.5]]
    assert len(box) == 4
    assert box == [114, 153, 269, 236]
    # A valid box satisfies xmax > xmin and ymax > ymin
    assert xmax > xmin and ymax > ymin
    assert round(crop_size[0], 2) == 1.32
Code Example #2
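This test checks the image preprocessing pipeline alone: normalization, resizing to 320x320, and adding a batch dimension should yield a (1, 320, 320, 3) tensor.
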
# Standard paz imports; load_image comes from paz's OpenCV backend as in
# Code Example #5.
from paz import processors as pr
from paz.abstract import SequentialProcessor
from paz.backend.image.opencv_image import load_image


def test_preprocess_image():
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)

    assert len(processed_image.shape) == 4
    assert processed_image.shape == (1, 320, 320, 3)
Code Example #3
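This test feeds a preprocessed image through the segmentation postprocessing pipeline and verifies that it returns five outputs with the expected shapes.
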
# Imports and model setup assumed as in the other examples: the network comes
# from the local HandPoseEstimation module and the pipeline from the local
# pipelines module, instantiated at module level as in Code Example #5.
from paz import processors as pr
from paz.abstract import SequentialProcessor
from paz.backend.image.opencv_image import load_image

from HandPoseEstimation import HandSegmentationNet
from pipelines import PostProcessSegmentation

HandSegNet = HandSegmentationNet()


def test_segmentation_postprocess():
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)

    localization_pipeline = PostProcessSegmentation(HandSegNet)
    localization_output = localization_pipeline(processed_image)

    # Five outputs: hand crop, segmentation map, hand center, box corners and
    # crop size, matching the unpacking in Code Example #1.
    assert len(localization_output) == 5
    assert localization_output[0].shape == (1, 256, 256, 3)
    assert localization_output[1].shape == (1, 320, 320, 1)
    assert localization_output[2].shape == (1, 2)
    assert localization_output[3].shape == (1, 2, 2)
    assert localization_output[4].shape == (1, 1)
Code Example #4
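This test runs the 2D keypoint stage on the cropped hand produced by the localization pipeline and checks that the network returns 21 score maps of size 32x32.
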
# Imports and model setup assumed as in the other examples; both networks are
# instantiated at module level, following Code Example #5.
import numpy as np

from paz import processors as pr
from paz.abstract import SequentialProcessor
from paz.backend.image.opencv_image import load_image

from HandPoseEstimation import HandSegmentationNet, PoseNet
from pipelines import PostProcessSegmentation, Process2DKeypoints

HandSegNet = HandSegmentationNet()
HandPoseNet = PoseNet()


def test_keypoints2D_process():
    preprocess_pipeline = SequentialProcessor(
        [pr.NormalizeImage(),
         pr.ResizeImage((320, 320)),
         pr.ExpandDims(0)])
    image = load_image('./sample.jpg')
    processed_image = preprocess_pipeline(image)

    localization_pipeline = PostProcessSegmentation(HandSegNet)
    localization_output = localization_pipeline(processed_image)

    keypoints_pipeline = Process2DKeypoints(HandPoseNet)
    score_maps_dict = keypoints_pipeline(
        np.squeeze(localization_output[0], axis=0))
    score_maps = score_maps_dict['score_maps']

    # One 32x32 score map per hand keypoint (21 keypoints).
    assert score_maps.shape == (1, 32, 32, 21)
    assert len(score_maps) == 1
Code Example #5
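A complete demo script: it instantiates the four networks, assembles the DetectHandKeypoints pipeline, runs it on a sample image, and then displays and saves the returned image (presumably with the detected hand keypoints drawn on it).
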
from HandPoseEstimation import HandSegmentationNet, PosePriorNet, PoseNet
from HandPoseEstimation import ViewPointNet
from paz.backend.image.opencv_image import load_image, show_image, write_image
from pipelines import DetectHandKeypoints

use_pretrained = True
HandSegNet = HandSegmentationNet()
HandPoseNet = PoseNet()
HandPosePriorNet = PosePriorNet()
HandViewPointNet = ViewPointNet()

pipeline = DetectHandKeypoints(HandSegNet, HandPoseNet, HandPosePriorNet,
                               HandViewPointNet)

image = load_image('./sample.jpg')
detection = pipeline(image)

show_image(detection['image'].astype('uint8'))
write_image('./detection.jpg', detection['image'].astype('uint8'))