Code Example #1
File: test_inference.py Project: JensBlack/sleap
import numpy as np
import sleap

def test_labels():
    skel = sleap.Skeleton()
    skel.add_node("a")
    skel.add_node("b")

    vid = sleap.Video.from_numpy(np.zeros((8, 12, 12, 1), dtype="uint8"))

    labels = sleap.Labels()
    for fidx in range(len(vid)):
        insts = []
        # Every frame gets one instance whose points are offset by the frame
        # index; frames 3 and later get a second instance.
        insts.append(
            sleap.Instance.from_pointsarray(
                points=np.array([[1, 2], [3, 4]]) + fidx, skeleton=skel))
        if fidx >= 3:
            insts.append(
                sleap.Instance.from_pointsarray(
                    points=np.array([[5, 6], [7, 8]]) + fidx, skeleton=skel))

        lf = sleap.LabeledFrame(video=vid, frame_idx=fidx, instances=insts)
        labels.append(lf)

    return labels
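
The helper above builds an eight-frame, two-node project entirely in memory. As a quick illustration of what it encodes, here is a minimal sketch (assuming test_labels() from the snippet is importable) that walks the returned project and re-derives the per-frame instance counts:

import numpy as np

labels = test_labels()
assert len(labels) == 8  # one labeled frame per video frame
for lf in labels:
    # Frames 0-2 carry one instance; frames 3-7 carry a second one.
    assert len(lf.instances) == (1 if lf.frame_idx < 3 else 2)
    # Points were offset by the frame index, so node "a" of the first
    # instance sits at (1 + frame_idx, 2 + frame_idx).
    np.testing.assert_array_equal(
        lf.instances[0].numpy()[0], [1 + lf.frame_idx, 2 + lf.frame_idx])
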
Code Example #2
import numpy as np
import sleap

def test_random_flipper():
    vid = sleap.Video.from_filename(
        "tests/data/json_format_v1/centered_pair_low_quality.mp4")
    skel = sleap.Skeleton.from_names_and_edge_inds(["A", "BL", "BR"],
                                                   [[0, 1], [0, 2]])
    labels = sleap.Labels([
        sleap.LabeledFrame(
            video=vid,
            frame_idx=0,
            instances=[
                sleap.Instance.from_pointsarray([[25, 50], [50, 25], [25, 25]],
                                                skeleton=skel),
                sleap.Instance.from_pointsarray(
                    [[125, 150], [150, 125], [125, 125]], skeleton=skel),
            ],
        )
    ])

    # Always-on horizontal flip (probability=1.0): x -> (width - 1) - x.
    p = labels.to_pipeline()
    p += sleap.nn.data.augmentation.RandomFlipper.from_skeleton(
        skel, horizontal=True, probability=1.0)
    ex = p.peek()
    np.testing.assert_array_equal(ex["image"], vid[0][0][:, ::-1])
    np.testing.assert_array_equal(
        ex["instances"],
        [
            [[358.0, 50.0], [333.0, 25.0], [358.0, 25.0]],
            [[258.0, 150.0], [233.0, 125.0], [258.0, 125.0]],
        ],
    )

    # Registering a symmetry makes the flip also swap the BL/BR node values.
    skel.add_symmetry("BL", "BR")

    p = labels.to_pipeline()
    p += sleap.nn.data.augmentation.RandomFlipper.from_skeleton(
        skel, horizontal=True, probability=1.0)
    ex = p.peek()
    np.testing.assert_array_equal(ex["image"], vid[0][0][:, ::-1])
    np.testing.assert_array_equal(
        ex["instances"],
        [
            [[358.0, 50.0], [358.0, 25.0], [333.0, 25.0]],
            [[258.0, 150.0], [258.0, 125.0], [233.0, 125.0]],
        ],
    )

    # probability=0.0 means the flip never fires; data passes through unchanged.
    p = labels.to_pipeline()
    p += sleap.nn.data.augmentation.RandomFlipper.from_skeleton(
        skel, horizontal=True, probability=0.0)
    ex = p.peek()
    np.testing.assert_array_equal(ex["image"], vid[0][0])
    np.testing.assert_array_equal(
        ex["instances"],
        [[[25, 50], [50, 25], [25, 25]], [[125, 150], [150, 125], [125, 125]]],
    )

    # horizontal=False flips vertically instead: y -> (height - 1) - y.
    p = labels.to_pipeline()
    p += sleap.nn.data.augmentation.RandomFlipper.from_skeleton(
        skel, horizontal=False, probability=1.0)
    ex = p.peek()
    np.testing.assert_array_equal(ex["image"], vid[0][0][::-1, :])
    np.testing.assert_array_equal(
        ex["instances"],
        [[[25, 333], [25, 358], [50, 358]], [[125, 233], [125, 258],
                                             [150, 258]]],
    )
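
The expected arrays above follow from simple coordinate arithmetic: the test video appears to be 384x384 (383 - 25 = 358), a horizontal flip maps x to (width - 1) - x, and a registered symmetry additionally swaps the symmetric nodes. A self-contained NumPy sketch of that math, assuming the 384-pixel frame size implied by the assertions:

import numpy as np

w = h = 384  # frame size assumed from the assertions (383 - 25 = 358)
pts = np.array([[25, 50], [50, 25], [25, 25]], dtype=float)  # (x, y) per node

flipped = pts.copy()
flipped[:, 0] = (w - 1) - flipped[:, 0]  # horizontal flip: x -> 383 - x
np.testing.assert_array_equal(flipped, [[358, 50], [333, 25], [358, 25]])

# With the BL<->BR symmetry registered, RandomFlipper also swaps nodes 1 and 2
# so that "left" stays anatomically left after the flip:
flipped[[1, 2]] = flipped[[2, 1]]
np.testing.assert_array_equal(flipped, [[358, 50], [358, 25], [333, 25]])
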
Code Example #3
import numpy as np
import sleap
from sleap.nn.data import providers
from sleap.nn.data.resizing import SizeMatcher
# Fixture path constants from the sleap test suite (assumed location):
from tests.fixtures.videos import TEST_H5_FILE, TEST_SMALL_ROBOT_MP4_FILE

def test_size_matcher():
    # Create some fake data using two videos of different sizes.
    skeleton = sleap.Skeleton.from_names_and_edge_inds(["A"])
    labels = sleap.Labels([
        sleap.LabeledFrame(
            frame_idx=0,
            video=sleap.Video.from_filename(TEST_SMALL_ROBOT_MP4_FILE,
                                            grayscale=True),
            instances=[
                sleap.Instance.from_pointsarray(np.array([[128, 128]]),
                                                skeleton=skeleton)
            ],
        ),
        sleap.LabeledFrame(
            frame_idx=0,
            video=sleap.Video.from_filename(TEST_H5_FILE,
                                            dataset="/box",
                                            input_format="channels_first"),
            instances=[
                sleap.Instance.from_pointsarray(np.array([[128, 128]]),
                                                skeleton=skeleton)
            ],
        ),
    ])

    # Create a loader for those labels.
    labels_reader = providers.LabelsReader(labels)
    ds = labels_reader.make_dataset()
    ds_iter = iter(ds)
    assert next(ds_iter)["image"].shape == (320, 560, 1)
    assert next(ds_iter)["image"].shape == (512, 512, 1)

    def check_padding(image, from_y, to_y, from_x, to_x):
        assert (image.numpy()[from_y:to_y, from_x:to_x] == 0).all()

    # Check SizeMatcher when the target dims are not strictly larger than the actual image dims.
    size_matcher = SizeMatcher(max_image_height=560, max_image_width=560)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (560, 560, 1)
    # padding should be on the bottom
    check_padding(im1, 321, 560, 0, 560)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (560, 560, 1)

    # Variant 2: target height matches the smaller video, so the larger image is scaled down.
    size_matcher = SizeMatcher(max_image_height=320, max_image_width=560)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (320, 560, 1)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (320, 560, 1)
    # padding should be on the right
    check_padding(im2, 0, 320, 321, 560)

    # Check SizeMatcher when the target is the elementwise max of the two image sizes (512, 560).
    size_matcher = SizeMatcher(max_image_height=512, max_image_width=560)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (512, 560, 1)
    # Check padding is on the bottom
    check_padding(im1, 320, 512, 0, 560)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (512, 560, 1)
    # Check padding is on the right
    check_padding(im2, 0, 512, 512, 560)

    # Check SizeMatcher when target is larger in both dimensions
    size_matcher = SizeMatcher(max_image_height=750, max_image_width=750)
    transform_iter = iter(size_matcher.transform_dataset(ds))
    im1 = next(transform_iter)["image"]
    assert im1.shape == (750, 750, 1)
    # Check padding is on the bottom
    check_padding(im1, 700, 750, 0, 750)
    im2 = next(transform_iter)["image"]
    assert im2.shape == (750, 750, 1)
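
The shapes and padding locations above are consistent with a scale-then-pad scheme: each image is resized by the largest factor that keeps it inside (max_image_height, max_image_width) without changing its aspect ratio, and the remainder is zero-padded on the bottom and right. A minimal sketch of that sizing logic under this assumption (not SLEAP's actual implementation):

def fit_and_pad_shape(h, w, max_h, max_w):
    # Largest scale that keeps (h, w) inside (max_h, max_w); may upscale,
    # as the 750x750 variant above shows.
    scale = min(max_h / h, max_w / w)
    new_h, new_w = int(h * scale), int(w * scale)
    return (new_h, new_w), (max_h - new_h, max_w - new_w)

# 320x560 robot video vs. a 560x560 target: no scaling, 240 px bottom pad,
# matching check_padding(im1, 321, 560, 0, 560).
assert fit_and_pad_shape(320, 560, 560, 560) == ((320, 560), (240, 0))
# 512x512 HDF5 video vs. a 320x560 target: scaled to 320x320, 240 px right pad,
# matching check_padding(im2, 0, 320, 321, 560).
assert fit_and_pad_shape(512, 512, 320, 560) == ((320, 320), (0, 240))
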
Code Example #4
import os

import numpy as np
import sleap

def test_merge_with_package(min_labels_robot, tmpdir):
    # Add a suggestion and save with images.
    labels = min_labels_robot
    labels.suggestions.append(
        sleap.io.dataset.SuggestionFrame(video=labels.video, frame_idx=1))
    pkg_path = os.path.join(tmpdir, "test.pkg.slp")
    assert len(labels.predicted_instances) == 0
    labels.save(pkg_path, with_images=True, embed_suggested=True)

    # Load package.
    labels_pkg = sleap.load_file(pkg_path)
    assert isinstance(labels_pkg.video.backend, sleap.io.video.HDF5Video)
    assert labels_pkg.video.backend.has_embedded_images
    assert isinstance(labels_pkg.video.backend._source_video.backend,
                      sleap.io.video.MediaVideo)
    assert len(labels_pkg.predicted_instances) == 0

    # Add prediction.
    inst = labels_pkg.user_instances[0]
    pts = inst.numpy()
    inst_pr = sleap.PredictedInstance.from_pointsarray(
        pts,
        skeleton=labels_pkg.skeleton,
        point_confidences=np.zeros(len(pts)),
        instance_score=1.0,
    )
    labels_pkg.append(
        sleap.LabeledFrame(
            video=labels_pkg.suggestions[0].video,
            frame_idx=labels_pkg.suggestions[0].frame_idx,
            instances=[inst_pr],
        ))

    # Save labels without image data.
    preds_path = pkg_path + ".predictions.slp"
    labels_pkg.save(preds_path)

    # Load predicted labels created from package.
    labels_pr = sleap.load_file(preds_path)
    assert len(labels_pr.predicted_instances) == 1

    # Merge with base labels.
    base_video_path = labels.video.backend.filename
    merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
        labels, labels_pr)
    assert len(labels.videos) == 1
    assert labels.video.backend.filename == base_video_path
    assert len(labels.predicted_instances) == 1
    assert len(extra_base) == 0
    assert len(extra_new) == 0
    assert labels.predicted_instances[0].frame.frame_idx == 1

    # Merge predictions to package instead.
    labels_pkg = sleap.load_file(pkg_path)
    labels_pr = sleap.load_file(preds_path)
    assert len(labels_pkg.predicted_instances) == 0
    base_video_path = labels_pkg.video.backend.filename
    merged, extra_base, extra_new = sleap.Labels.complex_merge_between(
        labels_pkg, labels_pr)
    assert len(labels_pkg.videos) == 1
    assert labels_pkg.video.backend.filename == base_video_path
    assert len(labels_pkg.predicted_instances) == 1
    assert len(extra_base) == 0
    assert len(extra_new) == 0
    assert labels_pkg.predicted_instances[0].frame.frame_idx == 1
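
As the assertions above suggest, complex_merge_between merges the second argument's frames into the first in place, returning the merged frames plus any conflicting extras from either side. A minimal usage sketch with hypothetical file paths:

import sleap

base = sleap.load_file("base.slp")              # hypothetical paths
preds = sleap.load_file("predictions.slp")
merged, extra_base, extra_new = sleap.Labels.complex_merge_between(base, preds)
if extra_base or extra_new:
    # Conflicting frames were left out of the merge and need manual review.
    print(f"unmerged: {len(extra_base)} base frames, {len(extra_new)} new frames")
base.save("merged.slp")
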
Code Example #5
from typing import Dict, List, Optional, Text

import numpy as np
import tensorflow as tf

import sleap
from sleap.nn.tracking import Tracker

def make_grouped_labeled_frame(
    video_ind: int,
    frame_ind: int,
    frame_examples: List[Dict[Text, tf.Tensor]],
    videos: List[sleap.Video],
    skeleton: "Skeleton",
    points_key: Text,
    point_confidences_key: Text,
    image_key: Optional[Text] = None,
    instance_score_key: Optional[Text] = None,
    tracker: Optional[Tracker] = None,
) -> List[sleap.LabeledFrame]:
    """Group examples from one frame into a list of predicted labeled frames."""
    predicted_frames = []

    # Create predicted instances from examples in the current frame.
    predicted_instances = []
    img = None
    for example in frame_examples:
        if instance_score_key is None:
            instance_scores = np.nansum(example[point_confidences_key].numpy(), axis=-1)
        else:
            instance_scores = example[instance_score_key]

        if example[points_key].ndim == 3:
            for points, confidences, instance_score in zip(
                example[points_key], example[point_confidences_key], instance_scores
            ):
                if not np.isnan(points).all():
                    predicted_instances.append(
                        sleap.PredictedInstance.from_arrays(
                            points=points,
                            point_confidences=confidences,
                            instance_score=instance_score,
                            skeleton=skeleton,
                        )
                    )
        else:
            points = example[points_key]
            confidences = example[point_confidences_key]
            instance_score = instance_scores

            if not np.isnan(points).all():
                predicted_instances.append(
                    sleap.PredictedInstance.from_arrays(
                        points=points,
                        point_confidences=confidences,
                        instance_score=instance_score,
                        skeleton=skeleton,
                    )
                )

        # Keep the (optional) image from the example for the tracker below.
        if image_key is not None and image_key in example:
            img = example[image_key]
        else:
            img = None

    if len(predicted_instances) > 0:
        if tracker:
            # Set tracks for predicted instances in this frame.
            predicted_instances = tracker.track(
                untracked_instances=predicted_instances, img=img, t=frame_ind
            )

        # Create labeled frame from predicted instances.
        labeled_frame = sleap.LabeledFrame(
            video=videos[video_ind], frame_idx=frame_ind, instances=predicted_instances
        )

        predicted_frames.append(labeled_frame)

    return predicted_frames
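
Note the fallback on the instance score: when no instance_score_key is provided, the score is the nan-sum of that instance's point confidences. A small NumPy check of just that line:

import numpy as np

confidences = np.array([[0.9, np.nan, 0.7],   # instance 0: one missing point
                        [0.5, 0.5, 0.5]])     # instance 1
scores = np.nansum(confidences, axis=-1)      # NaN confidences count as zero
np.testing.assert_allclose(scores, [1.6, 1.5])
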
Code Example #6
import sleap
from sleap.nn.training import split_labels_train_val  # assumed source module

def test_split_labels_train_val():
    # A placeholder video suffices; frames are never read when splitting.
    vid = sleap.Video(backend=sleap.io.video.MediaVideo)
    labels = sleap.Labels([sleap.LabeledFrame(video=vid, frame_idx=0)])

    # With a single labeled frame, both splits receive that frame.
    train, train_inds, val, val_inds = split_labels_train_val(labels, 0)
    assert len(train) == 1
    assert len(val) == 1

    train, train_inds, val, val_inds = split_labels_train_val(labels, 0.1)
    assert len(train) == 1
    assert len(val) == 1

    train, train_inds, val, val_inds = split_labels_train_val(labels, 0.5)
    assert len(train) == 1
    assert len(val) == 1

    train, train_inds, val, val_inds = split_labels_train_val(labels, 1.0)
    assert len(train) == 1
    assert len(val) == 1

    labels = sleap.Labels([
        sleap.LabeledFrame(video=vid, frame_idx=0),
        sleap.LabeledFrame(video=vid, frame_idx=1),
    ])
    # With two frames, each split gets exactly one, whatever the fraction.
    train, train_inds, val, val_inds = split_labels_train_val(labels, 0)
    assert len(train) == 1
    assert len(val) == 1
    assert train[0].frame_idx != val[0].frame_idx

    train, train_inds, val, val_inds = split_labels_train_val(labels, 0.1)
    assert len(train) == 1
    assert len(val) == 1
    assert train[0].frame_idx != val[0].frame_idx

    train, train_inds, val, val_inds = split_labels_train_val(labels, 0.5)
    assert len(train) == 1
    assert len(val) == 1
    assert train[0].frame_idx != val[0].frame_idx

    train, train_inds, val, val_inds = split_labels_train_val(labels, 1.0)
    assert len(train) == 1
    assert len(val) == 1
    assert train[0].frame_idx != val[0].frame_idx

    labels = sleap.Labels([
        sleap.LabeledFrame(video=vid, frame_idx=0),
        sleap.LabeledFrame(video=vid, frame_idx=1),
        sleap.LabeledFrame(video=vid, frame_idx=2),
    ])
    # With three frames, each split keeps at least one frame.
    train, train_inds, val, val_inds = split_labels_train_val(labels, 0)
    assert len(train) == 2
    assert len(val) == 1

    train, train_inds, val, val_inds = split_labels_train_val(labels, 0.1)
    assert len(train) == 2
    assert len(val) == 1

    train, train_inds, val, val_inds = split_labels_train_val(labels, 0.5)
    assert len(train) + len(val) == 3

    train, train_inds, val, val_inds = split_labels_train_val(labels, 1.0)
    assert len(train) == 1
    assert len(val) == 2
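
Taken together, the assertions pin down the split sizing: both splits are always non-empty, so a single-frame project is duplicated into both sides, and the requested fraction is otherwise rounded and clipped. A plain sketch of sizing rules consistent with these assertions (an assumption about the behavior, not SLEAP's actual code):

def split_sizes(n_frames, validation_fraction):
    # Round the requested validation count, then clip so neither split is empty.
    n_val = round(n_frames * validation_fraction)
    n_val = min(max(n_val, 1), n_frames - 1) if n_frames > 1 else 1
    n_train = max(n_frames - n_val, 1)
    return n_train, n_val

assert split_sizes(1, 0.5) == (1, 1)   # single frame used in both splits
assert split_sizes(2, 0.0) == (1, 1)   # both splits kept non-empty
assert split_sizes(3, 1.0) == (1, 2)   # train keeps at least one frame
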