Code Example #1
File: test_instance.py  Project: rlinus/sleap
def test_instance_node_get_set_item(skeleton):
    """
    Test basic get item and set item functionality of instances.
    """
    instance = Instance(skeleton=skeleton)
    instance["head"].x = 20
    instance["head"].y = 50

    instance["left-wing"] = Point(x=30, y=40, visible=False)

    assert instance["head"].x == 20
    assert instance["head"].y == 50

    assert instance["left-wing"] == Point(x=30, y=40, visible=False)

    thorax_point = instance["thorax"]
    assert math.isnan(thorax_point.x) and math.isnan(thorax_point.y)

    instance[0] = [-20, -50]
    assert instance["head"].x == -20
    assert instance["head"].y == -50

    instance[0] = np.array([-21, -51])
    assert instance["head"].x == -21
    assert instance["head"].y == -51
Code Example #2
def test_points_array_copying(skeleton):
    node_names = ["left-wing", "head", "right-wing"]
    points = {"head": Point(1, 4), "left-wing": Point(2, 5), "right-wing": Point(3, 6)}

    instance1 = Instance(skeleton=skeleton, points=points)

    first_node = skeleton.nodes[0]

    # Make sure that changing *uncopied* points array does change instance.
    pts = instance1.get_points_array(copy=False)
    assert pts[0]["x"] == instance1[first_node].x
    pts[0]["x"] = 123
    assert pts[0]["x"] == instance1[first_node].x  # these should match

    # Make sure that changing copied points array doesn't change instance.
    pts = instance1.get_points_array(copy=True)
    assert pts[0][0] == instance1[first_node].x
    pts[0][0] = 456
    assert pts[0][0] != instance1[first_node].x  # these shouldn't match

    # Make sure we can get full copy
    pts = instance1.get_points_array(copy=True, full=True)
    assert pts.shape[1] == 4  # x, y, visible, complete

    # Make sure we can get copy with just coordinates
    pts = instance1.get_points_array(copy=True, full=False)
    assert pts.shape[1] == 2  # x, y
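A note on the access patterns above: the uncopied result is indexed by field name (pts[0]["x"]), which suggests it is a structured view over the instance's underlying point records, while the copied result is indexed positionally (pts[0][0]), i.e. a plain float array. That difference is presumably why in-place edits propagate to the instance in the first case but not in the second.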
Code Example #3
def test_points_array(skeleton):
    """ Test conversion of instances to points array"""

    node_names = ["left-wing", "head", "right-wing"]
    points = {"head": Point(1, 4), "left-wing": Point(2, 5), "right-wing": Point(3, 6)}

    instance1 = Instance(skeleton=skeleton, points=points)

    pts = instance1.get_points_array()

    assert pts.shape == (len(skeleton.nodes), 2)
    assert np.allclose(pts[skeleton.node_to_index("left-wing"), :], [2, 5])
    assert np.allclose(pts[skeleton.node_to_index("head"), :], [1, 4])
    assert np.allclose(pts[skeleton.node_to_index("right-wing"), :], [3, 6])
    assert np.isnan(pts[skeleton.node_to_index("thorax"), :]).all()

    # Now change a point, make sure it is reflected
    instance1["head"].x = 0
    instance1["thorax"] = Point(1, 2)
    pts = instance1.get_points_array()
    assert np.allclose(pts[skeleton.node_to_index("head"), :], [0, 4])
    assert np.allclose(pts[skeleton.node_to_index("thorax"), :], [1, 2])

    # Make sure that invisible points are nan iff invisible_as_nan=True
    instance1["thorax"] = Point(1, 2, visible=False)

    pts = instance1.get_points_array()
    assert not np.isnan(pts[skeleton.node_to_index("thorax"), :]).all()

    pts = instance1.points_array
    assert np.isnan(pts[skeleton.node_to_index("thorax"), :]).all()
Code Example #4
File: datasets.py  Project: xuerenjie124/sleap
def multi_skel_vid_labels(hdf5_vid, small_robot_mp4_vid, skeleton, stickman):
    """
    Build a big list of LabeledFrame objects and wrap it in a Labels object.

    Args:
        hdf5_vid: An HDF5 video fixture
        small_robot_mp4_vid: An MP4 video fixture
        skeleton: A fly skeleton.
        stickman: A stickman skeleton

    Returns:
        The Labels object containing all the labeled frames
    """
    labels = []
    stick_tracks = [
        Track(spawned_on=0, name=f"Stickman {i}") for i in range(6)
    ]
    fly_tracks = [Track(spawned_on=0, name=f"Fly {i}") for i in range(6)]

    # Make some tracks None to test that case as well
    fly_tracks[3] = None
    stick_tracks[2] = None

    for f in range(500):
        vid = [hdf5_vid, small_robot_mp4_vid][f % 2]
        label = LabeledFrame(video=vid, frame_idx=f % vid.frames)

        fly_instances = []
        for i in range(6):
            fly_instances.append(
                Instance(skeleton=skeleton, track=fly_tracks[i]))
            for node in skeleton.nodes:
                fly_instances[i][node] = Point(x=i % vid.width,
                                               y=i % vid.height)

        stickman_instances = []
        for i in range(6):
            stickman_instances.append(
                Instance(skeleton=stickman, track=stick_tracks[i]))
            for node in stickman.nodes:
                stickman_instances[i][node] = Point(x=i % vid.width,
                                                    y=i % vid.height)

        label.instances = stickman_instances + fly_instances
        labels.append(label)

    labels = Labels(labels)

    return labels
Code Example #5
File: test_instance.py  Project: xuerenjie124/sleap
def test_frame_merge_predicted_and_user(skeleton, centered_pair_vid):
    user_inst = Instance(
        skeleton=skeleton,
        points={skeleton.nodes[0]: Point(1, 2)},
    )
    user_frame = LabeledFrame(
        video=centered_pair_vid,
        frame_idx=0,
        instances=[user_inst],
    )

    pred_inst = PredictedInstance(
        skeleton=skeleton,
        points={skeleton.nodes[0]: PredictedPoint(1, 2, score=1.0)},
        score=1.0,
    )
    pred_frame = LabeledFrame(
        video=centered_pair_vid,
        frame_idx=0,
        instances=[pred_inst],
    )

    LabeledFrame.complex_frame_merge(user_frame, pred_frame)

    # We should be able to cleanly merge the user and the predicted instance,
    # and we want to retain both even though they perfectly match.
    assert user_inst in user_frame.instances
    assert pred_inst in user_frame.instances
    assert len(user_frame.instances) == 2
Code Example #6
def test_frame_merge_between_predicted_and_user(skeleton, centered_pair_vid):
    user_inst = Instance(skeleton=skeleton, points={skeleton.nodes[0]: Point(1, 2)},)
    user_labels = Labels(
        [LabeledFrame(video=centered_pair_vid, frame_idx=0, instances=[user_inst],)]
    )

    pred_inst = PredictedInstance(
        skeleton=skeleton,
        points={skeleton.nodes[0]: PredictedPoint(1, 2, score=1.0)},
        score=1.0,
    )
    pred_labels = Labels(
        [LabeledFrame(video=centered_pair_vid, frame_idx=0, instances=[pred_inst],)]
    )

    # Merge predictions into current labels dataset
    _, _, new_conflicts = Labels.complex_merge_between(
        user_labels,
        new_labels=pred_labels,
        unify=False,  # since we used match_to when loading predictions file
    )

    # new predictions should replace old ones
    Labels.finish_complex_merge(user_labels, new_conflicts)

    # We should be able to cleanly merge the user and the predicted instance,
    # and we want to retain both even though they perfectly match.
    assert user_inst in user_labels[0].instances
    assert pred_inst in user_labels[0].instances
    assert len(user_labels[0].instances) == 2
Code Example #7
def test_instance_node_multi_get_set_item(skeleton):
    """
    Test basic get item and set item functionality of instances.
    """
    node_names = ["left-wing", "head", "right-wing"]
    points = {"head": Point(1, 4), "left-wing": Point(2, 5), "right-wing": Point(3, 6)}

    instance1 = Instance(skeleton=skeleton, points=points)

    instance1[node_names] = list(points.values())

    x_values = [p.x for p in instance1[node_names]]
    y_values = [p.y for p in instance1[node_names]]

    assert np.allclose(x_values, [1, 2, 3])
    assert np.allclose(y_values, [4, 5, 6])
Code Example #8
def test_instance_point_iter(skeleton):
    """
    Test iteration methods over instances.
    """
    node_names = ["left-wing", "head", "right-wing"]
    points = {"head": Point(1, 4), "left-wing": Point(2, 5), "right-wing": Point(3, 6)}

    instance = Instance(skeleton=skeleton, points=points)

    assert [node.name for node in instance.nodes] == ["head", "left-wing", "right-wing"]
    assert np.allclose([p.x for p in instance.points], [1, 2, 3])
    assert np.allclose([p.y for p in instance.points], [4, 5, 6])

    # Make sure we can iterate over tuples
    for (node, point) in instance.nodes_points:
        assert points[node.name] == point
Code Example #9
def test_modifying_skeleton(skeleton):
    node_names = ["left-wing", "head", "right-wing"]
    points = {"head": Point(1, 4), "left-wing": Point(2, 5), "right-wing": Point(3, 6)}

    instance1 = Instance(skeleton=skeleton, points=points)

    assert len(instance1.points) == 3

    skeleton.add_node("new test node")

    instance1.points  # this updates instance with changes from skeleton
    instance1["new test node"] = Point(7, 8)

    assert len(instance1.points) == 4

    skeleton.delete_node("head")
    assert len(instance1.points) == 3
Code Example #10
File: test_dataset.py  Project: xuerenjie124/sleap
def test_merge_predictions():
    dummy_video_a = Video.from_filename("foo.mp4")
    dummy_video_b = Video.from_filename("foo.mp4")

    dummy_skeleton_a = Skeleton()
    dummy_skeleton_a.add_node("node")

    dummy_skeleton_b = Skeleton()
    dummy_skeleton_b.add_node("node")

    dummy_instances_a = []
    dummy_instances_a.append(
        Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(1, 1)))
    )
    dummy_instances_a.append(
        Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(2, 2)))
    )

    labels_a = Labels()
    labels_a.append(
        LabeledFrame(dummy_video_a, frame_idx=0, instances=dummy_instances_a)
    )

    dummy_instances_b = []
    dummy_instances_b.append(
        Instance(skeleton=dummy_skeleton_b, points=dict(node=Point(1, 1)))
    )
    dummy_instances_b.append(
        PredictedInstance(
            skeleton=dummy_skeleton_b, points=dict(node=Point(3, 3)), score=1
        )
    )

    labels_b = Labels()
    labels_b.append(
        LabeledFrame(dummy_video_b, frame_idx=0, instances=dummy_instances_b)
    )

    # Frames have one redundant instance (perfect match) and all the
    # non-matching instances are different types (one predicted, one not).
    merged, extra_a, extra_b = Labels.complex_merge_between(labels_a, labels_b)
    assert len(merged[dummy_video_a]) == 1
    assert len(merged[dummy_video_a][0]) == 1  # the predicted instance was merged
    assert not extra_a
    assert not extra_b
Code Example #11
File: instances.py  Project: rlinus/sleap
def instances(skeleton):

    # Generate some instances
    NUM_INSTANCES = 500

    instances = []
    for i in range(NUM_INSTANCES):
        instance = Instance(skeleton=skeleton)
        instance["head"] = Point(i * 1, i * 2)
        instance["left-wing"] = Point(10 + i * 1, 10 + i * 2)
        instance["right-wing"] = Point(20 + i * 1, 20 + i * 2)

        # Let's make a NaN entry to test skip_nan as well
        instance["thorax"]

        instances.append(instance)

    return instances
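The bare instance["thorax"] access above leaves that node at NaN coordinates (as in Example #1), which is exactly what a skip_nan option would filter out. A minimal, illustrative sketch of dropping those unset nodes with the get_points_array API from Examples #2 and #3 (not part of the fixture itself):

import numpy as np

pts = instances[0].get_points_array()        # (n_nodes, 2); unset nodes are NaN
valid_pts = pts[~np.isnan(pts).any(axis=1)]  # keep only rows with both x and y set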
Code Example #12
def test_non_exist_node(skeleton):
    """
    Test that instances raise a KeyError for nodes that don't exist in the skeleton.
    """
    instance = Instance(skeleton=skeleton)

    with pytest.raises(KeyError):
        instance["non-existent-node"].x = 1

    with pytest.raises(KeyError):
        instance = Instance(skeleton=skeleton, points={"non-exist": Point()})
Code Example #13
def test_instance_comparison(skeleton):

    node_names = ["left-wing", "head", "right-wing"]
    points = {"head": Point(1, 4), "left-wing": Point(2, 5), "right-wing": Point(3, 6)}

    instance1 = Instance(skeleton=skeleton, points=points)
    instance2 = copy.deepcopy(instance1)

    assert instance1.matches(instance1)

    assert instance1 != instance2

    assert instance1.matches(instance2)

    instance2["head"].x = 42
    assert not instance1.matches(instance2)

    instance2 = copy.deepcopy(instance1)
    instance2.skeleton.add_node("extra_node")
    assert not instance1.matches(instance2)
Code Example #14
File: instances.py  Project: rlinus/sleap
def multi_skel_instances(skeleton, stickman):
    """
    Set up some instances that reference multiple skeletons.
    """

    # Generate some instances
    NUM_INSTANCES = 500

    instances = []
    for i in range(NUM_INSTANCES):
        instance = Instance(skeleton=skeleton, video=None, frame_idx=i)
        instance["head"] = Point(i * 1, i * 2)
        instance["left-wing"] = Point(10 + i * 1, 10 + i * 2)
        instance["right-wing"] = Point(20 + i * 1, 20 + i * 2)

        # Let's make a NaN entry to test skip_nan as well
        instance["thorax"]

        instances.append(instance)

    # Set up some stickman instances on the same frames
    for i in range(NUM_INSTANCES):
        instance = Instance(skeleton=stickman, video=None, frame_idx=i)
        instance["head"] = Point(i * 10, i * 20)
        instance["body"] = Point(100 + i * 1, 100 + i * 2)
        instance["left-arm"] = Point(200 + i * 1, 200 + i * 2)

        instances.append(instance)

    return instances
Code Example #15
File: test_instance.py  Project: rlinus/sleap
def test_instance_node_multi_get_set_item(skeleton):
    """
    Test basic get item and set item functionality of instances.
    """
    node_names = ["head", "left-wing", "right-wing"]
    points = {
        "head": Point(1, 4),
        "left-wing": Point(2, 5),
        "right-wing": Point(3, 6)
    }

    instance1 = Instance(skeleton=skeleton, points=points)

    instance1[node_names] = list(points.values())

    x_values = [p.x for p in instance1[node_names]]
    y_values = [p.y for p in instance1[node_names]]

    assert np.allclose(x_values, [1, 2, 3])
    assert np.allclose(y_values, [4, 5, 6])

    np.testing.assert_array_equal(instance1[np.array([0, 2, 4])],
                                  [[1, 4], [np.nan, np.nan], [3, 6]])

    instance1[np.array([0, 1])] = [[1, 2], [3, 4]]
    np.testing.assert_array_equal(instance1[np.array([0, 1])],
                                  [[1, 2], [3, 4]])

    instance1[[0, 1]] = [[4, 3], [2, 1]]
    np.testing.assert_array_equal(instance1[np.array([0, 1])],
                                  [[4, 3], [2, 1]])

    instance1[["left-wing", "right-wing"]] = [[-4, -3], [-2, -1]]
    np.testing.assert_array_equal(instance1[np.array([3, 4])],
                                  [[-4, -3], [-2, -1]])
    assert instance1["left-wing"].x == -4
    assert instance1["left-wing"].y == -3
    assert instance1["right-wing"].x == -2
    assert instance1["right-wing"].y == -1
Code Example #16
def test_constructor():
    p = Point(x=1.0, y=2.0, visible=False, complete=True)
    assert p.x == 1.0
    assert p.y == 2.0
    assert p.visible == False
    assert p.complete == True

    p = PredictedPoint(x=1.0, y=2.0, visible=False, complete=True, score=0.3)
    assert p.x == 1.0
    assert p.y == 2.0
    assert p.visible == False
    assert p.complete == True
    assert p.score == 0.3
Code Example #17
File: deepposekit.py  Project: xuerenjie124/sleap
    def read(
        cls,
        file: FileHandle,
        video_path: str,
        skeleton_path: str,
        *args,
        **kwargs,
    ) -> Labels:
        f = file.file

        video = Video.from_filename(video_path)
        skeleton_data = pd.read_csv(skeleton_path, header=0)

        skeleton = Skeleton()
        skeleton.add_nodes(skeleton_data["name"])
        nodes = skeleton.nodes

        for name, parent, swap in skeleton_data.itertuples(index=False,
                                                           name=None):
            if parent is not np.nan:
                skeleton.add_edge(parent, name)

        lfs = []

        pose_matrix = f["pose"][:]

        track_count, frame_count, node_count, _ = pose_matrix.shape

        tracks = [Track(0, f"Track {i}") for i in range(track_count)]
        for frame_idx in range(frame_count):
            lf_instances = []
            for track_idx in range(track_count):
                points_array = pose_matrix[track_idx, frame_idx, :, :]
                points = dict()
                for p in range(len(points_array)):
                    x, y, score = points_array[p]
                    points[nodes[p]] = Point(x, y)  # TODO: score

                inst = Instance(skeleton=skeleton,
                                track=tracks[track_idx],
                                points=points)
                lf_instances.append(inst)
            lfs.append(
                LabeledFrame(video,
                             frame_idx=frame_idx,
                             instances=lf_instances))

        return Labels(labeled_frames=lfs)
Code Example #18
def make_mean_instance(aligned_points_arrays: List[np.ndarray],
                       std_thresh: int = 0) -> Instance:
    mean, stdev = get_mean_and_std_for_points(aligned_points_arrays)

    # Remove points with standard deviation higher than threshold
    if std_thresh:
        mean[stdev > std_thresh] = np.nan

    from sleap import Instance
    from sleap.instance import Point

    OFFSET = 0  # FIXME

    new_instance = Instance(
        skeleton=labels.skeletons[0],
        points=[Point(p[0] + OFFSET, p[1] + OFFSET) for p in mean],
    )
    return new_instance
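Both get_mean_and_std_for_points and labels come from the enclosing module and are not shown here. A hypothetical sketch of what the helper plausibly does, assuming each entry of aligned_points_arrays is a same-shape (n_nodes, 2) array (this is an illustration, not the SLEAP implementation):

import numpy as np
from typing import List, Tuple

def get_mean_and_std_for_points(
    aligned_points_arrays: List[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
    """Per-node mean and standard deviation across aligned point arrays,
    ignoring NaN entries (hypothetical sketch)."""
    stacked = np.stack(aligned_points_arrays)  # (n_instances, n_nodes, 2)
    return np.nanmean(stacked, axis=0), np.nanstd(stacked, axis=0)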
Code Example #19
File: test_dataset.py  Project: xuerenjie124/sleap
def test_labels_merge():
    dummy_video = Video(backend=MediaVideo)
    dummy_skeleton = Skeleton()
    dummy_skeleton.add_node("node")

    labels = Labels()
    dummy_frames = []

    # Add 10 instances with different points (so they aren't "redundant")
    for i in range(10):
        instance = Instance(skeleton=dummy_skeleton, points=dict(node=Point(i, i)))
        dummy_frame = LabeledFrame(dummy_video, frame_idx=0, instances=[instance])
        dummy_frames.append(dummy_frame)

    labels.labeled_frames.extend(dummy_frames)
    assert len(labels) == 10
    assert len(labels.labeled_frames[0].instances) == 1

    labels.merge_matching_frames()
    assert len(labels) == 1
    assert len(labels.labeled_frames[0].instances) == 10
Code Example #20
    def get_frame_instances(video_id, frame_idx):
        """ """
        is_in_frame = (points["videoId"] == video_id) & (
            points["frameIdx"] == frame_idx
        )
        if not is_in_frame.any():
            return []

        instances = []
        frame_instance_ids = np.unique(points["instanceId"][is_in_frame])
        for i, instance_id in enumerate(frame_instance_ids):
            is_instance = is_in_frame & (points["instanceId"] == instance_id)
            instance_points = {
                data["skeleton"]["nodeNames"][n]: Point(x, y, visible=v)
                for x, y, n, v in zip(
                    *[points[k][is_instance] for k in ["x", "y", "node", "visible"]]
                )
            }

            instance = Instance(skeleton=skeleton, points=instance_points)
            instances.append(instance)

        return instances
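This closure captures points, data, and skeleton from the enclosing legacy-JSON loader, none of which appear in the snippet. A hypothetical example of the structured-array layout that the field accesses above imply, purely to make them concrete:

import numpy as np

# Hypothetical layout; the real array is built by the surrounding loader.
points = np.array(
    [(0, 0, 0, 12.5, 30.0, 0, True),
     (0, 0, 0, 14.0, 31.5, 1, True)],
    dtype=[("videoId", "i4"), ("frameIdx", "i4"), ("instanceId", "i4"),
           ("x", "f8"), ("y", "f8"), ("node", "i4"), ("visible", "?")],
)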
Code Example #21
def test_skeleton_node_name_change():
    """
    Test that an instance is not broken after a node on the
    skeleton has its name changed.
    """

    s = Skeleton("Test")
    s.add_nodes(["a", "b", "c", "d", "e"])
    s.add_edge("a", "b")

    instance = Instance(s)
    instance["a"] = Point(1, 2)
    instance["b"] = Point(3, 4)

    # Rename the node
    s.relabel_nodes({"a": "A"})

    # Reference to the old node name should raise a KeyError
    with pytest.raises(KeyError):
        instance["a"].x = 2

    # Make sure that "A" now references the same point on the instance
    assert instance["A"] == Point(1, 2)
    assert instance["b"] == Point(3, 4)
Code Example #22
    def read_frames(
        cls,
        file: FileHandle,
        skeleton: Optional[Skeleton] = None,
        full_video: Optional[Video] = None,
        *args,
        **kwargs,
    ) -> List[LabeledFrame]:
        filename = file.filename

        data = pd.read_csv(filename, header=[1, 2])

        # Create the skeleton from the list of nodes in the csv file.
        # Note that DeepLabCut doesn't have edges, so these will need to be
        # added by the user later.
        node_names = [n[0] for n in list(data)[1::2]]

        if skeleton is None:
            skeleton = Skeleton()
            skeleton.add_nodes(node_names)

        img_files = data.iloc[:, 0]  # get list of all images

        if full_video:
            video = full_video
            index_frames_by_original_index = True
        else:
            # Create the Video object
            img_dir = os.path.dirname(filename)
            video = cls.make_video_for_image_list(img_dir, img_files)

            # The frames in the video we created will be indexed from 0 to N
            # rather than having their index from the original source video.
            index_frames_by_original_index = False

        frames = []
        for i in range(len(data)):
            # get points for each node
            instance_points = dict()
            for node in node_names:
                x, y = data[(node, "x")][i], data[(node, "y")][i]
                instance_points[node] = Point(x, y)

            # Create instance with points.
            # For DeepLabCut we're assuming there's a single instance per frame.
            instance = Instance(skeleton=skeleton, points=instance_points)

            if index_frames_by_original_index:
                # extract "0123" from "path/img0123.png" as original frame index
                frame_idx_match = re.search("(?<=img)(\\d+)(?=\\.png)", img_files[i])

                if frame_idx_match is not None:
                    frame_idx = int(frame_idx_match.group(0))
                else:
                    raise ValueError(
                        f"Unable to determine frame index for image {img_files[i]}"
                    )

            else:
                frame_idx = i

            # create labeledframe and add it to list
            frames.append(
                LabeledFrame(video=video, frame_idx=frame_idx, instances=[instance])
            )

        return frames
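cls.make_video_for_image_list(img_dir, img_files) is not shown in this snippet. A rough sketch of what such a helper could look like, assuming it resolves relative image paths against the CSV's directory and wraps them with Video.from_image_filenames (used in Example #24); the import path and behavior are assumptions, not the actual SLEAP code:

import os
from sleap.io.video import Video  # import path assumed

def make_video_for_image_list(img_dir, img_files):
    """Hypothetical helper: build an image-sequence Video from image filenames,
    resolving relative paths against img_dir."""
    paths = [f if os.path.isabs(f) else os.path.join(img_dir, f) for f in img_files]
    return Video.from_image_filenames(filenames=paths)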
Code Example #23
File: deeplabcut.py  Project: stallam-unb/sleap
    def read_frames(
        cls,
        file: FileHandle,
        skeleton: Optional[Skeleton] = None,
        full_video: Optional[Video] = None,
        *args,
        **kwargs,
    ) -> List[LabeledFrame]:
        filename = file.filename

        # Read CSV file.
        data = pd.read_csv(filename, header=[1, 2])

        # Check if this is in the new multi-animal format.
        is_multianimal = data.columns[0][0] == "individuals"

        if is_multianimal:
            # Reload with additional header rows if using new format.
            data = pd.read_csv(filename, header=[1, 2, 3])

            # Pull out animal and node names from the columns.
            animal_names = []
            node_names = []
            for animal_name, node_name, _ in data.columns[1:][::2]:
                if animal_name not in animal_names:
                    animal_names.append(animal_name)
                if node_name not in node_names:
                    node_names.append(node_name)

        else:
            # Create the skeleton from the list of nodes in the csv file.
            # Note that DeepLabCut doesn't have edges, so these will need to be
            # added by the user later.
            node_names = [n[0] for n in list(data)[1::2]]

        if skeleton is None:
            skeleton = Skeleton()
            skeleton.add_nodes(node_names)

        # Get list of all images filenames.
        img_files = data.iloc[:, 0]

        if full_video:
            video = full_video
            index_frames_by_original_index = True
        else:
            # Create the Video object
            img_dir = os.path.dirname(filename)
            video = cls.make_video_for_image_list(img_dir, img_files)

            # The frames in the video we created will be indexed from 0 to N
            # rather than having their index from the original source video.
            index_frames_by_original_index = False

        lfs = []
        for i in range(len(data)):

            # Figure out frame index to use.
            if index_frames_by_original_index:
                # Extract "0123" from "path/img0123.png" as original frame index.
                frame_idx_match = re.search("(?<=img)(\\d+)(?=\\.png)", img_files[i])

                if frame_idx_match is not None:
                    frame_idx = int(frame_idx_match.group(0))
                else:
                    raise ValueError(
                        f"Unable to determine frame index for image {img_files[i]}"
                    )
            else:
                frame_idx = i

            instances = []
            if is_multianimal:
                for animal_name in animal_names:
                    any_not_missing = False
                    # Get points for each node.
                    instance_points = dict()
                    for node in node_names:
                        x, y = (
                            data[(animal_name, node, "x")][i],
                            data[(animal_name, node, "y")][i],
                        )
                        instance_points[node] = Point(x, y)
                        if not (np.isnan(x) and np.isnan(y)):
                            any_not_missing = True

                    if any_not_missing:
                        # Create instance with points.
                        instances.append(
                            Instance(skeleton=skeleton, points=instance_points)
                        )
            else:
                # Get points for each node.
                instance_points = dict()
                for node in node_names:
                    x, y = data[(node, "x")][i], data[(node, "y")][i]
                    instance_points[node] = Point(x, y)

                # Create instance with points assuming there's a single instance per
                # frame.
                instances.append(Instance(skeleton=skeleton, points=instance_points))

            # Create LabeledFrame and add it to list.
            lfs.append(
                LabeledFrame(video=video, frame_idx=frame_idx, instances=instances)
            )

        return lfs
Code Example #24
    def read(
        cls,
        file: FileHandle,
        img_dir: str,
        use_missing_gui: bool = False,
        *args,
        **kwargs,
    ) -> Labels:

        dicts = file.json

        # Make skeletons from "categories"
        skeleton_map = dict()
        for category in dicts["categories"]:
            skeleton = Skeleton(name=category["name"])
            skeleton_id = category["id"]
            node_names = category["keypoints"]
            skeleton.add_nodes(node_names)

            try:
                for src_idx, dst_idx in category["skeleton"]:
                    skeleton.add_edge(node_names[src_idx], node_names[dst_idx])
            except IndexError as e:
                # According to the COCO data format specification [1], the edges
                # are supposed to be 1-indexed. But in some of their own
                # datasets the edges are 0-indexed! So we try 0-indexing first
                # and fall back to 1-indexing here.
                # [1]: http://cocodataset.org/#format-data

                # Clear any edges we already created using 0-indexing
                skeleton.clear_edges()

                # Add edges
                for src_idx, dst_idx in category["skeleton"]:
                    skeleton.add_edge(node_names[src_idx - 1], node_names[dst_idx - 1])

            skeleton_map[skeleton_id] = skeleton

        # Make videos from "images"

        # Remove images that aren't referenced in the annotations
        img_refs = [annotation["image_id"] for annotation in dicts["annotations"]]
        dicts["images"] = list(filter(lambda im: im["id"] in img_refs, dicts["images"]))

        # Key in JSON file should be "file_name", but sometimes it's "filename",
        # so we have to check both.
        img_filename_key = "file_name"
        if img_filename_key not in dicts["images"][0].keys():
            img_filename_key = "filename"

        # First add the img_dir to each image filename
        img_paths = [
            os.path.join(img_dir, image[img_filename_key]) for image in dicts["images"]
        ]

        # See if there are any missing files
        img_missing = [not os.path.exists(path) for path in img_paths]

        if sum(img_missing):
            if use_missing_gui:
                okay = MissingFilesDialog(img_paths, img_missing).exec_()

                if not okay:
                    return None
            else:
                raise FileNotFoundError(
                    f"Images for COCO dataset could not be found in {img_dir}."
                )

        # Update the image paths (with img_dir or user selected path)
        for image, path in zip(dicts["images"], img_paths):
            image[img_filename_key] = path

        # Create the video objects for the image files
        image_video_map = dict()

        vid_id_video_map = dict()
        for image in dicts["images"]:
            image_id = image["id"]
            image_filename = image[img_filename_key]

            # Sometimes images have a vid_id which links multiple images
            # together as one video. If so, we'll use that as the video key.
            # But if there isn't a vid_id, we'll treat each image as a
            # distinct video and use the image id as the video id.
            vid_id = image.get("vid_id", image_id)

            if vid_id not in vid_id_video_map:
                kwargs = dict(filenames=[image_filename])
                for key in ("width", "height"):
                    if key in image:
                        kwargs[key] = image[key]

                video = Video.from_image_filenames(**kwargs)
                vid_id_video_map[vid_id] = video
                frame_idx = 0
            else:
                video = vid_id_video_map[vid_id]
                frame_idx = video.num_frames
                video.backend.filenames.append(image_filename)

            image_video_map[image_id] = (video, frame_idx)

        # Make instances from "annotations"
        lf_map = dict()
        track_map = dict()
        for annotation in dicts["annotations"]:
            skeleton = skeleton_map[annotation["category_id"]]
            image_id = annotation["image_id"]
            video, frame_idx = image_video_map[image_id]
            keypoints = np.array(annotation["keypoints"], dtype="int").reshape(-1, 3)

            track = None
            if "track_id" in annotation:
                track_id = annotation["track_id"]
                if track_id not in track_map:
                    track_map[track_id] = Track(frame_idx, str(track_id))
                track = track_map[track_id]

            points = dict()
            any_visible = False
            for i in range(len(keypoints)):
                node = skeleton.nodes[i]
                x, y, flag = keypoints[i]

                if flag == 0:
                    # node not labeled for this instance
                    continue

                is_visible = flag == 2
                any_visible = any_visible or is_visible
                points[node] = Point(x, y, is_visible)

            if points:
                # If none of the points had 2 as the "visible" flag, we'll
                # assume this is incorrect and just mark them all as visible.
                if not any_visible:
                    for point in points.values():
                        point.visible = True

                inst = Instance(skeleton=skeleton, points=points, track=track)

                if image_id not in lf_map:
                    lf_map[image_id] = LabeledFrame(video, frame_idx)

                lf_map[image_id].insert(0, inst)

        return Labels(labeled_frames=list(lf_map.values()))
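For reference, COCO stores an instance's keypoints as a flat [x1, y1, v1, x2, y2, v2, ...] list, where the visibility flag v is 0 (not labeled), 1 (labeled but not visible), or 2 (labeled and visible); the reshape(-1, 3) above recovers one (x, y, flag) row per node. A small worked example:

import numpy as np

# Three keypoints: visible, not labeled, labeled but hidden.
keypoints = np.array([10, 20, 2, 0, 0, 0, 30, 40, 1], dtype="int").reshape(-1, 3)
# -> [[10, 20, 2], [0, 0, 0], [30, 40, 1]]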
Code Example #25
    def read(
        cls, file: FileHandle, gui: bool = True, *args, **kwargs,
    ):
        filename = file.filename

        mat_contents = sio.loadmat(filename)

        box_path = cls._unwrap_mat_scalar(mat_contents["boxPath"])

        # If the video file isn't found, try in the same dir as the mat file
        if not os.path.exists(box_path):
            file_dir = os.path.dirname(filename)
            box_path_name = box_path.split("\\")[-1]  # assume Windows path
            box_path = os.path.join(file_dir, box_path_name)

        if not os.path.exists(box_path):
            if gui:
                video_paths = [box_path]
                missing = [True]
                okay = MissingFilesDialog(video_paths, missing).exec_()

                if not okay or missing[0]:
                    return

                box_path = video_paths[0]
            else:
                # Ignore missing videos if not loading from gui
                box_path = ""

        if os.path.exists(box_path):
            vid = Video.from_hdf5(
                dataset="box", filename=box_path, input_format="channels_first"
            )
        else:
            vid = None

        nodes_ = mat_contents["skeleton"]["nodes"]
        edges_ = mat_contents["skeleton"]["edges"]
        points_ = mat_contents["positions"]

        edges_ = edges_ - 1  # convert MATLAB 1-indexing to Python 0-indexing

        nodes = cls._unwrap_mat_array(nodes_)
        edges = cls._unwrap_mat_array(edges_)

        nodes = list(map(str, nodes))  # convert np.str_ to str

        sk = Skeleton(name=filename)
        sk.add_nodes(nodes)
        for edge in edges:
            sk.add_edge(source=nodes[edge[0]], destination=nodes[edge[1]])

        labeled_frames = []
        node_count, _, frame_count = points_.shape

        for i in range(frame_count):
            new_inst = Instance(skeleton=sk)
            for node_idx, node in enumerate(nodes):
                x = points_[node_idx][0][i]
                y = points_[node_idx][1][i]
                new_inst[node] = Point(x, y)
            if len(new_inst.points):
                new_frame = LabeledFrame(video=vid, frame_idx=i)
                new_frame.instances = (new_inst,)
                labeled_frames.append(new_frame)

        labels = Labels(labeled_frames=labeled_frames, videos=[vid], skeletons=[sk])

        return labels
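The edges_ - 1 step above is the usual MATLAB-to-Python index shift; a one-line illustration:

import numpy as np

edges_matlab = np.array([[1, 2], [2, 3]])  # 1-indexed node pairs from the .mat file
edges_python = edges_matlab - 1            # [[0, 1], [1, 2]] -> valid indices into nodes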
Code Example #26
File: test_dataset.py  Project: JensBlack/sleap
def test_complex_merge():
    dummy_video_a = Video.from_filename("foo.mp4")
    dummy_video_b = Video.from_filename("foo.mp4")

    dummy_skeleton_a = Skeleton()
    dummy_skeleton_a.add_node("node")

    dummy_skeleton_b = Skeleton()
    dummy_skeleton_b.add_node("node")

    dummy_instances_a = []
    dummy_instances_a.append(
        Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(1, 1))))
    dummy_instances_a.append(
        Instance(skeleton=dummy_skeleton_a, points=dict(node=Point(2, 2))))

    labels_a = Labels()
    labels_a.append(
        LabeledFrame(dummy_video_a, frame_idx=0, instances=dummy_instances_a))

    dummy_instances_b = []
    dummy_instances_b.append(
        Instance(skeleton=dummy_skeleton_b, points=dict(node=Point(1, 1))))
    dummy_instances_b.append(
        Instance(skeleton=dummy_skeleton_b, points=dict(node=Point(3, 3))))

    labels_b = Labels()
    labels_b.append(
        LabeledFrame(dummy_video_b, frame_idx=0,
                     instances=dummy_instances_b))  # conflict
    labels_b.append(
        LabeledFrame(dummy_video_b, frame_idx=1,
                     instances=dummy_instances_b))  # clean

    merged, extra_a, extra_b = Labels.complex_merge_between(labels_a, labels_b)

    # Check that we have the cleanly merged frame
    assert dummy_video_a in merged
    assert len(merged[dummy_video_a]) == 1  # one merged frame
    assert len(merged[dummy_video_a][1]) == 2  # with two instances

    # Check that labels_a includes redundant and clean
    assert len(labels_a.labeled_frames) == 2
    assert len(labels_a.labeled_frames[0].instances) == 1
    assert labels_a.labeled_frames[0].instances[0].points[0].x == 1
    assert len(labels_a.labeled_frames[1].instances) == 2
    assert labels_a.labeled_frames[1].instances[0].points[0].x == 1
    assert labels_a.labeled_frames[1].instances[1].points[0].x == 3

    # Check that extra_a/b includes the appropriate conflicting instance
    assert len(extra_a) == 1
    assert len(extra_b) == 1
    assert len(extra_a[0].instances) == 1
    assert len(extra_b[0].instances) == 1
    assert extra_a[0].instances[0].points[0].x == 2
    assert extra_b[0].instances[0].points[0].x == 3

    # Check that objects were unified
    assert extra_a[0].video == extra_b[0].video

    # Check resolving the conflict using new
    Labels.finish_complex_merge(labels_a, extra_b)
    assert len(labels_a.labeled_frames) == 2
    assert len(labels_a.labeled_frames[0].instances) == 2
    assert labels_a.labeled_frames[0].instances[1].points[0].x == 3
Code Example #27
import numpy as np
import pytest

from sleap.instance import Point, PredictedPoint, PointArray, PredictedPointArray


@pytest.mark.parametrize(
    "p1",
    [
        Point(0.0, 0.0),
        PredictedPoint(0.0, 0.0, 0.0),
        PointArray(3)[0],
        PredictedPointArray(3)[0],
    ],
)
def test_point(p1):
    """
    Test the Point and PredictedPoint API. This is mainly a safety
    check to make sure numpy record array stuff doesn't change
    """

    # Make sure we are getting Points or PredictedPoints only.
    # This makes sure that PointArray(3)[0] returns a point for
    # example
    assert type(p1) in [PredictedPoint, Point]

    # Check getters and setters
    p1.x = 3.0
    assert p1.x == 3.0

    if type(p1) is PredictedPoint: