Code example #1
    def write(
        cls,
        filename: str,
        source_object: str,
        compress: Optional[bool] = None,
        save_frame_data: bool = False,
        frame_data_format: str = "png",
    ):
        """
        Save a Labels instance to a JSON format.

        Args:
            filename: The filename to save the data to.
            source_object: The labels dataset to save.
            compress: Whether to compress the saved data into a zip archive.
                If None (the default), compression is inferred from whether
                filename ends with ".zip". If True, the JSON will be
                compressed using Python's shutil.make_archive into a PKZIP
                zip file and ".zip" will be appended to filename.
            save_frame_data: Whether to save the image data for each frame.
                For each video in the dataset, all frames that have labels
                will be stored as an imgstore dataset.
                If save_frame_data is True then compress will be forced to True
                since the archive must contain both the JSON data and image
                data stored in ImgStores.
            frame_data_format: If save_frame_data is True, the data format
                to use when writing frame data to ImgStore objects.
                Supported formats are:

                 * 'pgm',
                 * 'bmp',
                 * 'ppm',
                 * 'tif',
                 * 'png',
                 * 'jpg',
                 * 'npy',
                 * 'mjpeg/avi',
                 * 'h264/mkv',
                 * 'avc1/mp4'

                 Note: 'h264/mkv' and 'avc1/mp4' require separate installation
                 of these codecs on your system. They are excluded from SLEAP
                 because of their GPL license.

        Returns:
            None
        """

        labels = source_object

        if compress is None:
            compress = filename.endswith(".zip")

        # Make a temporary directory to store the image frame data or the
        # pre-compressed JSON, in case we need it.
        with tempfile.TemporaryDirectory() as tmp_dir:

            # If we are saving frame data along with the dataset, replace each
            # video with a new video object that represents just the labeled
            # frames.
            if save_frame_data:

                # Create a set of new Video objects with imgstore backends,
                # one per video, containing only the labeled frames. Each
                # original video will then be replaced by its new counterpart.
                new_videos = labels.save_frame_data_imgstore(
                    output_dir=tmp_dir, format=frame_data_format)

                # Make video paths relative
                for vid in new_videos:
                    tmp_path = vid.filename
                    # Get the parent dir of the YAML file.
                    # Use "/" since this works on both Windows and POSIX.
                    img_store_dir = (
                        os.path.basename(os.path.split(tmp_path)[0]) + "/" +
                        os.path.basename(tmp_path))
                    # Change to relative path
                    vid.backend.filename = img_store_dir

                # Convert to a dict, not JSON yet, because we need to patch up the videos
                d = labels.to_dict()
                d["videos"] = Video.cattr().unstructure(new_videos)

            else:
                d = labels.to_dict()

            # Set file format version
            d["format_id"] = cls.FORMAT_ID

            if compress or save_frame_data:

                # Ensure that filename ends with .json
                # shutil will append .zip
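                # e.g., "labels.json.zip" and "labels.zip" both normalize to
                # "labels.json"; make_archive then writes "labels.json.zip".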
                filename = re.sub(r"(\.json)?(\.zip)?$", ".json", filename)

                # Write the JSON to the tmp directory; we will zip it up with the frame data.
                full_out_filename = os.path.join(tmp_dir,
                                                 os.path.basename(filename))
                json_dumps(d, full_out_filename)

                # Create the archive
                shutil.make_archive(base_name=filename,
                                    root_dir=tmp_dir,
                                    format="zip")

            # If the user doesn't want compression, just write the JSON to the filename.
            else:
                json_dumps(d, filename)
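
A minimal usage sketch for this writer. The class name is not shown in this snippet; LabelsJsonAdaptor is assumed from the reference in code example #3, and the module path and filenames are illustrative:

    from sleap.io.dataset import Labels
    from sleap.io.format.labels_json import LabelsJsonAdaptor  # assumed path

    labels = Labels.load_file("session.slp")  # any existing labels dataset

    # Plain JSON: compress defaults to None and is inferred as False here
    # because the filename does not end with ".zip".
    LabelsJsonAdaptor.write("session.json", labels)

    # Bundle frame images as imgstore datasets; this forces zip compression,
    # so "session.json.zip" is produced.
    LabelsJsonAdaptor.write(
        "session.json", labels, save_frame_data=True, frame_data_format="png"
    )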
Code example #2
    def from_json_data(cls,
                       data: Union[str, dict],
                       match_to: Optional["Labels"] = None) -> "Labels":
        """
        Create an instance of this class from data in a dictionary.

        This method is used by other methods that load from JSON.

        Args:
            data: Dictionary, deserialized from JSON.
            match_to: If given, we'll replace particular objects in the
                data dictionary with *matching* objects in the match_to
                :class:`Labels` object. This ensures that the newly
                instantiated :class:`Labels` can be merged without
                duplicate matching objects (e.g., :class:`Video` objects).

        Returns:
            A new :class:`Labels` object.
        """

        # Parse the JSON string if needed.
        if isinstance(data, str):
            dicts = json_loads(data)
        else:
            dicts = data

        # Don't break if the JSON doesn't include tracks.
        dicts["tracks"] = dicts.get("tracks", [])

        # First, deserialize the skeletons, videos, and nodes lists.
        # The labels reference these so we will need them while deserializing.
        nodes = cattr.structure(dicts["nodes"], List[Node])

        idx_to_node = {i: nodes[i] for i in range(len(nodes))}
        skeletons = Skeleton.make_cattr(idx_to_node).structure(
            dicts["skeletons"], List[Skeleton])
        videos = Video.cattr().structure(dicts["videos"], List[Video])

        try:
            # First try structuring as tuples (newer format).
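            # e.g. (illustrative): a tuple-encoded track might look like
            # [0, "track_0"], while the older dict form looks like
            # {"spawned_on": 0, "name": "track_0"} (field names assumed from
            # the Track class).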
            track_cattr = cattr.Converter(
                unstruct_strat=cattr.UnstructureStrategy.AS_TUPLE)
            tracks = track_cattr.structure(dicts["tracks"], List[Track])
        except Exception:
            # Then try structuring as dicts (older format).
            try:
                tracks = cattr.structure(dicts["tracks"], List[Track])
            except Exception:
                raise ValueError("Unable to load tracks as tuple or dict!")

        # If we're given a Labels object to match, use its objects when they match.
        if match_to is not None:
            for idx, sk in enumerate(skeletons):
                for old_sk in match_to.skeletons:
                    if sk.matches(old_sk):
                        # use nodes from matched skeleton
                        for (node, match_node) in zip(sk.nodes, old_sk.nodes):
                            node_idx = nodes.index(node)
                            nodes[node_idx] = match_node
                        # use skeleton from match
                        skeletons[idx] = old_sk
                        break
            for idx, vid in enumerate(videos):
                for old_vid in match_to.videos:

                    # Try to match videos using either their current or source filename
                    # if available.
                    old_vid_paths = [old_vid.filename]
                    if getattr(old_vid.backend, "has_embedded_images", False):
                        old_vid_paths.append(
                            old_vid.backend._source_video.filename)

                    new_vid_paths = [vid.filename]
                    if getattr(vid.backend, "has_embedded_images", False):
                        new_vid_paths.append(
                            vid.backend._source_video.filename)

                    is_match = False
                    for old_vid_path in old_vid_paths:
                        for new_vid_path in new_vid_paths:
                            if old_vid_path == new_vid_path or weak_filename_match(
                                    old_vid_path, new_vid_path):
                                is_match = True
                                videos[idx] = old_vid
                                break
                        if is_match:
                            break
                    if is_match:
                        break

        suggestions = []
        if "suggestions" in dicts:
            suggestions_cattr = cattr.Converter()
            suggestions_cattr.register_structure_hook(
                Video, lambda x, type: videos[int(x)])
            try:
                suggestions = suggestions_cattr.structure(
                    dicts["suggestions"], List[SuggestionFrame])
            except Exception as e:
                print("Error while loading suggestions (1)")
                print(e)

                try:
                    # Convert old suggestion format to new format.
                    # Old format: {video: list of frame indices}
                    # New format: [SuggestionFrames]
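                    # e.g. (illustrative): {"0": [12, 40]} becomes
                    # [SuggestionFrame(videos[0], 12), SuggestionFrame(videos[0], 40)]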
                    old_suggestions = suggestions_cattr.structure(
                        dicts["suggestions"], Dict[Video, List])
                    for video in old_suggestions.keys():
                        suggestions.extend([
                            SuggestionFrame(video, idx)
                            for idx in old_suggestions[video]
                        ])
                except Exception as e:
                    print("Error while loading suggestions (2)")
                    print(e)

        if "negative_anchors" in dicts:
            negative_anchors_cattr = cattr.Converter()
            negative_anchors_cattr.register_structure_hook(
                Video, lambda x, type: videos[int(x)])
            negative_anchors = negative_anchors_cattr.structure(
                dicts["negative_anchors"], Dict[Video, List])
        else:
            negative_anchors = dict()

        if "provenance" in dicts:
            provenance = dicts["provenance"]
        else:
            provenance = dict()

        # If there is actual labels data, get it.
        if "labels" in dicts:
            label_cattr = make_instance_cattr()
            label_cattr.register_structure_hook(
                Skeleton, lambda x, type: skeletons[int(x)])
            label_cattr.register_structure_hook(Video,
                                                lambda x, type: videos[int(x)])
            label_cattr.register_structure_hook(
                Node, lambda x, type: x
                if isinstance(x, Node) else nodes[int(x)])
            label_cattr.register_structure_hook(
                Track, lambda x, type: None if x is None else tracks[int(x)])

            labels = label_cattr.structure(dicts["labels"], List[LabeledFrame])
        else:
            labels = []

        return Labels(
            labeled_frames=labels,
            videos=videos,
            skeletons=skeletons,
            nodes=nodes,
            suggestions=suggestions,
            negative_anchors=negative_anchors,
            tracks=tracks,
            provenance=provenance,
        )
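
A short usage sketch, again assuming the LabelsJsonAdaptor class name from code example #3; existing_labels stands in for a Labels object already open in memory:

    from sleap.io.format.labels_json import LabelsJsonAdaptor  # assumed path

    with open("session.json") as f:
        data = f.read()

    # Standalone load from a JSON string.
    new_labels = LabelsJsonAdaptor.from_json_data(data)

    # For merging into an open project, pass match_to so matching Video,
    # Skeleton, and Node objects are reused rather than duplicated.
    mergeable = LabelsJsonAdaptor.from_json_data(data, match_to=existing_labels)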
Code example #3
File: hdf5.py Project: stallam-unb/sleap
    def write(
        cls,
        filename: str,
        source_object: object,
        append: bool = False,
        save_frame_data: bool = False,
        frame_data_format: str = "png",
        all_labeled: bool = False,
        suggested: bool = False,
    ):

        labels = source_object

        # Delete the file if it exists. We want to start from scratch, since
        # h5py's truncation does not seem to actually remove old data from
        # the file. Don't delete if we are appending, of course.
        if os.path.exists(filename) and not append:
            os.unlink(filename)

        # Serialize all the meta-data to JSON.
        d = labels.to_dict(skip_labels=True)

        if save_frame_data:
            new_videos = labels.save_frame_data_hdf5(
                filename,
                format=frame_data_format,
                user_labeled=True,
                all_labeled=all_labeled,
                suggested=suggested,
            )

            # Replace path to video file with "." (which indicates that the
            # video is in the same file as the HDF5 labels dataset).
            # Otherwise, the video paths will break if the HDF5 labels
            # dataset file is moved.
            for vid in new_videos:
                vid.backend.filename = "."

            d["videos"] = Video.cattr().unstructure(new_videos)

        with h5py.File(filename, "a") as f:

            # Add all the JSON metadata
            meta_group = f.require_group("metadata")

            meta_group.attrs["format_id"] = cls.FORMAT_ID

            # If we are appending and JSON metadata already exists, read the
            # old JSON so we can append to its lists.
            if append and "json" in meta_group.attrs:

                old_labels = labels_json.LabelsJsonAdaptor.from_json_data(
                    meta_group.attrs["json"].tostring().decode()
                )

                # A helper that joins two lists, keeping only new
                # (non-duplicate) entries from the right-hand list.
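                # e.g. (illustrative): append_unique([a, b], [b2, c]) returns
                # [a, b2, c] when b2.matches(b): the matched slot is replaced
                # by the new object and only c is appended as truly new.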
                def append_unique(old, new):
                    unique = []
                    for x in new:
                        try:
                            matches = [y.matches(x) for y in old]
                        except AttributeError:
                            matches = [x == y for y in old]

                        # If there were no matches, this is a unique object.
                        if sum(matches) == 0:
                            unique.append(x)
                        else:
                            # If we have an object that matches, replace the
                            # instance with the one from the new list. This
                            # makes sure objects on the Instances are the same
                            # as those in the Labels lists.
                            for i, match in enumerate(matches):
                                if match:
                                    old[i] = x

                    return old + unique

                # Append the lists
                labels.tracks = append_unique(old_labels.tracks, labels.tracks)
                labels.skeletons = append_unique(old_labels.skeletons, labels.skeletons)
                labels.videos = append_unique(old_labels.videos, labels.videos)
                labels.nodes = append_unique(old_labels.nodes, labels.nodes)

                # FIXME: Do something for suggestions and negative_anchors

                # Get the dict for JSON and save it over the old data
                d = labels.to_dict(skip_labels=True)

            if not append:
                # These items are stored in separate datasets because the
                # metadata group grew too big.
                for key in ("videos", "tracks", "suggestions"):
                    # Convert for saving in hdf5 dataset
                    data = [np.string_(json_dumps(item)) for item in d[key]]

                    hdf5_key = f"{key}_json"

                    # Save in its own dataset (e.g., videos_json)
                    f.create_dataset(hdf5_key, data=data, maxshape=(None,))

                    # Clear from dict since we don't want to save this in attribute
                    d[key] = []

            # Output the dict to JSON
            meta_group.attrs["json"] = np.string_(json_dumps(d))

            # FIXME: We can probably construct these from attrs fields.
            # We will store Instances and PredictedInstances in the same
            # table: instance_type=0 for Instance and instance_type=1 for
            # PredictedInstance; score will be ignored for Instances.
            instance_dtype = np.dtype(
                [
                    ("instance_id", "i8"),
                    ("instance_type", "u1"),
                    ("frame_id", "u8"),
                    ("skeleton", "u4"),
                    ("track", "i4"),
                    ("from_predicted", "i8"),
                    ("score", "f4"),
                    ("point_id_start", "u8"),
                    ("point_id_end", "u8"),
                ]
            )
            frame_dtype = np.dtype(
                [
                    ("frame_id", "u8"),
                    ("video", "u4"),
                    ("frame_idx", "u8"),
                    ("instance_id_start", "u8"),
                    ("instance_id_end", "u8"),
                ]
            )

            num_instances = len(labels.all_instances)
            max_skeleton_size = max([len(s.nodes) for s in labels.skeletons], default=0)

            # Initialize data arrays for serialization
            points = np.zeros(num_instances * max_skeleton_size, dtype=Point.dtype)
            pred_points = np.zeros(
                num_instances * max_skeleton_size, dtype=PredictedPoint.dtype
            )
            instances = np.zeros(num_instances, dtype=instance_dtype)
            frames = np.zeros(len(labels), dtype=frame_dtype)

            # Precompute some structures to make serialization faster.
            skeleton_to_idx = {
                skeleton: labels.skeletons.index(skeleton)
                for skeleton in labels.skeletons
            }
            track_to_idx = {
                track: labels.tracks.index(track) for track in labels.tracks
            }
            track_to_idx[None] = -1
            video_to_idx = {
                video: labels.videos.index(video) for video in labels.videos
            }
            instance_type_to_idx = {Instance: 0, PredictedInstance: 1}

            # Each instance we create will have an index in the dataset; keep
            # track of these so we can quickly add from_predicted links on a
            # second pass.
            instance_to_idx = {}
            instances_with_from_predicted = []
            instances_from_predicted = []

            # If we are appending, we need to look inside to see which frame,
            # instance, and point ids we need to start from. This gives us
            # offsets to use.
            if append and "points" in f:
                point_id_offset = f["points"].shape[0]
                pred_point_id_offset = f["pred_points"].shape[0]
                instance_id_offset = f["instances"][-1]["instance_id"] + 1
                frame_id_offset = int(f["frames"][-1]["frame_id"]) + 1
            else:
                point_id_offset = 0
                pred_point_id_offset = 0
                instance_id_offset = 0
                frame_id_offset = 0

            point_id = 0
            pred_point_id = 0
            instance_id = 0

            for frame_id, label in enumerate(labels):
                frames[frame_id] = (
                    frame_id + frame_id_offset,
                    video_to_idx[label.video],
                    label.frame_idx,
                    instance_id + instance_id_offset,
                    instance_id + instance_id_offset + len(label.instances),
                )
                for instance in label.instances:

                    # Add this instance to the lookup structure we will need
                    # for from_predicted links.
                    instance_to_idx[instance] = instance_id

                    parray = instance.get_points_array(copy=False, full=True)
                    instance_type = type(instance)

                    # Check whether we are working with a PredictedInstance or an
                    # Instance.
                    if instance_type is PredictedInstance:
                        score = instance.score
                        pid = pred_point_id + pred_point_id_offset
                    else:
                        score = np.nan
                        pid = point_id + point_id_offset

                        # Keep track of any from_predicted instance links; we
                        # will insert the correct instance_id in the dataset
                        # after we are done.
                        if instance.from_predicted:
                            instances_with_from_predicted.append(instance_id)
                            instances_from_predicted.append(instance.from_predicted)

                    # Copy all the data
                    instances[instance_id] = (
                        instance_id + instance_id_offset,
                        instance_type_to_idx[instance_type],
                        frame_id,
                        skeleton_to_idx[instance.skeleton],
                        track_to_idx[instance.track],
                        -1,
                        score,
                        pid,
                        pid + len(parray),
                    )

                    # If these are predicted points, copy them into the
                    # predicted point array; otherwise, use the normal point
                    # array.
                    if type(parray) is PredictedPointArray:
                        pred_points[
                            pred_point_id : (pred_point_id + len(parray))
                        ] = parray
                        pred_point_id = pred_point_id + len(parray)
                    else:
                        points[point_id : (point_id + len(parray))] = parray
                        point_id = point_id + len(parray)

                    instance_id = instance_id + 1

            # Add from_predicted links
            for instance_id, from_predicted in zip(
                instances_with_from_predicted, instances_from_predicted
            ):
                try:
                    instances[instance_id]["from_predicted"] = instance_to_idx[
                        from_predicted
                    ]
                except KeyError:
                    # If we haven't encountered the from_predicted instance yet then
                    # don't save the link. It's possible for a user to create a regular
                    # instance from a predicted instance and then delete all predicted
                    # instances from the file, but in this case I don’t think there's
                    # any reason to remember which predicted instance the regular
                    # instance came from.
                    pass

            # We pre-allocated the point arrays at the maximum possible size
            # (num_instances * max_skeleton_size); drop any unused rows.
            points = points[0:point_id]
            pred_points = pred_points[0:pred_point_id]

            # Resize existing datasets if appending; otherwise create them.
            if append and "points" in f:
                f["points"].resize((f["points"].shape[0] + points.shape[0]), axis=0)
                f["points"][-points.shape[0] :] = points
                f["pred_points"].resize(
                    (f["pred_points"].shape[0] + pred_points.shape[0]), axis=0
                )
                f["pred_points"][-pred_points.shape[0] :] = pred_points
                f["instances"].resize(
                    (f["instances"].shape[0] + instances.shape[0]), axis=0
                )
                f["instances"][-instances.shape[0] :] = instances
                f["frames"].resize((f["frames"].shape[0] + frames.shape[0]), axis=0)
                f["frames"][-frames.shape[0] :] = frames
            else:
                f.create_dataset(
                    "points", data=points, maxshape=(None,), dtype=Point.dtype
                )
                f.create_dataset(
                    "pred_points",
                    data=pred_points,
                    maxshape=(None,),
                    dtype=PredictedPoint.dtype,
                )
                f.create_dataset(
                    "instances", data=instances, maxshape=(None,), dtype=instance_dtype
                )
                f.create_dataset(
                    "frames", data=frames, maxshape=(None,), dtype=frame_dtype
                )
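
A usage sketch for the HDF5 writer. The adaptor's class name is not visible in this snippet, so HDF5Adaptor below is a placeholder, and more_labels stands in for a second Labels object:

    from sleap.io.dataset import Labels
    from sleap.io.format.hdf5 import HDF5Adaptor  # placeholder class name

    labels = Labels.load_file("session.slp")

    # Fresh write: any existing file is deleted first (see os.unlink above).
    HDF5Adaptor.write("labels.v1.h5", labels)

    # Append a second batch; frame/instance/point id offsets are read from
    # the existing datasets so the tables stay contiguous.
    HDF5Adaptor.write("labels.v1.h5", more_labels, append=True)

    # Bundle frame images into the same file; video paths become "." so the
    # dataset stays self-contained when moved.
    HDF5Adaptor.write("labels.pkg.h5", labels, save_frame_data=True)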