def read(
    cls,
    file: FileHandle,
    *args,
    **kwargs,
) -> Labels:
    """Reads labels from a DeepLabCut project given its YAML config file."""
    filename = file.filename

    # Load data from the YAML file
    project_data = yaml.load(file.text, Loader=yaml.SafeLoader)

    # Create skeleton which we'll use for each video
    skeleton = Skeleton()
    skeleton.add_nodes(project_data["bodyparts"])

    # Get subdirectories of videos and labeled data
    root_dir = os.path.dirname(filename)
    videos_dir = os.path.join(root_dir, "videos")
    labeled_data_dir = os.path.join(root_dir, "labeled-data")

    with os.scandir(labeled_data_dir) as file_iterator:
        # Use "entry" rather than "file" so we don't shadow the parameter.
        data_subdirs = [entry.path for entry in file_iterator if entry.is_dir()]

    labeled_frames = []

    # Each subdirectory of labeled data corresponds to a video.
    # We'll go through each and import the labeled frames.
    for data_subdir in data_subdirs:
        csv_files = find_files_by_suffix(
            data_subdir, prefix="CollectedData", suffix=".csv"
        )

        if csv_files:
            csv_path = csv_files[0]

            # Try to find a full video corresponding to this subdir.
            # If subdirectory is foo, we look for foo.mp4 in videos dir.
            shortname = os.path.split(data_subdir)[-1]
            video_path = os.path.join(videos_dir, f"{shortname}.mp4")

            if os.path.exists(video_path):
                video = Video.from_filename(video_path)
            else:
                # When no video is found, the individual frame images
                # stored in the labeled data subdir will be used.
                print(
                    f"Unable to find {video_path} so using individual frame images."
                )
                video = None

            # Import the labeled frames
            labeled_frames.extend(
                LabelsDeepLabCutCsvAdaptor.read_frames(
                    FileHandle(csv_path), full_video=video, skeleton=skeleton
                )
            )
        else:
            print(f"No csv data file found in {data_subdir}")

    return Labels(labeled_frames=labeled_frames)
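# A minimal usage sketch for the reader above. It assumes the method lives on
# LabelsDeepLabCutYamlAdaptor (the YAML counterpart of the CSV adaptor it
# calls); the helper name and project path below are hypothetical.
def _demo_import_dlc_project():
    config = FileHandle("path/to/dlc-project/config.yaml")  # hypothetical path
    labels = LabelsDeepLabCutYamlAdaptor.read(config)
    print(f"Imported {len(labels.labeled_frames)} labeled frames.")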
def demo_receptive_field():
    app = QtWidgets.QApplication([])

    video = Video.from_filename("tests/data/videos/centered_pair_small.mp4")

    win = ReceptiveFieldImageWidget()
    win.setImage(video.get_frame(0))
    win._set_field_size(50)
    win.show()

    app.exec_()
def read(
    cls,
    file: FileHandle,
    video_path: str,
    skeleton_path: str,
    *args,
    **kwargs,
) -> Labels:
    """Reads labels from a DeepPoseKit-style HDF5 file plus a skeleton CSV."""
    f = file.file

    video = Video.from_filename(video_path)

    # Build the skeleton from the CSV, which has one row per node with
    # columns: name, parent, swap.
    skeleton_data = pd.read_csv(skeleton_path, header=0)
    skeleton = Skeleton()
    skeleton.add_nodes(skeleton_data["name"])
    nodes = skeleton.nodes

    for name, parent, swap in skeleton_data.itertuples(index=False, name=None):
        # Use pd.isnull() rather than an identity check against np.nan,
        # which is fragile for values parsed from a CSV.
        if not pd.isnull(parent):
            skeleton.add_edge(parent, name)

    lfs = []

    # shape: tracks * frames * nodes * (x, y, score)
    pose_matrix = f["pose"][:]
    track_count, frame_count, node_count, _ = pose_matrix.shape

    tracks = [Track(0, f"Track {i}") for i in range(track_count)]

    for frame_idx in range(frame_count):
        lf_instances = []
        for track_idx in range(track_count):
            points_array = pose_matrix[track_idx, frame_idx, :, :]
            points = dict()
            for p in range(len(points_array)):
                x, y, score = points_array[p]
                points[nodes[p]] = Point(x, y)  # TODO: score

            inst = Instance(
                skeleton=skeleton, track=tracks[track_idx], points=points
            )
            lf_instances.append(inst)
        lfs.append(
            LabeledFrame(video, frame_idx=frame_idx, instances=lf_instances)
        )

    return Labels(labeled_frames=lfs)
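# Sketch of how one might inspect the "pose" dataset the reader above
# consumes, assuming shape (tracks, frames, nodes, 3) with the last axis
# holding (x, y, score). The helper name and path are hypothetical.
def _demo_inspect_pose_h5(path: str = "path/to/annotations.h5"):
    import h5py

    with h5py.File(path, "r") as f:
        pose = f["pose"][:]
        track_count, frame_count, node_count, _ = pose.shape
        print(f"{track_count} tracks, {frame_count} frames, {node_count} nodes")
        # x/y coordinates of node 0 in track 0 across all frames:
        print(pose[0, :, 0, :2])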
def read(
    cls,
    file: FileHandle,
    video: Union[Video, str],
    *args,
    **kwargs,
) -> Labels:
    connect_adj_nodes = False

    if video is None:
        raise ValueError("Cannot read analysis hdf5 if no video specified.")

    if not isinstance(video, Video):
        video = Video.from_filename(video)

    f = file.file
    tracks_matrix = f["tracks"][:].T
    track_names_list = f["track_names"][:].T
    node_names_list = f["node_names"][:].T

    # shape: frames * nodes * 2 * tracks
    frame_count, node_count, _, track_count = tracks_matrix.shape

    tracks = [Track(0, track_name.decode()) for track_name in track_names_list]

    skeleton = Skeleton()
    last_node_name = None
    for node_name in node_names_list:
        node_name = node_name.decode()
        skeleton.add_node(node_name)
        if connect_adj_nodes and last_node_name:
            skeleton.add_edge(last_node_name, node_name)
        last_node_name = node_name

    frames = []
    for frame_idx in range(frame_count):
        instances = []
        for track_idx in range(track_count):
            points = tracks_matrix[frame_idx, ..., track_idx]
            if not np.all(np.isnan(points)):
                point_scores = np.ones(len(points))
                # make everything a PredictedInstance since the usual use
                # case is to export predictions for analysis
                instances.append(
                    PredictedInstance.from_arrays(
                        points=points,
                        point_confidences=point_scores,
                        skeleton=skeleton,
                        track=tracks[track_idx],
                        instance_score=1,
                    )
                )
        if instances:
            frames.append(
                LabeledFrame(video=video, frame_idx=frame_idx, instances=instances)
            )

    return Labels(labeled_frames=frames)
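# Sketch of the analysis HDF5 layout the reader above expects, inferred from
# its transposes: "tracks" is stored as (tracks, 2, nodes, frames), so .T
# yields (frames, nodes, 2, tracks), and the name datasets hold byte strings.
# The helper name and output path are hypothetical; this just writes a tiny
# synthetic file in that layout.
def _demo_write_minimal_analysis_h5(path: str = "demo_analysis.h5"):
    import h5py
    import numpy as np

    track_count, node_count, frame_count = 1, 3, 2
    tracks = np.random.rand(track_count, 2, node_count, frame_count)
    with h5py.File(path, "w") as f:
        f.create_dataset("tracks", data=tracks)
        f.create_dataset("track_names", data=[b"track_0"])
        f.create_dataset("node_names", data=[b"head", b"thorax", b"abdomen"])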