Example #1
def compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate):
    if fps is None:
        # if for some reason the video doesn't have an fps (because it doesn't
        # have a video stream), set the fps to 1. The value doesn't matter,
        # because video_pts is empty anyway
        fps = 1
    if frame_rate is None:
        frame_rate = fps
    # number of frames after resampling from the native fps to frame_rate
    total_frames = len(video_pts) * (float(frame_rate) / fps)
    # idxs = VideoClips._resample_video_idx(int(math.floor(total_frames)), fps, frame_rate)
    idxs = VideoClipsFast._resample_video_idx(
        int(math.floor(total_frames)), fps, frame_rate)
    video_pts = video_pts[idxs]
    # slide a window of num_frames timestamps over the video, moving step frames at a time
    clips = unfold(video_pts, num_frames, step)
    if isinstance(idxs, slice):
        # when idxs is a slice (a uniform stride), the same indexing applies to every clip
        idxs = [idxs] * len(clips)
    else:
        idxs = unfold(idxs, num_frames, step)
    return clips, idxs
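To make the unfold(video_pts, num_frames, step) step above concrete, here is a hedged toy run; the import path and the timestamp values are assumptions for illustration, and unfold's behaviour follows the tests in Example #2 below.

import torch
from torchvision.datasets.video_utils import unfold  # assumed import path

# six hypothetical presentation timestamps, 1000 pts apart
video_pts = torch.arange(0, 6000, 1000)
clips = unfold(video_pts, 3, 2)  # clips of 3 frames, a new clip every 2 frames
# tensor([[   0, 1000, 2000],
#         [2000, 3000, 4000]])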
Example #2
    def test_unfold(self):
        a = torch.arange(7)

        r = unfold(a, 3, 3, 1)
        expected = torch.tensor([
            [0, 1, 2],
            [3, 4, 5],
        ])
        assert_equal(r, expected)

        r = unfold(a, 3, 2, 1)
        expected = torch.tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6]])
        assert_equal(r, expected)

        r = unfold(a, 3, 2, 2)
        expected = torch.tensor([
            [0, 2, 4],
            [2, 4, 6],
        ])
        assert_equal(r, expected)
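These three cases pin down the semantics of unfold with a window size, a step, and a dilation (parameter names here are my own): each row is a window of size elements taken every dilation positions, and consecutive windows start step elements apart. Below is a minimal sketch that satisfies the tests, assuming a 1-D input tensor; it is a reference sketch, not necessarily the implementation under test.

import torch

def unfold_sketch(tensor, size, step, dilation=1):
    # each window spans dilation * (size - 1) + 1 elements;
    # consecutive windows start `step` elements apart
    assert tensor.dim() == 1
    span = dilation * (size - 1) + 1
    n_windows = max((tensor.numel() - span) // step + 1, 0)
    stride = tensor.stride(0)
    return torch.as_strided(tensor, (n_windows, size), (step * stride, dilation * stride))

For example, unfold_sketch(torch.arange(7), 3, 2, 2) reproduces the last expected tensor above.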
Example #3

def __call__(self, frame_indices):
    # `Tensor` is assumed to be imported from torch in the enclosing module
    frame_indices = Tensor(frame_indices)
    # sliding windows of `size` indices, moved `step` apart, with the given dilatation
    out = unfold(frame_indices, self.size, self.step, self.dilatation)
    out = out[:self.n_samples].int().tolist()  # keep at most n_samples clips as int lists
    return out
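A hedged walk-through of this __call__ body with example attribute values (size=3, step=2, dilatation=1, n_samples=2 are assumptions chosen for illustration; the import path for unfold is also assumed):

import torch
from torchvision.datasets.video_utils import unfold  # assumed import path

frame_indices = torch.Tensor(list(range(7)))  # [0., 1., ..., 6.]
out = unfold(frame_indices, 3, 2, 1)          # windows of 3 indices, a new one every 2 frames
out = out[:2].int().tolist()                  # keep the first n_samples clips as int lists
# [[0, 1, 2], [2, 3, 4]]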