Example #1
def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):
    """Tracks a given pattern (small image array) in a video clip.

    Returns [(x1,y1),(x2,y2)...] where xi,yi are the coordinates of the pattern in the
    clip on frame i. To select the frames you can either specify a list of times with
    ``tt`` or select a frame rate with ``fps``.

    This algorithm assumes that the pattern's aspect does not vary much and that the
    distance between two occurrences of the pattern in two consecutive frames is smaller
    than ``radius`` (if you set ``radius`` to -1 the pattern will be searched in the
    whole screen at each frame). You can also provide the original position of the
    pattern with ``xy0``.
    """
    if not autotracking_possible:
        raise IOError("Sorry, autotrack requires OpenCV for the moment. "
                      "Install OpenCV (aka cv2) to use it.")

    # Compute the time frames first so that ``tt[0]`` is defined below.
    if tt is None:
        tt = np.arange(0, clip.duration, 1.0 / fps)

    if xy0 is None:
        xy0 = findAround(clip.get_frame(tt[0]), pattern)

    xys = [xy0]
    for t in tt[1:]:
        xys.append(findAround(clip.get_frame(t), pattern, xy=xys[-1],
                              r=radius))

    xx, yy = zip(*xys)

    return Trajectory(tt, xx, yy)
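A minimal usage sketch of the function above (the clip path and pattern image are illustrative; it assumes OpenCV is installed and that ``autoTrack`` is importable from ``moviepy.video.tools.tracking``):

import imageio
from moviepy import VideoFileClip
from moviepy.video.tools.tracking import autoTrack

clip = VideoFileClip("media/chaplin.mp4")   # illustrative clip path
pattern = imageio.imread("pattern.png")     # small RGB image of the object to follow (illustrative)
# Sample 5 frames per second, searching within 20 pixels of the previous match.
traj = autoTrack(clip, pattern, fps=5, radius=20)
print(traj.tt, traj.xx, traj.yy)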
Example #2
def test_Trajectory_from_to_file(util):
    filename = os.path.join(util.TMP_DIR,
                            "moviepy_Trajectory_from_to_file.txt")
    if os.path.isfile(filename):
        try:
            os.remove(filename)
        except PermissionError:
            pass

    trajectory_file_content = """# t(ms)	x	y
0	554	100
166	474	90
333	384	91
"""

    with open(filename, "w") as f:
        f.write(trajectory_file_content)

    trajectory = Trajectory.from_file(filename)

    assert np.array_equal(trajectory.xx, np.array([554, 474, 384]))
    assert np.array_equal(trajectory.yy, np.array([100, 90, 91]))
    assert np.array_equal(trajectory.tt, np.array([0, 0.166, 0.333]))

    trajectory.to_file(filename)

    with open(filename, "r") as f:
        assert f.read() == "\n".join(trajectory_file_content.split("\n")[1:])
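Outside the test, the same round trip looks like this; note that the file stores times in milliseconds (the ``t(ms)`` column) while the ``Trajectory`` object keeps them in seconds, as the assertions above show (file names are illustrative):

from moviepy.video.tools.tracking import Trajectory

traj = Trajectory.from_file("moviepy_Trajectory_from_to_file.txt")
print(traj.tt)                       # times converted from milliseconds to seconds: 0, 0.166, 0.333
traj.to_file("trajectory_copy.txt")  # writes the tab-separated data back out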
Example #3
def test_PR_373():
    result = Trajectory.load_list("media/traj.txt")

    Trajectory.save_list(result, os.path.join(TMP_DIR, "traj1.txt"))

    result1 = Trajectory.load_list(os.path.join(TMP_DIR, "traj1.txt"))

    assert len(result[0].tt) == len(result1[0].tt)
    for i in range(len(result[0].tt)):
        assert result[0].tt[i] == result1[0].tt[i]

    assert len(result[0].xx) == len(result1[0].xx)
    for i in range(len(result[0].xx)):
        assert result[0].xx[i] == result1[0].xx[i]

    assert len(result[0].yy) == len(result1[0].yy)
    for i in range(len(result[0].yy)):
        assert result[0].yy[i] == result1[0].yy[i]
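The test above round-trips a list of trajectories through ``save_list`` and ``load_list``; outside a test that usage looks like this (the output path is illustrative):

from moviepy.video.tools.tracking import Trajectory

trajectories = Trajectory.load_list("media/traj.txt")   # a list of Trajectory objects
Trajectory.save_list(trajectories, "traj_copy.txt")     # writes them back in the same text format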
Example #5
def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):
    """Tracks a given pattern (small image array) in a video clip.

    Returns ``[(x1, y1), (x2, y2)...]`` where ``(xi, yi)`` are the coordinates
    of the pattern in the clip on frame ``i``. To select the frames you can
    either specify a list of times with ``tt`` or select a frame rate with
    ``fps``.

    This algorithm assumes that the pattern's aspect does not vary much and
    that the distance between two occurrences of the pattern in two consecutive
    frames is smaller than ``radius`` (if you set ``radius`` to -1 the pattern
    will be searched in the whole screen at each frame). You can also provide
    the original position of the pattern with ``xy0``.

    Parameters
    ----------

    clip : video.VideoClip.VideoClip
      MoviePy video clip to track.

    pattern : numpy.ndarray
      Image to search inside the clip frames.

    tt : numpy.ndarray, optional
      Times at which to track the pattern. By default, the clip's time frames
      are used, according to its fps.

    fps : int, optional
      Overrides the fps used to compute the time frames. Defaults to the
      clip's fps.

    radius : int, optional
      Maximum search radius around the previous position. Set to ``-1``,
      the pattern will be searched in the whole screen at each frame.

    xy0 : tuple or list, optional
      Original position of the pattern. If not provided, will be taken from the
      first tracked frame of the clip.
    """
    if not autotracking_possible:
        raise IOError("Sorry, autotrack requires OpenCV for the moment. "
                      "Install OpenCV (aka cv2) to use it.")

    # Compute the time frames first so that ``tt[0]`` is defined below.
    if tt is None:
        tt = np.arange(0, clip.duration, 1.0 / fps)

    if xy0 is None:
        xy0 = findAround(clip.get_frame(tt[0]), pattern)

    xys = [xy0]
    for t in tt[1:]:
        xys.append(findAround(clip.get_frame(t), pattern, xy=xys[-1],
                              r=radius))

    xx, yy = zip(*xys)

    return Trajectory(tt, xx, yy)
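As a variation on the sketch after Example #1, the optional parameters documented above can also be set explicitly (the values, clip path and pattern image are illustrative):

import numpy as np
import imageio
from moviepy import VideoFileClip
from moviepy.video.tools.tracking import autoTrack

clip = VideoFileClip("media/chaplin.mp4")
pattern = imageio.imread("pattern.png")
tt = np.arange(0, 3, 0.2)   # track only the first 3 seconds, 5 frames per second
traj = autoTrack(clip, pattern, tt=tt, xy0=(150, 80), radius=30)  # start the search around (150, 80)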
Example #6
# Imports assumed for this snippet (module paths may differ across MoviePy versions):
from moviepy.editor import VideoFileClip, TextClip, vfx
from moviepy.video.tools.tracking import manual_tracking, Trajectory

clip = VideoFileClip("media/chaplin.mp4")

# MANUAL TRACKING OF THE HEAD

# The next line performs the manual tracking and saves the result to a
# file; comment it out once the tracking has been done (after the first
# run of the script, for instance).
# Note that we save the list (ti, xi, yi), not the functions fx and fy.

# manual_tracking(clip, fps=6, savefile="blurred_trajectory.txt")


# IF THE MANUAL TRACKING HAS BEEN PREVIOUSLY DONE,
# LOAD THE TRACKING DATA AND CONVERT IT TO TRAJECTORY INTERPOLATORS fx(t), fy(t)

traj = Trajectory.from_file("blurred_trajectory.txt")


# BLUR CHAPLIN'S HEAD IN THE CLIP PASSING xi(t) and yi(t) FUNCTIONS

clip_blurred = clip.fx(vfx.headblur, traj.xi, traj.yi, 25)


# Generate the text, put it on a grey background

txt = TextClip(
    "Hey you! \n You're blurry!",
    color="grey70",
    size=clip.size,
    bg_color="grey20",
    font="Century-Schoolbook-Italic",
)
Example #7
def manual_tracking(clip,
                    t1=None,
                    t2=None,
                    fps=None,
                    n_objects=1,
                    savefile=None):
    """Manual tracking of objects in videoclips using the mouse.

    Allows manual tracking of one or several objects in the video clip between
    times ``t1`` and ``t2``. The clip is displayed frame by frame and you must
    click on the object(s) in each frame. If ``t2=None`` only the frame at
    ``t1`` is taken into account. Pressing backslash discards the previously
    recorded frame so you can redo it, and Escape aborts the tracking.

    Returns a list of ``Trajectory`` objects, one per tracked object, each
    holding the clicked times and positions (``tt``, ``xx``, ``yy``).


    Parameters
    ----------

    clip : video.VideoClip.VideoClip
      MoviePy video clip to track.

    t1 : float or str or tuple, optional
      Start time to track (defaults to the start of the clip). Can be expressed
      in seconds like ``15.35``, in ``(min, sec)``, in ``(hour, min, sec)``,
      or as a string: ``"01:03:05.35"``.

    t2 : float or str or tuple, optional
      End time to track (defaults to the end of the clip). Can be expressed
      in seconds like ``15.35``, in ``(min, sec)``, in ``(hour, min, sec)``,
      or as a string: ``"01:03:05.35"``.

    fps : int, optional
      Number of frames per second to freeze on. If None, the clip's
      fps attribute is used instead.

    n_objects : int, optional
      Number of objects to click on each frame.

    savefile : str, optional
      If provided, the result is saved to a file, which makes it easier to edit
      and re-use later.


    Examples
    --------

    >>> from moviepy import VideoFileClip
    >>> from moviepy.video.tools.tracking import manual_tracking
    >>>
    >>> clip = VideoFileClip("media/chaplin.mp4")
    >>>
    >>> # manually indicate 3 trajectories, save them to a file
    >>> trajectories = manual_tracking(clip, t1=5, t2=7, fps=5,
    ...                                n_objects=3, savefile="track.text")
    >>>
    >>> # ...
    >>> # later, in another script, recover these trajectories
    >>> from moviepy.video.tools.tracking import Trajectory
    >>>
    >>> traj1, traj2, traj3 = Trajectory.load_list('track.text')
    >>>
    >>> # If ever you only have one object being tracked, recover it with
    >>> traj, =  Trajectory.load_list('track.text')
    """
    import pygame as pg

    screen = pg.display.set_mode(clip.size)
    if fps is None:
        fps = clip.fps
    step = 1.0 / fps
    if (t1 is None) and (t2 is None):
        t1, t2 = 0, clip.duration
    elif t2 is None:
        t2 = t1 + step / 2
    t = t1
    txy_list = []

    def gatherClicks(t):

        imdisplay(clip.get_frame(t), screen)
        objects_to_click = n_objects
        clicks = []
        while objects_to_click:

            for event in pg.event.get():

                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_BACKSLASH:
                        return "return"
                    elif event.key == pg.K_ESCAPE:
                        raise KeyboardInterrupt()

                elif event.type == pg.MOUSEBUTTONDOWN:
                    x, y = pg.mouse.get_pos()
                    clicks.append((x, y))
                    objects_to_click -= 1

        return clicks

    while t < t2:

        clicks = gatherClicks(t)
        if clicks == "return":
            txy_list.pop()
            t -= step
        else:
            txy_list.append((t, clicks))
            t += step

    tt, xylist = zip(*txy_list)
    result = []
    for i in range(n_objects):
        xys = [e[i] for e in xylist]
        xx, yy = zip(*xys)
        result.append(Trajectory(tt, xx, yy))

    if savefile is not None:
        Trajectory.save_list(result, savefile)
    return result
Example #8
def test_Trajectory_addy():
    trajectory = Trajectory([0, 1], [0], [0, 1]).addy(1)
    assert len(trajectory.yy) == 2
    assert trajectory.yy[0] == 1
    assert trajectory.yy[1] == 2
Example #9
def test_Trajectory_addx():
    trajectory = Trajectory([0, 1], [0], [0, 1]).addx(1)
    assert len(trajectory.xx) == 1
    assert trajectory.xx[0] == 1
Example #10
def test_Trajectory(tt, xx, yy, interpolation_results):
    trajectory = Trajectory(tt, xx, yy)
    for value, expected_result in interpolation_results.items():
        assert np.array_equal(trajectory(value), np.array(expected_result))
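A minimal sketch of the interpolation behaviour this parametrized test exercises (the values below are illustrative, not the fixture data, and assume ``Trajectory`` interpolates linearly between its knots, as MoviePy's ``Interpolator`` does via ``np.interp``):

import numpy as np
from moviepy.video.tools.tracking import Trajectory

traj = Trajectory([0, 1, 2], [0, 10, 20], [0, 5, 10])
print(traj(1))    # (x, y) at a knot time: x=10, y=5
print(traj(0.5))  # between knots: roughly x=5, y=2.5 under linear interpolation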