Example #1
    def decorate_ass_line(self, text, screen_start_ts):
        """Decorate line with karaoke tags"""
        # ASS karaoke tags use centiseconds: \k "waits" for the offset of this
        # line from the screen start, then \kf sweeps over the line's duration
        start_time = (convert_to_seconds(self.start_ts) -
                      convert_to_seconds(screen_start_ts)) * 100
        duration = (convert_to_seconds(self.end_ts) -
                    convert_to_seconds(self.start_ts)) * 100

        return f"{{\k{start_time}}}{{\kf{duration}}}{text}"
Example #2
def test_audio_fadein(sound_type, fps, clip_duration, fadein_duration):
    if sound_type == "stereo":
        make_frame = lambda t: np.array(
            [np.sin(440 * 2 * np.pi * t),
             np.sin(160 * 2 * np.pi * t)]).T.copy(order="C")
    else:
        make_frame = lambda t: np.sin(440 * 2 * np.pi * t)

    clip = AudioClip(make_frame, duration=clip_duration, fps=fps)
    new_clip = audio_fadein(clip, fadein_duration)

    # first frame is muted
    first_frame = new_clip.get_frame(0)
    if sound_type == "stereo":
        assert len(first_frame) > 1
        for value in first_frame:
            assert value == 0.0
    else:
        assert first_frame == 0.0

    fadein_duration = convert_to_seconds(fadein_duration)

    n_parts = 10

    # cut the transformed part into subclips and check the expected max_volume
    # for each one
    time_foreach_part = fadein_duration / n_parts
    start_times = np.arange(0, fadein_duration, time_foreach_part)
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time,
                                              end_time).max_volume()

        possible_value = (i + 1) / n_parts
        assert round(subclip_max_volume, 2) in [
            possible_value,
            round(possible_value - 0.01, 5),
        ]

    # cut the non-transformed part into subclips and check the expected
    # max_volume for each one (almost 1)
    time_foreach_part = (clip_duration - fadein_duration) / n_parts
    start_times = np.arange(fadein_duration, clip_duration, time_foreach_part)
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time,
                                              end_time).max_volume()

        assert round(subclip_max_volume, 4) == 1
Example #3
def parse_duration(self, line):
    """Parse the duration from the line that outputs the duration of
    the container.
    """
    try:
        time_raw_string = line.split(self.duration_tag_separator)[-1]
        match_duration = re.search(
            r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])",
            time_raw_string,
        )
        return convert_to_seconds(match_duration.group(1))
    except Exception:
        raise IOError(
            ("MoviePy error: failed to read the duration of file '%s'.\n"
             "Here are the file infos returned by ffmpeg:\n\n%s") %
            (self.filename, self.infos))
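For context, ffmpeg reports the container duration on a line such as "Duration: 00:02:30.83, ...". The regular expression above pulls out the HH:MM:SS.xx token and convert_to_seconds turns it into plain seconds. A small self-contained sketch of that step (the sample line is hypothetical):

import re

from moviepy.tools import convert_to_seconds

# Hypothetical ffmpeg stderr line reporting the container duration.
line = "  Duration: 00:02:30.83, start: 0.000000, bitrate: 80 kb/s"
match = re.search(r"([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])", line)
print(convert_to_seconds(match.group(1)))  # 2*60 + 30.83 = 150.83 seconds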
Example #4
def test_audio_fadeout(mono_wave, stereo_wave, sound_type, fps, clip_duration,
                       fadeout_duration):
    if sound_type == "stereo":
        make_frame = stereo_wave(left_freq=440, right_freq=160)
    else:
        make_frame = mono_wave(440)

    clip = AudioClip(make_frame, duration=clip_duration, fps=fps)
    new_clip = audio_fadeout(clip, fadeout_duration)

    fadeout_duration = convert_to_seconds(fadeout_duration)

    n_parts = 10

    # cut the transformed part into subclips and check the expected max_volume
    # for each one
    time_foreach_part = fadeout_duration / n_parts
    start_times = np.arange(
        clip_duration - fadeout_duration,
        clip_duration,
        time_foreach_part,
    )
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time,
                                              end_time).max_volume()

        possible_value = 1 - i * 0.1
        assert round(subclip_max_volume, 2) in [
            round(possible_value, 2),
            round(possible_value - 0.01, 5),
        ]

    # cut the non-transformed part into subclips and check the expected
    # max_volume for each one (almost 1)
    time_foreach_part = (clip_duration - fadeout_duration) / n_parts
    start_times = np.arange(0, clip_duration - fadeout_duration,
                            time_foreach_part)
    for i, start_time in enumerate(start_times):
        end_time = start_time + time_foreach_part
        subclip_max_volume = new_clip.subclip(start_time,
                                              end_time).max_volume()

        assert round(subclip_max_volume, 4) == 1
Example #5
def test_save_frame(util, with_mask, t, mask_color, frames):
    filename = os.path.join(util.TMP_DIR, "moviepy_VideoClip_save_frame.png")
    if os.path.isfile(filename):
        try:
            os.remove(filename)
        except PermissionError:
            pass

    width, height = (len(frames[0][0]), len(frames[0]))

    clip = BitmapClip(frames, fps=1)
    if with_mask:
        mask = ColorClip(color=mask_color, is_mask=True, size=(width, height))
        clip = clip.with_mask(mask)

    clip.save_frame(filename, t)

    t = int(convert_to_seconds(t))

    # expected RGB
    e_r, e_g, e_b = BitmapClip.DEFAULT_COLOR_DICT[frames[t][0][0]]

    im = Image.open(filename, mode="r")
    assert im.width == width
    assert im.height == height

    for i in range(im.width):
        for j in range(im.height):
            rgba = im.getpixel((i, j))
            if len(rgba) == 4:
                r, g, b, a = rgba
            else:
                r, g, b = rgba

            assert r == e_r
            assert g == e_g
            assert b == e_b

            if with_mask:
                assert round(a / 254, 2) == mask_color
Example #6
def show(clip, t=0, with_mask=True, interactive=False):
    """
    Splashes the frame of clip corresponding to time ``t``.

    Parameters
    ------------

    t
      Time in seconds of the frame to display.

    with_mask
      ``False`` if the clip has a mask but you want to see the clip
      without the mask.

    """

    if isinstance(t, tuple):
        t = convert_to_seconds(t)  # accepts (min, s) and (h, min, s) tuples directly

    if with_mask and (clip.mask is not None):
        clip = CompositeVideoClip([clip.with_position((0, 0))])

    img = clip.get_frame(t)
    imdisplay(img)

    if interactive:
        result = []
        while True:
            for event in pg.event.get():
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_ESCAPE:
                        print("Keyboard interrupt")
                        return result
                elif event.type == pg.MOUSEBUTTONDOWN:
                    x, y = pg.mouse.get_pos()
                    rgb = img[y, x]
                    result.append({"position": (x, y), "color": rgb})
                    print("position, color : ", "%s, %s" % (str(
                        (x, y)), str(rgb)))
            time.sleep(0.03)
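A hedged usage sketch (the file name is a placeholder, and pygame must be installed for imdisplay): ``t`` can be given directly in seconds or as a tuple that goes through convert_to_seconds, and interactive mode returns the pixels clicked before Escape is pressed.

from moviepy.video.io.VideoFileClip import VideoFileClip

clip = VideoFileClip("myvideo.mp4")  # placeholder path

show(clip, t=6.5)        # frame at 6.5 s
show(clip, t=(1, 30.5))  # tuple form: convert_to_seconds((1, 30.5)) == 90.5 s

# Interactive mode: click pixels to record them, press Escape to get them back.
picked = show(clip, t=6.5, interactive=True)
for point in picked:
    print(point["position"], point["color"])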
Example #7
def file_to_subtitles(filename, encoding=None):
    """Converts a srt file into subtitles.

    The returned list is of the form ``[((start_time,end_time),'some text'),...]``
    and can be fed to SubtitlesClip.

    Only works for '.srt' format for the moment.
    """
    times_texts = []
    current_times = None
    current_text = ""
    with open(filename, "r", encoding=encoding) as file:
        for line in file:
            times = re.findall("([0-9]*:[0-9]*:[0-9]*,[0-9]*)", line)
            if times:
                current_times = [convert_to_seconds(t) for t in times]
            elif line.strip() == "":
                times_texts.append((current_times, current_text.strip("\n")))
                current_times, current_text = None, ""
            elif current_times:
                current_text += line
    return times_texts
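A small usage sketch (the path is a placeholder): every timestamp in the returned list has already gone through convert_to_seconds, so it is plain seconds and, as the docstring notes, the list can be handed to SubtitlesClip as-is.

subs = file_to_subtitles("captions.srt", encoding="utf-8")  # placeholder path

for (start, end), text in subs:
    print(f"{start:7.2f} -> {end:7.2f}  {text!r}")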
Example #8
def ffmpeg_parse_infos(
    filename,
    decode_file=False,
    print_infos=False,
    check_duration=True,
    fps_source="fps",
):
    """Get file infos using ffmpeg.

    Returns a dictionary with the fields:
    "video_found", "video_fps", "duration", "video_nframes",
    "video_duration", "video_bitrate", "audio_found", "audio_fps", "audio_bitrate"

    "video_duration" is slightly smaller than "duration" to avoid
    fetching the uncomplete frames at the end, which raises an error.

    """
    # Open the file in a pipe, read output
    cmd = [FFMPEG_BINARY, "-i", filename]
    if decode_file:
        cmd.extend(["-f", "null", "-"])

    popen_params = {
        "bufsize": 10 ** 5,
        "stdout": sp.PIPE,
        "stderr": sp.PIPE,
        "stdin": sp.DEVNULL,
    }

    if os.name == "nt":
        popen_params["creationflags"] = 0x08000000

    proc = sp.Popen(cmd, **popen_params)
    (output, error) = proc.communicate()
    infos = error.decode("utf8", errors="ignore")

    proc.terminate()
    del proc

    if print_infos:
        # print the whole info text returned by FFMPEG
        print(infos)

    lines = infos.splitlines()
    if "No such file or directory" in lines[-1]:
        raise IOError(
            (
                "MoviePy error: the file %s could not be found!\n"
                "Please check that you entered the correct "
                "path."
            )
            % filename
        )

    result = dict()

    # get duration (in seconds)
    result["duration"] = None

    if check_duration:
        try:
            if decode_file:
                line = [line for line in lines if "time=" in line][-1]
            else:
                line = [line for line in lines if "Duration:" in line][-1]
            match = re.findall("([0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9])", line)[0]
            result["duration"] = convert_to_seconds(match)
        except Exception:
            raise IOError(
                f"MoviePy error: failed to read the duration of file {filename}.\n"
                f"Here are the file infos returned by ffmpeg:\n\n{infos}"
            )

    # get the output line that speaks about video
    lines_video = [
        line for line in lines if " Video: " in line and re.search(r"\d+x\d+", line)
    ]

    result["video_found"] = lines_video != []

    if result["video_found"]:
        try:
            line = lines_video[0]

            # get the size, of the form 460x320 (w x h)
            match = re.search(" [0-9]*x[0-9]*(,| )", line)
            size = list(map(int, line[match.start() : match.end() - 1].split("x")))
            result["video_size"] = size
        except Exception:
            raise IOError(
                (
                    "MoviePy error: failed to read video dimensions in file %s.\n"
                    "Here are the file infos returned by ffmpeg:\n\n%s"
                )
                % (filename, infos)
            )
        match_bit = re.search(r"(\d+) kb/s", line)
        result["video_bitrate"] = int(match_bit.group(1)) if match_bit else None

        # Get the frame rate. Sometimes it's 'tbr', sometimes 'fps', sometimes
        # tbc, and sometimes tbc/2...
        # Current policy: Trust fps first, then tbr unless fps_source is
        # specified as 'tbr' in which case try tbr then fps

        # If the result is close to x*1000/1001 where x is 23, 24, 25, 30 or 50,
        # replace it by x*1000/1001 (a very common case for the fps).

        def get_tbr():
            match = re.search("( [0-9]*.| )[0-9]* tbr", line)

            # Sometimes comes as e.g. 12k. We need to replace that with 12000.
            s_tbr = line[match.start() : match.end()].split(" ")[1]
            if "k" in s_tbr:
                tbr = float(s_tbr.replace("k", "")) * 1000
            else:
                tbr = float(s_tbr)
            return tbr

        def get_fps():
            match = re.search("( [0-9]*.| )[0-9]* fps", line)
            fps = float(line[match.start() : match.end()].split(" ")[1])
            return fps

        if fps_source == "tbr":
            try:
                result["video_fps"] = get_tbr()
            except Exception:
                result["video_fps"] = get_fps()

        elif fps_source == "fps":
            try:
                result["video_fps"] = get_fps()
            except Exception:
                result["video_fps"] = get_tbr()

        # It is known that an fps of 24 is often written as 24000/1001
        # but then ffmpeg nicely rounds it to 23.98, which we hate.
        coef = 1000.0 / 1001.0
        fps = result["video_fps"]
        for x in [23, 24, 25, 30, 50]:
            if (fps != x) and abs(fps - x * coef) < 0.01:
                result["video_fps"] = x * coef

        if check_duration:
            result["video_nframes"] = int(result["duration"] * result["video_fps"])
            result["video_duration"] = result["duration"]
        else:
            result["video_nframes"] = 1
            result["video_duration"] = None
        # We could have also recomputed the duration from the number
        # of frames, as follows:
        # >>> result['video_duration'] = result['video_nframes'] / result['video_fps']

        # get the video rotation info.
        try:
            rotation_lines = [
                line
                for line in lines
                if "rotate          :" in line and re.search(r"\d+$", line)
            ]
            if len(rotation_lines):
                rotation_line = rotation_lines[0]
                match = re.search(r"\d+$", rotation_line)
                result["video_rotation"] = int(
                    rotation_line[match.start() : match.end()]
                )
            else:
                result["video_rotation"] = 0
        except Exception:
            raise IOError(
                (
                    "MoviePy error: failed to read video rotation in file %s.\n"
                    "Here are the file infos returned by ffmpeg:\n\n%s"
                )
                % (filename, infos)
            )

    lines_audio = [line for line in lines if " Audio: " in line]

    result["audio_found"] = lines_audio != []

    if result["audio_found"]:
        line = lines_audio[0]
        try:
            match = re.search(" [0-9]* Hz", line)
            hz_string = line[
                match.start() + 1 : match.end() - 3
            ]  # Removes the 'hz' from the end
            result["audio_fps"] = int(hz_string)
        except Exception:
            result["audio_fps"] = "unknown"
        match_bit = re.search(r"(\d+) kb/s", line)
        result["audio_bitrate"] = int(match_bit.group(1)) if match_bit else None

    return result
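A minimal usage sketch (placeholder file name), reading only fields that the function itself fills in:

infos = ffmpeg_parse_infos("myvideo.mp4")  # placeholder path

print("duration:", infos["duration"], "s")
if infos["video_found"]:
    print("video:", infos["video_size"], "@", infos["video_fps"], "fps,",
          infos["video_nframes"], "frames")
if infos["audio_found"]:
    print("audio:", infos["audio_fps"], "Hz")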
Example #9
def test_multiply_volume_audioclip(
    sound_type,
    factor,
    duration,
    start_time,
    end_time,
):
    if sound_type == "stereo":
        make_frame = lambda t: np.array([
            np.sin(440 * 2 * np.pi * t),
            np.sin(160 * 2 * np.pi * t),
        ]).T.copy(order="C")
    else:
        make_frame = lambda t: [np.sin(440 * 2 * np.pi * t)]

    clip = AudioClip(
        make_frame,
        duration=duration if duration else 0.1,
        fps=22050,
    )
    clip_array = clip.to_soundarray()

    clip_transformed = multiply_volume(
        clip,
        factor,
        start_time=start_time,
        end_time=end_time,
    )
    clip_transformed_array = clip_transformed.to_soundarray()

    assert len(clip_transformed_array)

    if hasattr(clip_array, "shape") and len(clip_array.shape) > 1:
        # stereo clip
        left_channel_transformed = clip_transformed_array[:, 0]
        right_channel_transformed = clip_transformed_array[:, 1]

        if start_time is None and end_time is None:
            expected_left_channel_transformed = clip_array[:, 0] * factor
            expected_right_channel_transformed = clip_array[:, 1] * factor
        else:
            start_time = convert_to_seconds(
                start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            expected_left_channel_transformed = np.array([])
            expected_right_channel_transformed = np.array([])
            for i, frame in enumerate(clip_array):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1)
                expected_left_channel_transformed = np.append(
                    expected_left_channel_transformed,
                    transformed_frame[0],
                )
                expected_right_channel_transformed = np.append(
                    expected_right_channel_transformed,
                    transformed_frame[1],
                )

        assert len(left_channel_transformed)
        assert len(expected_left_channel_transformed)
        assert np.array_equal(
            left_channel_transformed,
            expected_left_channel_transformed,
        )

        assert len(right_channel_transformed)
        assert len(expected_right_channel_transformed)
        assert np.array_equal(
            right_channel_transformed,
            expected_right_channel_transformed,
        )

    else:
        # mono clip

        if start_time is None and end_time is None:
            expected_clip_transformed_array = clip_array * factor
        else:
            start_time = convert_to_seconds(
                start_time) if start_time else clip.start
            end_time = convert_to_seconds(end_time) if end_time else clip.end

            expected_clip_transformed_array = np.array([])
            for i, frame in enumerate(clip_array[0]):
                t = i / clip.fps
                transformed_frame = frame * (
                    factor if start_time <= t <= end_time else 1)
                expected_clip_transformed_array = np.append(
                    expected_clip_transformed_array,
                    transformed_frame,
                )
            expected_clip_transformed_array = np.array([
                expected_clip_transformed_array,
            ])

        assert len(expected_clip_transformed_array)

        assert np.array_equal(
            expected_clip_transformed_array,
            clip_transformed_array,
        )
Example #10
def test_cvsecs(given, expected):
    """Test the convert_to_seconds funtion outputs correct times as per
    the docstring.
    """
    assert tools.convert_to_seconds(given) == expected
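For reference, these are the kinds of conversions the docstring promises and the test above checks: plain numbers pass through, (minutes, seconds) and (hours, minutes, seconds) tuples are summed, and "HH:MM:SS" strings accept either a dot or an SRT-style comma as the decimal separator. A few representative values, with the expected outputs shown as comments:

from moviepy import tools

print(tools.convert_to_seconds(15.4))            # 15.4   (already in seconds)
print(tools.convert_to_seconds((1, 21.5)))       # 81.5   (minutes, seconds)
print(tools.convert_to_seconds((1, 1, 2)))       # 3662   (hours, minutes, seconds)
print(tools.convert_to_seconds("01:01:33.045"))  # 3693.045
print(tools.convert_to_seconds("01:01:33,5"))    # 3693.5 (comma accepted, SRT style)
print(tools.convert_to_seconds("1:33.5"))        # 93.5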
Example #11
def to_srt(sub_element):
    (start_time, end_time), text = sub_element
    formatted_start_time = convert_to_seconds(start_time)
    formatted_end_time = convert_to_seconds(end_time)
    return "%s - %s\n%s" % (formatted_start_time, formatted_end_time, text)
Example #12
def __init__(self, line_data):
    self.text = line_data["line"]
    self.start_ts = convert_to_seconds(line_data["ts"])
    self.end_ts = convert_to_seconds(line_data.get("end_ts", 0))
Example #13
def __init__(self, screen_data):
    self._start_ts = (convert_to_seconds(screen_data.get("ts"))
                      if "ts" in screen_data else None)
    self._end_ts = (convert_to_seconds(screen_data.get("end_ts"))
                    if "end_ts" in screen_data else None)
    self.lines = self.create_lines(screen_data["lines"])