def split(self, video_id: str, crop_start: float = 0, crop_end: float = 0):
        """Crop the downloaded media file for ``video_id`` into the output path.

        :param video_id: basename (without extension) of the downloaded file
        :param crop_start: subclip start in seconds; 0 together with
            ``crop_end == 0`` means "no cropping"
        :param crop_end: subclip end in seconds
        """
        input_file_path = Path(
            self.input_path, "{vid}.{ext}".format(vid=video_id,
                                                  ext=self.extension))

        cropped_path = self.output_path / "{vid}.{ext}".format(
            vid=video_id, ext=self.extension)

        if not input_file_path.exists():
            # logger.warning('Failed to find file from: %s', input_file_path)
            return

        # Audio
        if self.extension == "mp3" and self.should_crop(cropped_path):
            clip = AudioFileClip(str(input_file_path.absolute()))

            # BUG FIX: `is not 0` compared object identity, not value —
            # unreliable for numbers and a SyntaxWarning on Python 3.8+.
            if crop_start != 0 or crop_end != 0:
                clip = clip.subclip(crop_start, crop_end)

            # BUG FIX: AudioFileClip has no `.audio` attribute (only video
            # clips do), so `clip.audio.write_audiofile(...)` raised
            # AttributeError. Write the audio clip itself.
            clip.write_audiofile(str(cropped_path.absolute()))

            clip.close()

        elif self.extension == "mp4" and self.should_crop(cropped_path):
            clip = VideoFileClip(str(input_file_path.absolute()))

            if crop_start != 0 or crop_end != 0:
                clip = clip.subclip(crop_start, crop_end)

            clip.write_videofile(str(cropped_path.absolute()))

            clip.close()
예제 #2
0
def play(file_name, temp_dir):
	"""Plays a random audio sample of the file with the provided name.

	A DURATION-second subclip starting at a random offset is written as an
	mp3 into `temp_dir` and handed to the terminal player.
	"""
	audio = Audio(file_name)
	# Clamp the upper bound at 0 so files shorter than DURATION do not make
	# randint() raise ValueError on an empty (negative) range.
	start = randint(0, max(0, floor(audio.duration - DURATION)))
	file_path = path.join(temp_dir, str(time()) + '.mp3')
	audio.subclip(start, start + DURATION).write_audiofile(file_path, verbose=False, progress_bar=False)
	terminal(file_path)
예제 #3
0
def test_issue_470():
    """Writing a subclip whose end exceeds the source duration must fail."""
    audio_clip = AudioFileClip("media/crunching.mp3")
    target = os.path.join(TMP_DIR, "issue_470.wav")

    # t_end is out of bounds, so the write is expected to raise IOError.
    out_of_bounds = audio_clip.subclip(t_start=6, t_end=9)
    with pytest.raises(IOError):
        out_of_bounds.write_audiofile(target, write_logfile=True)

    # An in-bounds subclip should write without error.
    in_bounds = audio_clip.subclip(t_start=6, t_end=8)
    in_bounds.write_audiofile(target, write_logfile=True)
예제 #4
0
def segment_audio_file(src_path, dest_dir,
                  segment_duration=DEFAULT_AUDIO_SEGMENT_DURATION_SEC,
                  zeroes_padding=DEFAULT_ZEROES_PADDING):
    """
    For a given audio file at `src_path`, the audio is segmented
    into chunks of `segment_duration` seconds. Each segment
    is saved in `dest_dir` with a filename convention using
    the start and end time in seconds.


    src_path (str): absolute path to the audio file

    dest_dir (str): absolute path to the subdirectory

    segment_duration (int): the number of seconds for each segment

    zeroes_padding (int): the number of zeroes to pad each segment's filename


    Yields a generator; for each iteration, the path to a
    newly-created audio segment is returned, e.g.

        "./projects/myvideo/audio-segments/00000-00100.wav"
    """
    # Only the extension is needed for segment filenames (the basename was
    # previously computed but never used).
    _, src_ext = splitext(src_path)
    audio = AudioFileClip(src_path)
    total_seconds = audio.duration
    x_sec = 0
    while x_sec < total_seconds:
        y_sec = x_sec + segment_duration
        if y_sec > total_seconds:
            # when we've reached the end of the total duration
            # round to the next second
            y_sec = ceil(total_seconds)
            # turns out subclip does not like an endpoint bigger
            # than the clip's duration, so we leave off second argument
            segment = audio.subclip(x_sec)
        else:
            segment = audio.subclip(x_sec, y_sec)
        # zfill is the idiomatic zero-padding (equivalent to rjust(n, "0")
        # for non-negative integers).
        segment_basename = "%s-%s%s" % (
                str(x_sec).zfill(zeroes_padding),
                str(y_sec).zfill(zeroes_padding),
                src_ext)
        segment_full_path = join(dest_dir, segment_basename)
        segment.write_audiofile(segment_full_path)
        yield segment_full_path
        # set x_sec to equal y_sec, so that the next clip starts at y_sec
        x_sec = y_sec
예제 #5
0
def getVideo(id):
    """Fetch a coub by `id` and return a normalized VideoFileClip with its
    audio attached, or False when no usable video/audio URLs exist."""
    url = "http://coub.com/api/v2/coubs/" + str(id)
    response = requests.get(url)
    data = response.json()["file_versions"]["html5"]

    try:
        video_info = data["video"]
        audio_info = data["audio"]
        # Prefer the high-quality variant; fall back to medium.
        videoUrl = video_info["high"]["url"] if 'high' in video_info else video_info["med"]["url"]
        audioUrl = audio_info["high"]["url"] if 'high' in audio_info else audio_info["med"]["url"]
    except KeyError:
        return False

    video, videoName = getFile(videoUrl)
    audio, audioName = getFile(audioUrl)

    saveBinaryFile(videoName, video)
    saveBinaryFile(audioName, audio)

    videoclip = normalize(VideoFileClip(videoName))
    audioclip = AudioFileClip(audioName)
    # Trim the audio to the video's length before attaching it.
    videoclip = videoclip.set_audio(audioclip.subclip(0, videoclip.duration))

    # Schedule the temp files for cleanup.
    garbage.extend([videoName, audioName])

    return videoclip
예제 #6
0
    def read_file(self):
        """Read one sequence of audio frames and matching labels.

        The running call counter selects which slice of the label file to
        read; for each pair of consecutive timestamps the corresponding
        audio interval is extracted, channel-averaged, and padded/truncated
        to a fixed sample count.

        Returns:
          frames (np.ndarray): float32 array of per-interval audio samples.
          labels (np.ndarray): float32 labels for the same intervals.
        """

        # Label-row window covered by this call.
        start = self.num_calls * self.seq_length
        end = start + self.seq_length

        # Read label file, and get start-end timestamps
        labels, timestamps = self._read_label_file(start, end)

        clip = AudioFileClip(str(self.raw_file_path), fps=self.fps)

        num_seqs = self.num_seqs
        frames = []
        for i, t in enumerate(timestamps[:-1]):
            start_time = timestamps[i]
            end_time = timestamps[i + 1]

            # All raw samples between the two timestamps; iter_frames()
            # yields one (possibly multi-channel) sample per tick.
            data_frame = np.array(
                list(clip.subclip(start_time, end_time).iter_frames()))
            # Average the channels, then truncate to num_samples.
            data_frame = data_frame.mean(1)[:self.num_samples]

            if data_frame.shape[0] < self.num_samples:
                # Zero-pad short intervals up to num_samples.
                # NOTE(review): the `% self.num_samples` looks redundant here
                # since shape[0] < num_samples already — confirm intent.
                data_frame = np.pad(data_frame,
                                    (0, self.num_samples -
                                     data_frame.shape[0] % self.num_samples),
                                    'constant')

            frames.append(data_frame.astype(np.float32))

        frames = np.array(frames).astype(np.float32)
        labels = np.array(labels).astype(np.float32)

        return frames, labels
예제 #7
0
class VideoBarrier(object):
    """Context manager that pairs a video file with a replacement audio track.

    On enter it opens the video (muted) and the audio file; on exit both
    clips are closed. ``set_audio`` overlays a slice of the audio onto the
    video, and ``save`` writes a fixed-length excerpt into the clip output
    directory.
    """
    CLIP_DIR = "clip"

    def __init__(self, input, output, audio_path):
        super(VideoBarrier, self).__init__()
        self.input = input
        base = os.path.basename(input)
        self.filename = os.path.splitext(base)[0]
        self.output = os.path.join(output, VideoBarrier.CLIP_DIR)
        self.audio_path = audio_path

    def __enter__(self):
        # Open the video without its own audio; the barrier supplies it.
        self.v_clip = VideoFileClip(self.input, audio=False)
        self.a_clip = AudioFileClip(self.audio_path)
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.v_clip.close()
        self.a_clip.close()

    def set_audio(self, t_start=0, t_end=None):
        """Attach audio_path[t_start:t_end] to the video clip."""
        trimmed = self.a_clip.subclip(t_start, t_end)
        self.v_clip = self.v_clip.set_audio(trimmed)

    def save(self, start_time=0, duration=60):
        """Write a `duration`-second excerpt starting at `start_time`."""
        audio_fname = os.path.splitext(os.path.basename(self.audio_path))[0]
        output_fname = "{}_{}.mp4".format(self.filename, audio_fname)
        output_path = os.path.join(self.output, output_fname)

        # Pad the clip by one second before subclipping so t_end stays
        # within the (possibly shorter) source duration.
        excerpt = self.v_clip.set_duration(duration + 1).subclip(
            t_start=start_time, t_end=start_time + duration)
        excerpt.write_videofile(output_path)
예제 #8
0
    def download(self, directory):
        """Download this stream into `directory` and return the final path.

        Applies the configured volume multiplier and optional cut, muxes a
        separate audio stream onto video-only streams, and applies ID3 tags
        to mp3 output.
        """
        bitrate = str(int(self.stream.bitrate / 1000)) + "k"
        url = self.stream.url
        extension = ".mp3" if self.onlyAudio else ".mp4"
        finalPath = os.path.join(directory,
                                 sanitizeFilename(self.name) + extension)

        clip = AudioFileClip(url) if self.onlyAudio else VideoFileClip(url)

        # Video-only streams carry no audio; mux the separate audio stream in.
        if self.stream.mediaType == "V":
            audioClip = AudioFileClip(self.audioStream.url)
            clip = clip.set_audio(audioClip)

        if self.volumeMultiplier != 0:
            # Negative multipliers attenuate quadratically; positive ones
            # scale linearly (divided by 5).
            newVolume = (
                1 + self.volumeMultiplier / 100
            )**2 if self.volumeMultiplier < 0 else self.volumeMultiplier / 5

            clip = clip.volumex(newVolume)

        if self.cut:
            # Clamp the cut range to the clip's bounds before subclipping.
            low = max(self.lowCut, 0)
            high = min(self.highCut, clip.end)
            clip = clip.subclip(low, high)

        # Single write/close path — the original duplicated write+close
        # across three branches and closed the clip a second time in a
        # bare try/except at the end.
        try:
            if self.onlyAudio:
                clip.write_audiofile(finalPath, bitrate=bitrate)
                self.applyID3Tags(finalPath)
            else:
                clip.write_videofile(finalPath, threads=NUM_THREADS)
        finally:
            clip.close()
        return finalPath
예제 #9
0
def audio_cut_at(filename, cuts):
    """Cuts audio based on (start,end) tuples

    Args:
        filename (string): the name of the file from which parts are to be cut
        cuts (list): list of (start,end) tuples, each tuple represents the parts to be cut, both start and end are float values

    Returns:
        moviepy.AudioClip: the concatenated audio (the docstring previously
        claimed a VideoClip), which is also written to 'done_proc.wav'

    Raises:
        ValueError: if `cuts` is empty.
    """
    if not cuts:
        raise ValueError("cuts must contain at least one (start, end) tuple")
    audio_to_cut = AudioFileClip(filename)
    subclips = [audio_to_cut.subclip(start, end) for start, end in cuts]
    # A single cut needs no concatenation; keep the plain subclip, matching
    # the original pairwise-concatenation result.
    final_audio = subclips[0] if len(subclips) == 1 else concatenate_audioclips(subclips)
    final_audio.write_audiofile('done_proc.wav')
    return final_audio
예제 #10
0
def trim_original_audio_to_audio_subclips(input_video_file_name,
                                          matched_time_section,
                                          output_file_name):
    """
    Save the edited audio made of only the matched time sections
    (where my bias appeared in the video).
    """
    source_audio = AudioFileClip(input_video_file_name)
    pieces = []
    for section_start, section_end in matched_time_section:
        pieces.append(source_audio.subclip(section_start, section_end))
    edited = concatenate_audioclips(pieces)
    edited.write_audiofile(output_file_name)
예제 #11
0
    def mp4(self):
        """Build final.mp4 from the jpg thumbnails plus background music.

        Returns the path of the rendered video file.
        """
        this_dir = os.listdir(self.thumbnail_dir)
        self.logging.log.info("Finding the pictures from {}".format(this_dir))

        # Map each picture's basename (the sort key) to its full path.
        # The original built a jpg-only list ("filepaths") but never used
        # it, then walked every file regardless — apply the filter here.
        directory = {}
        for root, dirs, files in os.walk(self.thumbnail_dir):
            for fname in files:
                if not fname.endswith("jpg"):
                    continue
                directory[fname.replace(".jpg", "")] = os.path.join(root, fname)

        new_path = [directory[k] for k in sorted(directory.keys())]

        # Setting Image time duration
        self.logging.log.info("Setting start and end audio duration")
        pic_duration = (int(self.end_dur) -
                        int(self.start_dur)) / len(new_path)
        clips = [
            ImageClip(m).set_duration(pic_duration).crossfadein(0.5)
            for m in new_path
        ]

        # Concatinate all the videos
        concat_clip = concatenate_videoclips(clips, method="compose")

        # Find downloaded song from youtuebe & if not found pick any random from liberary
        self.logging.log.info("Fetching song ....")
        song = self.get_downloaded_song()

        self.logging.log.info("Setting background music")
        background_audio_clip = AudioFileClip(song)
        bg_music = background_audio_clip.subclip(self.start_dur, self.end_dur)

        self.logging.log.info("Preparing final video")
        final_clip = concat_clip.set_audio(bg_music)
        output_path = os.path.join(self.output_video, "final.mp4")
        final_clip.write_videofile(output_path,
                                   codec='libx264',
                                   audio_codec="aac",
                                   fps=24)

        # Return the same path that was written; the original returned a
        # backslash-joined string that only matched the file on Windows.
        return output_path
예제 #12
0
def add_audio_to_video(video_file_path, audio_file_path, target_file_path):
    """Overlay `audio_file_path` onto `video_file_path`, trimmed to the
    video's length, and write the result to `target_file_path`."""
    video = VideoFileClip(video_file_path)
    audio = AudioFileClip(audio_file_path).subclip(0, video.duration)

    composite_audio = CompositeAudioClip([audio])
    result = video.set_audio(composite_audio)
    result.write_videofile(target_file_path,
                           codec="libx264",
                           audio_codec="aac")
예제 #13
0
파일: editor.py 프로젝트: tnoff/hathor
def generate_audio_volume_array(audio_input):
    '''
    Generate a list of volumes for an audio input
    audio_input     :   Either a AudioFileClip instance, or the name of a file

    Returns one RMS volume value per whole second of the clip.
    '''
    if not isinstance(audio_input, AudioFileClip):
        audio_input = AudioFileClip(audio_input)

    # Named helpers instead of assigned lambdas (PEP 8 E731).
    def second_samples(i):
        return audio_input.subclip(i, i + 1).to_soundarray(fps=44100)

    def rms(array):
        # Root-mean-square of the (float-cast) sample array.
        return np.sqrt(((1.0 * array)**2).mean())

    return [rms(second_samples(i)) for i in range(0, int(audio_input.duration - 1))]
예제 #14
0
파일: editor.py 프로젝트: tnoff/hathor
def commercial_remove(input_file, output_file, non_commercial_intervals, verbose=True):
    '''Remove commercials from audio file
       input_file: audio file to remove commercials from
       output_file: output path of new audio file with no commercials
       non_commercial_intervals: list of (start, end) second pairs to keep,
                                 e.g. from a commercial_identify call
                                 (the docstring previously described a
                                 nonexistent "audio_data" parameter)
       verbose: passed through to write_audiofile
    '''
    audio_clip = AudioFileClip(input_file)
    # Keep only the non-commercial spans, then stitch them back together.
    clips = [audio_clip.subclip(start, end)
             for start, end in non_commercial_intervals]
    final = concatenate_audioclips(clips)
    final.write_audiofile(output_file, verbose=verbose)
예제 #15
0
파일: mp3.py 프로젝트: chfw/youtube2mp3
def save_as_mp3(file_name, clip_info):
    """Cut `file_name` according to `clip_info` and save it as an mp3."""
    source = AudioFileClip(file_name)

    t_start, t_end, duration = parse_clip_info(clip_info)
    print(t_start, t_end, duration)

    trimmed = source.subclip(t_start=t_start, t_end=t_end)
    if duration:
        trimmed = trimmed.set_duration(duration)

    base_name = os.path.splitext(file_name)[0]
    try:
        trimmed.write_audiofile('{}.mp3'.format(base_name))
    except IndexError:
        print("Please try a bit longer duration")
        raise
예제 #16
0
def write_video(images):
    """Render `images` into a video, attach the recorded mp3, and save it."""
    normalize_images_in_library()
    convert_to_mp3()

    # Write video clip without audio
    print("Beginning video render.")
    video = ImageSequenceClip(images, fps=ANIMATION_FRAME_RATE)

    # Trim the recording to the video's length and attach it.
    narration = AudioFileClip(TEMP_RECORDING_MP3).subclip(0, video.duration)
    video = video.set_audio(narration)
    video.write_videofile(VIDEO_PATH, fps=ANIMATION_FRAME_RATE, codec='libx264',
                          audio_codec="aac")

    print("Video rendered and saved with audio.")
    exit_program()
예제 #17
0
def extract_audio(filepath, fragment_size=10):
        """Split the audio at `filepath` into ~`fragment_size`-second pieces.

        Each piece is encoded to a temporary ogg file and its raw bytes are
        collected; the list of encoded fragments is returned.
        """
        audio_clip = AudioFileClip(filepath, fps=15000)
        fragment_count = math.ceil(audio_clip.duration / fragment_size)

        fragments = []
        for index in range(fragment_count):
            begin = index * fragment_size
            finish = min(begin + fragment_size, audio_clip.duration)
            piece = audio_clip.subclip(begin, finish)

            with tempfile.NamedTemporaryFile(suffix='.ogg') as fp:
                piece.write_audiofile(fp.name, logger=None)
                fragments.append(fp.read())

        return fragments
예제 #18
0
def making_video(video_clips, musicFile, best_music_beats, best_video_beats,
                 file_name):
    """Re-time `video_clips` so its beats line up with the music's beats,
    attach the trimmed music, and write the result to `file_name`."""

    # Start editing the video
    video = video_clips
    audio = AudioFileClip(musicFile)
    print(video.duration)

    # Music between the first and last detected beat.
    audio_use = audio.subclip(best_music_beats[0], best_music_beats[-1])
    print("audio_time:", audio_use.duration)

    print("best_video_beats", best_video_beats)
    print('best_music_beats', best_music_beats)

    # Uniform re-timing - fast / imprecise
    #change_size = best_music_beats[-1]/best_video_beats[-1]
    change_size = audio_use.duration / video.duration
    video_noaudio = video.fl_time(lambda t: t / change_size,
                                  apply_to=['video', 'audio']).set_end(
                                      best_video_beats[-1] * change_size)
    # video_noaudio.write_videofile('./temp/'+file_name.split('/')[-1]+"_111.mp4")

    # Per-segment re-timing - slow / but more precise
    #video_clips = []
    #time = 0
    # for i in range(1,len(best_video_beats)):
    #	video_clip = 0
    #	# get the video segment
    #	video_1 = video.subclip(best_video_beats[i-1],best_video_beats[i])
    #	# change the playback speed
    #	video_time = best_video_beats[i]-best_video_beats[i-1]
    #	music_time = best_music_beats[i]-best_music_beats[i-1]
    #	change_size = music_time/video_time # choose fast-forward or slow-motion
    #	video_clip = video_1.fl_time(lambda t:t/change_size,apply_to=['video','audio']).set_end(video_1.duration*change_size)
    #	print("change_size",change_size)
    #	video_clip.write_videofile('./temp/'+file_name.split('/')[-1]+str(i)+".mp4")
    #	path = './temp/'+file_name.split('/')[-1]+str(i)+".mp4"
    #	clips = VideoFileClip(path)
    #	time += clips.duration
    #	video_clips.append(clips)
    # print(time)
    #video_noaudio = concatenate_videoclips(video_clips)

    # Add the music
    video_result = video_noaudio.set_audio(audio_use)
    video_result.write_videofile(file_name, audio_codec="aac")
예제 #19
0
    def _get_samples(self, data_file: str, label_file: str):
        """ Read samples from file.
        
        Args:
          data_file (str): File to read the data from.
          label_file (str): File to read the labels from.
        
        Returns:
          frames (np.array): Frames of each file.
          labels (np.array): Labels for each frame.
          seq_num (int): Number of samples in file.
          num_samples (int): Number of samples per frame.
          attrs_name (str): Label names.
        """

        file_data, attrs_name = self.labelfile_reader.read_file(label_file)
        file_data = np.array(file_data).astype(np.float32)
        # Column 0 holds the timestamps; the remaining columns are labels.
        timestamps = file_data[:, 0]
        labels = file_data[:-1, 1:]

        seq_num = labels.shape[0] - 1

        clip = AudioFileClip(str(data_file), fps=self.fps)

        # Samples per frame, derived from the first timestamp interval —
        # assumes the timestamps are evenly spaced (TODO confirm).
        num_samples = int(self.fps * (timestamps[1] - timestamps[0]))
        frames = []
        for i in range(len(timestamps) - 1):
            start_time = timestamps[i]
            end_time = timestamps[i + 1]

            # All raw samples between the two timestamps; iter_frames()
            # yields one (possibly multi-channel) sample per tick.
            data_frame = np.array(
                list(clip.subclip(start_time, end_time).iter_frames()))
            # Average the channels, then truncate to num_samples.
            data_frame = data_frame.mean(1)[:num_samples]

            if data_frame.shape[0] < num_samples:
                # Zero-pad short intervals up to num_samples.
                # NOTE(review): the `% num_samples` looks redundant here
                # since shape[0] < num_samples already — confirm intent.
                data_frame = np.pad(
                    data_frame,
                    (0, num_samples - data_frame.shape[0] % num_samples),
                    'constant')

            frames.append(data_frame.astype(np.float32))

        frames = np.array(frames).astype(np.float32)
        labels = np.array(labels).astype(np.float32)

        return frames, labels, seq_num, num_samples, attrs_name
예제 #20
0
    def addAudioFromClip(self, clipToExtract, start_time, end_time):
        """Rip the audio track out of another video file, slice it, and
        overlay it on the clip currently being worked on (self.clip).

        This DOES NOT work with clips made through the VideoFileClip()
        method, since they have been processed as a different file type and
        already have their own audio attribute — access it via `clip.audio`
        instead.

        :param clipToExtract: video from directory (mp4, etc)
        :param start_time: start of the audio slice to use
        :param end_time: end of the audio slice to use
        :return: adds audio to the clip being worked on (self.clip)
        """
        extracted = AudioFileClip(clipToExtract)
        sliced = extracted.subclip(start_time, end_time)
        self.clip = self.clip.set_audio(sliced)
예제 #21
0
    def addAudioFromFile(self, audio, start_time, end_time):
        """Overlay a slice of an audio file onto the clip being worked on.

        Uses moviepy.audio.io.AudioFileClip, which streams the sound file
        rather than loading it fully into memory.

        This alters the clip already stored on self.clip (not a new external
        clip), to avoid discrepancies when making new clips with or without
        overlay audio.

        :param audio: audio file taken from directory (mp3, wav, etc)
        :param start_time: start of the audio slice to use
        :param end_time: end of the audio slice to use
        :return: adds audio to the clip being worked on (self.clip)
        """
        source = AudioFileClip(audio)
        sliced = source.subclip(start_time, end_time)
        self.clip = self.clip.set_audio(sliced)
예제 #22
0
def startPreview(onlyAudio, url, cut, low, high, volume):
    """Preview a stream with moviepy, applying volume and an optional cut."""
    clip = AudioFileClip(url) if onlyAudio else VideoFileClip(url)

    if volume != 0:
        clip = clip.volumex(volume)

    if cut:
        clip = clip.subclip(low, high)

    print("PREVIEWING WITH MOVIEPY")
    clip.preview()
    clip.close()

    # See https://github.com/Zulko/moviepy/issues/575
    pygame.quit()
def merge_audio_video(audio_file: str, video_file: str, frame_number: int = 0):
    """Attach `audio_file` to `video_file` starting at the time of
    `frame_number`, returning the combined clip."""
    audio_clip = AudioFileClip(audio_file)
    video_clip = VideoFileClip(video_file)

    start_time, video_fps = starting_point_in_time(video_file, frame_number)

    video_duration = video_clip.duration
    audio_duration = audio_clip.duration

    # The audio may not fill the rest of the video (or vice versa); trim it
    # to whichever runs out first.
    tail_length = min(video_duration - start_time, audio_duration)
    audio_clip = audio_clip.subclip(0, tail_length)

    head = video_clip.subclip(0, start_time)
    tail = video_clip.subclip(start_time, video_duration)
    # Assign the track directly (same effect as the original attribute set).
    tail.audio = audio_clip

    return concatenate_videoclips([head, tail])
    def _get_samples(self, data_file: str, label_file: str):
        """Read fixed-duration audio samples and repeated labels from file.

        Unlike the timestamp-driven variant, every sample covers the same
        `sample_duration` and shares the file-level labels.

        Returns:
          frames (np.ndarray): float32 per-sample audio frames.
          labels (np.ndarray): float32 labels repeated for each sample.
          seq_num (int): number of samples in the file.
          num_samples (int): audio samples per frame.
          attrs_name: label names from the label file.
        """

        file_data, attrs_name = self.labelfile_reader.read_file(label_file)
        file_data = np.array(file_data).astype(np.float32)

        clip = AudioFileClip(str(data_file), fps=self.fps)

        # CHANGED (sample duration of 40 ms (or 100 ms))
        # Every sample has the same label
        #sample_duration = 0.04
        sample_duration = 1
        num_samples = int(self.fps * sample_duration)

        seq_num = int(clip.duration / sample_duration)

        # Same file-level labels for every extracted sample.
        labels = np.repeat(file_data, seq_num, axis=0)

        frames = []
        for i in range(seq_num):
            start_time = i * sample_duration
            end_time = (i + 1) * sample_duration

            # Raw samples for this window; channel-average, then truncate.
            data_frame = np.array(
                list(clip.subclip(start_time, end_time).iter_frames()))
            data_frame = data_frame.mean(1)[:num_samples]

            frames.append(data_frame.astype(np.float32))

        frames = np.array(frames).astype(np.float32)
        labels = np.array(labels).astype(np.float32)

        #print(frames.shape)
        #print(frames[0])
        #print(labels)
        #print(labels.shape)
        #print(data_file)
        #print(seq_num)
        #print(num_samples)
        #print(attrs_name)

        return frames, labels, seq_num, num_samples, attrs_name
예제 #25
0
    def split_songs(self):
        """ Creates songs from downloaded audio file by clipping it using provided time links. """

        # The full downloaded audio track.
        audio = AudioFileClip(self.filename)

        # Time links scraped from the audio file's YouTube page.
        times = self.get_time_links()

        for index, time_link in enumerate(times):
            # Each song starts at its own time link...
            clip_start = Splitter.time_str_to_tuple(time_link)

            # ...and ends at the next one (None = run to end for the last).
            if index == len(times) - 1:
                clip_end = None
            else:
                clip_end = Splitter.time_str_to_tuple(times[index + 1])

            song = audio.subclip(clip_start, clip_end)
            song.write_audiofile("clip{}.mp3".format(index + 1))
예제 #26
0
def Dark_MODE(clip):
    """Tile `clip` into a 2x2 mirrored grid, attach the soundtrack (looped
    or trimmed to fit), and write the result to Dark.mp4."""
    audio = AudioFileClip("soundtrack.mp3")

    # Four mirrored copies arranged in a 2x2 grid.
    clip1 = clip
    clip2 = clip1.fx(vfx.mirror_x)
    clip3 = clip1.fx(vfx.mirror_y)
    clip4 = clip2.fx(vfx.mirror_y)

    final_clip = clips_array([[clip1, clip2], [clip3, clip4]])

    if final_clip.duration >= audio.duration:
        # Video is longer: loop the audio up to the video's length.
        # BUG FIX: the original passed `audio` again as the positional
        # `nloops` argument of the bound audio_loop() call; only the
        # `duration` keyword is needed.
        audio = audio.audio_loop(duration=final_clip.duration).audio_fadeout(2)
    else:
        # Audio is longer: trim it to the video's length.
        audio = audio.subclip(0, final_clip.duration).audio_fadeout(2)

    final_clip = final_clip.set_audio(audio)
    final_clip.resize(width=480).write_videofile("Dark.mp4")
예제 #27
0
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeVideoClip, AudioFileClip

# Take the first 43 seconds of the song as the soundtrack.
audio = AudioFileClip("song.mp3")
audio = audio.subclip((0, 0), (0, 43))

# Main video followed by the outro, stitched together.
video1 = VideoFileClip("outputM.mp4")
video2 = VideoFileClip("end.mp4")
final_clip = concatenate_videoclips([video1, video2])
final = final_clip.set_audio(audio)
# (removed an unused `i = 13` left over from debugging)
final.write_videofile("final.mp4", codec='mpeg4', audio_codec='libvorbis')
예제 #28
0
# --- Mix quiet background music under a sample video's original audio ---
source_path = os.path.join(SAMPLE_INPUTS, 'sample.mp4')
source_audio_path = os.path.join(SAMPLE_INPUTS, 'audio.mp3')

# Output locations for the extracted/mixed audio and the final video.
mix_audio_dir = os.path.join(SAMPLE_OUTPUTS, "mixed-audio")
os.makedirs(mix_audio_dir, exist_ok=True)
og_audio_path = os.path.join(mix_audio_dir, "og.mp3")
final_audio_path = os.path.join(mix_audio_dir, 'final-audio.mp3')
final_video_path = os.path.join(mix_audio_dir, 'final-video.mp4')

video_clip = VideoFileClip(source_path)

# Extract the video's own audio track to disk.
# NOTE(review): "orignal"/"backgroud" are typos, kept because these are
# module-level names that later code may reference.
orignal_audio = video_clip.audio
orignal_audio.write_audiofile(og_audio_path)

# Trim the background track to the video's length.
backgroud_audio_clip = AudioFileClip(source_audio_path)
bg_music = backgroud_audio_clip.subclip(0, video_clip.duration)

# Background music at 10% volume so it sits under the original audio.
#bg_music = bg_music.fx(volumex, 0.10)
bg_music = bg_music.volumex(0.10)
#bg_music.write_audiofile()

# Layer the original audio over the quiet background track.
final_audio = CompositeAudioClip([orignal_audio, bg_music])
final_audio.write_audiofile(final_audio_path, fps=orignal_audio.fps)

#new_audio = AudioFileClip(final_audio_path)
#final_clip = video_clip.set_audio(new_audio)

final_clip = video_clip.set_audio(final_audio)
final_clip.write_videofile(final_video_path,
                           codec='libx264',
                           audio_codec="aac")
# --- Second pass of the audio-mixing example, written out step by step ---
mixed_audio_dir = os.path.join(SAMPLE_OUTPUTS, 'mixed_audio')

os.makedirs(mixed_audio_dir, exist_ok=True)
og_audio_dir = os.path.join(mixed_audio_dir, 'og.mp3')
final_audio_dir = os.path.join(mixed_audio_dir, 'final-audio.mp3')
final_video_dir = os.path.join(mixed_audio_dir, 'final-video.mp4')

# FIX: the original read `source_video_path`, a name that is never defined
# anywhere in this script; the source video path variable is `source_path`.
video_clip = VideoFileClip(source_path)
original_audio = video_clip.audio
# extracting the original audio
original_audio.write_audiofile(og_audio_dir)

# another audio file to clip
background_audio_clip = AudioFileClip(source_audio_path)
# making subclip of this of same length of video clip
bg_music = background_audio_clip.subclip(t_start=0, t_end=video_clip.duration)

# now we want the background music to be low
bg_music = bg_music.volumex(0.10)  # 10% of its volume
# or bg_music = bg_music.fx(vfx.volumex, 0.10)

# layer the quiet music under the original audio and render
final_audio = CompositeAudioClip([original_audio, bg_music])
final_audio.write_audiofile(final_audio_dir, fps=original_audio.fps)

final_clip = video_clip.set_audio(final_audio)
final_clip.write_videofile(final_video_dir)

# if error in audio file:
# new_audio = AudioFileClip(final_audio_path)
# final_clip = video_clip.set_audio(new_audio)
# Re-open the background track for the intro/watermark section below.
background_audio_clip = AudioFileClip(source_audio_path)

# creating text clip
text = '''
This clip shows how to open terminal.
'''
intro_duration = 5
# Full-frame white title card sized to match the video's dimensions.
intro_text = TextClip(txt=text,
                      color='white',
                      fontsize=70,
                      size=video_clip.size)
intro_text = intro_text.set_fps(video_clip.fps)
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_position('center')

# Play the first intro_duration seconds of the background track under the title.
intro_music = background_audio_clip.subclip(t_start=0, t_end=intro_duration)
intro_text = intro_text.set_audio(intro_music)

# NOTE(review): overlay_text_dir is not defined in this fragment — confirm
# it is created earlier in the script before this line runs.
intro_video_dir = os.path.join(overlay_text_dir, 'intro-video.mp4')
intro_text.write_videofile(intro_video_dir)

# overlaying text on the original video
w, h = video_clip.size
# Right-aligned, 30px-tall watermark strip pinned to the bottom edge,
# matching the video's width and duration.
watermark_text = TextClip(txt='CFE',
                          fontsize=30,
                          align='East',
                          color='white',
                          size=(w, 30))
watermark_text = watermark_text.set_fps(video_clip.fps)
watermark_text = watermark_text.set_duration(video_clip.duration)
watermark_text = watermark_text.set_position('bottom')
예제 #31
0
# Stop the camera and fetch the recorded file before releasing the device.
video_filename = camera.stop_recording()
del camera

# Ask the user whether to keep the take.
# FIX: `raw_input` is Python 2 only; the rest of this script is Python 3
# (print() calls elsewhere use function syntax), so use built-in input().
choice = None
while True:
    choice = input("type [y]es to keep, or [n]o to abandon...").strip()
    if choice in ("y", "yes"):
        break
    elif choice in ("n", "no"):
        print("abandoned take")
        exit(0)

print("loading video clip into moviepy")
video_clip = VideoFileClip(video_filename)
print("got video clip in moviepy")
audio_clip = AudioFileClip(audio_filename)
print("got clips")
# Drop the leading audio so it lines up with the video; the two devices
# start recording TIME_DELAY_AUDIO_VIDEO seconds apart.
audio_clip = audio_clip.subclip(TIME_DELAY_AUDIO_VIDEO)
print("chopped audio")
video_clip = video_clip.set_audio(audio_clip)
print("set the audio")
# Add a half-second fade in and fade out to the video.
video_clip = video_clip.fx(vfx.fx.fadein.fadein,
                           duration=0.5).fx(vfx.fx.fadeout.fadeout,
                                            duration=0.5)
print("added effects")
video_clip.write_videofile(
    os.path.join(SAVE_FOLDER, add_time("mixed_video.mp4")))
print("wrote video")
예제 #32
0
    def transcribe_bytes(self, byte_data, clip_length=None, audio_mimetype='', compress=True):

        '''
            a method to transcribe text from audio byte data

            if clip_length is None, the entire buffer is sent to the speech
            client in a single request; otherwise the buffer is saved to
            disk, optionally compressed to audio/ogg, cut into
            clip_length-second segments with moviepy and the segments are
            transcribed via self._transcribe_files

        :param byte_data: byte data in buffer with audio data
        :param clip_length: [optional] integer with seconds to divide clips into
        :param compress: [optional] boolean to convert file to audio/ogg
        :param audio_mimetype: [optional] string with byte data mimetype
        :return: dictionary with transcribed text segments in 'segments' key
        '''

        title = '%s.transcribe_bytes' % self.__class__.__name__
        bytes_arg = "%s(byte_data=b'...')" % title

    # validate inputs
        input_fields = {
            'clip_length': clip_length,
            'audio_mimetype': audio_mimetype
        }
        for key, value in input_fields.items():
            if value:
                object_title = '%s(%s=%s)' % (title, key, str(value))
                self.fields.validate(value, '.%s' % key, object_title)

    # validate data mimetype
        # determine the file extension: look the declared mimetype up in the
        # audio_extensions schema, or fall back to sniffing the bytes with magic
        if audio_mimetype:
            file_extension = ''
            for key, value in self.fields.schema['audio_extensions'].items():
                if value['mimetype'] == audio_mimetype:
                    file_extension = value['extension']
        else:
            if self.magic:
                magic_details = self.magic.analyze(byte_data=byte_data)
                file_name = magic_details['name']
                if not file_name:
                    file_name = 'audio'
                file_name += magic_details['extension']
                ext_kwargs = {
                    'file_name': file_name,
                    'extension_map': self.fields.schema['audio_extensions'],
                    'method_title': title,
                    'argument_title': 'byte_data'
                }
                file_details = self._validate_extension(**ext_kwargs)
                audio_mimetype = file_details['mimetype']
                file_extension = file_details['extension']
            else:
                raise ValueError('%s argument requires audio_mimetype (or magic) to determine its mimetype.' % bytes_arg)

    # construct default return
        transcript_details = {
            'error': '',
            'segments': []
        }

    # transcribe entire buffer
        if clip_length is None:
            try:
                transcript = self.client.recognize(byte_data, audio_mimetype, continuous=True)
                if transcript['results']:
                    for result in transcript['results']:
                        transcript_details['segments'].append(result['alternatives'][0])
            except Exception as err:
                # NOTE(review): the exception object itself (not str(err)) is
                # stored in 'error' — confirm callers expect an object here
                transcript_details['error'] = err

    # save buffer to disk, segment and transcribe in multiple threads
        else:
            import os
            from time import sleep
            from math import ceil
            from moviepy.editor import AudioFileClip

        # save buffer to disk
            clip_folder = self._create_folder()
            full_name = 'audio_full.%s' % file_extension
            full_path = os.path.join(clip_folder, full_name)
            with open(full_path, 'wb') as f:
                f.write(byte_data)
                # redundant: the `with` block already closes the file
                f.close()

        # convert file
            if compress:
                file_name, file_extension = os.path.splitext(full_path)
                if file_extension != '.ogg':
                    full_path = self.convert_audio(full_path, 'audio/ogg', True)
                    file_details = {
                        'name': 'audio_full.ogg',
                        'extension': '.ogg',
                        'mimetype': 'audio/ogg'
                    }
                    audio_mimetype = file_details['mimetype']
                    file_extension = file_details['extension']

        # open audio file
            # retry briefly if another process still holds the file handle
            # (a PermissionError right after conversion)
            count = 0
            retry_count = 10
            while True:
                try:
                    audio = AudioFileClip(full_path)
                    break
                except PermissionError:
                    sleep(.05)
                    count += 1
                    if count > retry_count:
                        raise
            audio_duration = audio.duration

        # construct list of files to transcribe
            file_list = []
            if audio_duration < clip_length:
                file_list.append(full_path)
            else:

        # create temporary audio files
                count = 0
                t_start = 0
                while t_start < audio_duration:
                    t_end = t_start + clip_length
                    if t_end > audio_duration:
                        # final partial segment runs to the (rounded-up) end
                        t_end = ceil(audio_duration)
                        segment = audio.subclip(t_start)
                    else:
                        segment = audio.subclip(t_start, t_end)
                    # NOTE(review): after compression file_extension is '.ogg'
                    # (leading dot kept by os.path.splitext), so clip_name
                    # becomes 'audioN..ogg' — confirm the double dot is intended
                    clip_name = 'audio%s.%s' % (count, file_extension)
                    clip_path = os.path.join(clip_folder, clip_name)
                    segment.write_audiofile(clip_path, verbose=False)
                    file_list.append(clip_path)
                    count += 1
                    t_start = t_end

        # run file transcription method
            transcript_details = self._transcribe_files(file_list, audio_mimetype)

        # remove temp files
            if len(file_list) > 1:
                from labpack.records.settings import remove_settings
                for file in file_list:
                    remove_settings(file, remove_dir=True)

        return transcript_details
예제 #33
0
    def transcribe_url(self, file_url, clip_length=None, compress=True):

        '''
            a method to transcribe the text from an audio url

            the file is downloaded into a buffer; if clip_length is None the
            whole buffer is transcribed in one request, otherwise it is
            saved to disk, optionally compressed to audio/ogg, cut into
            clip_length-second segments with moviepy and transcribed via
            self._transcribe_files

        :param file_url: string with url to audio file on web
        :param clip_length: [optional] integer with seconds to divide clips into
        :param compress: [optional] boolean to convert file to audio/ogg
        :return: dictionary with transcribed text segments in 'segments' key
        '''

        title = '%s.transcribe_url' % self.__class__.__name__

    # validate inputs
        input_fields = {
            'file_url': file_url,
        }
        if clip_length is not None:
            input_fields['clip_length'] = clip_length
        for key, value in input_fields.items():
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # construct empty file details
        file_details = {
            'name': '',
            'mimetype': '',
            'extension': ''
        }

    # retrieve file name
        # the file name is the last segment of the URL path
        from urllib.parse import urlsplit
        file_arg = '%s(file_url=%s)' % (title, str(file_url))
        url_path = urlsplit(file_url).path
        path_segments = url_path.split('/')
        file_details['name'] = path_segments[-1]
        if not file_details['name']:
            raise ValueError('%s must have a file name.' % file_arg)

    # validate file extension
        ext_kwargs = {
            'file_name': file_details['name'],
            'extension_map': self.fields.schema['audio_extensions'],
            'method_title': title,
            'argument_title': 'file_url'
        }
        file_details = self._validate_extension(**ext_kwargs)

    # retrieve file data
        file_buffer = self._get_data(file_url, file_details['name'], title, 'file_url')
        if isinstance(file_buffer, dict):
            raise Exception(str(file_buffer))

    # validate file mimetype
        if self.magic:
            file_data = file_buffer.getvalue()
            magic_details = self.magic.analyze(byte_data=file_data)
            # strip the 'audio/' prefix before the substring comparison
            mimetype_text = file_details['mimetype'][6:]
            if mimetype_text not in magic_details['mimetype']:
                raise ValueError('%s byte data mimetype %s does not match %s file extension.' % (file_arg, magic_details['mimetype'], file_details['extension']))

    # construct default return
        transcript_details = {
            'error': '',
            'segments': []
        }

    # transcribe entire buffer
        if clip_length is None:
            try:
                file_data = file_buffer.getvalue()
                transcript = self.client.recognize(file_data, file_details['mimetype'], continuous=True)
                if transcript['results']:
                    for result in transcript['results']:
                        transcript_details['segments'].append(result['alternatives'][0])
            except Exception as err:
                # NOTE(review): the exception object itself (not str(err)) is
                # stored in 'error' — confirm callers expect an object here
                transcript_details['error'] = err

    # save buffer to disk, segment and transcribe in multiple threads
        else:
            import os
            from time import sleep
            from math import ceil
            from moviepy.editor import AudioFileClip

        # save buffer to disk
            clip_folder = self._create_folder()
            full_name = 'audio_full.%s' % file_details['extension']
            full_path = os.path.join(clip_folder, full_name)
            with open(full_path, 'wb') as f:
                f.write(file_buffer.getvalue())
                # redundant: the `with` block already closes the file
                f.close()

        # convert file
            if compress:
                file_name, file_extension = os.path.splitext(full_path)
                if file_extension != '.ogg':
                    full_path = self.convert_audio(full_path, 'audio/ogg', True)
                    file_details = {
                        'name': 'audio_full.ogg',
                        'extension': '.ogg',
                        'mimetype': 'audio/ogg'
                    }

        # open audio file
            # retry briefly if another process still holds the file handle
            # (a PermissionError right after conversion)
            count = 0
            retry_count = 10
            while True:
                try:
                    audio = AudioFileClip(full_path)
                    break
                except PermissionError:
                    sleep(.05)
                    count += 1
                    if count > retry_count:
                        raise
            audio_duration = audio.duration

        # construct list of files to transcribe
            file_list = []
            if audio_duration < clip_length:
                file_list.append(full_path)
            else:

        # create temporary audio files
                count = 0
                t_start = 0
                while t_start < audio_duration:
                    t_end = t_start + clip_length
                    if t_end > audio_duration:
                        # final partial segment runs to the (rounded-up) end
                        t_end = ceil(audio_duration)
                        segment = audio.subclip(t_start)
                    else:
                        segment = audio.subclip(t_start, t_end)
                    # NOTE(review): after compression file_details['extension']
                    # is '.ogg' (with leading dot), so clip_name becomes
                    # 'audioN..ogg' — confirm the double dot is intended
                    clip_name = 'audio%s.%s' % (count, file_details['extension'])
                    clip_path = os.path.join(clip_folder, clip_name)
                    segment.write_audiofile(clip_path, verbose=False)
                    file_list.append(clip_path)
                    count += 1
                    t_start = t_end

        # run file transcription method
            transcript_details = self._transcribe_files(file_list, file_details['mimetype'])

        # remove temp files
            if len(file_list) > 1:
                from labpack.records.settings import remove_settings
                for file in file_list:
                    remove_settings(file, remove_dir=True)

        return transcript_details
예제 #34
0
    def transcribe_file(self, file_path, clip_length=10, compress=True):

        '''
            a method to transcribe the text from an audio file

            the file is optionally converted to audio/ogg, then cut into
            clip_length-second segments with moviepy and each segment is
            transcribed via self._transcribe_files

        EXAMPLE: https://github.com/dannguyen/watson-word-watcher

        :param file_path: string with path to audio file on localhost
        :param clip_length: [optional] integer with seconds to divide clips into
        :param compress: [optional] boolean to convert file to audio/ogg
        :return: dictionary with transcribed text segments in 'segments' key
        '''

        title = '%s.transcribe_file' % self.__class__.__name__

    # validate inputs
        input_fields = {
            'file_path': file_path,
            'clip_length': clip_length
        }
        for key, value in input_fields.items():
            object_title = '%s(%s=%s)' % (title, key, str(value))
            self.fields.validate(value, '.%s' % key, object_title)

    # run conversion
        import os
        if compress:
            file_name, file_extension = os.path.splitext(file_path)
            if file_extension != '.ogg':
                file_path = self.convert_audio(file_path, 'audio/ogg', True)

    # construct empty file details
        file_details = {
            'name': '',
            'mimetype': '',
            'extension': ''
        }

    # retrieve file name
        file_arg = '%s(file_path=%s)' % (title, str(file_path))
        # os.path.split always returns a 2-tuple, so the basename is simply
        # its second element (the original assigned split_file[0] and then
        # unconditionally overwrote it with split_file[1] — dead code)
        file_details['name'] = os.path.split(file_path)[1]
        if not file_details['name']:
            raise ValueError('%s must have a file name.' % file_arg)

    # validate file extension
        ext_kwargs = {
            'file_name': file_details['name'],
            'extension_map': self.fields.schema['audio_extensions'],
            'method_title': title,
            'argument_title': file_path
        }
        regex_details = self._validate_extension(**ext_kwargs)
        file_details.update(**regex_details)

    # retrieve byte data
        if not os.path.exists(file_path):
            raise ValueError('%s is not a valid file path.' % file_arg)

    # validate file mimetype
        if self.magic:
            magic_details = self.magic.analyze(file_path)
            # strip the 'audio/' prefix before the substring comparison
            mimetype_text = file_details['mimetype'][6:]
            if mimetype_text not in magic_details['mimetype']:
                raise ValueError('%s byte data mimetype %s does not match %s file extension.' % (file_arg, magic_details['mimetype'], file_details['extension']))

    # import dependencies
        from math import ceil
        from moviepy.editor import AudioFileClip

    # open audio file
        audio = AudioFileClip(file_path)
        audio_duration = audio.duration

    # construct list of files to transcribe
        file_list = []
        if audio_duration < clip_length:
            file_list.append(file_path)
        else:
    # create temporary audio files
            clip_folder = self._create_folder()
            count = 0
            t_start = 0
            while t_start < audio_duration:
                t_end = t_start + clip_length
                if t_end > audio_duration:
                    # final partial segment runs to the (rounded-up) end
                    t_end = ceil(audio_duration)
                    segment = audio.subclip(t_start)
                else:
                    segment = audio.subclip(t_start, t_end)
                # NOTE(review): if file_details['extension'] carries a leading
                # dot, clip_name becomes 'audioN..ogg' — confirm intended
                clip_name = 'audio%s.%s' % (count, file_details['extension'])
                clip_path = os.path.join(clip_folder, clip_name)
                segment.write_audiofile(clip_path, verbose=False)
                file_list.append(clip_path)
                count += 1
                t_start = t_end

    # run file transcription method
        transcription_result = self._transcribe_files(file_list, file_details['mimetype'])

    # remove temp files
        if len(file_list) > 1:
            from labpack.records.settings import remove_settings
            for file in file_list:
                remove_settings(file, remove_dir=True)

        return transcription_result
예제 #35
0
import os
from conf import SAMPLE_INPUTS, SAMPLE_OUTPUTS
from moviepy.editor import VideoFileClip, AudioFileClip, TextClip, CompositeVideoClip, concatenate_videoclips
from PIL import Image

# Collect the input/output paths interactively.
org_video_path = input("Enter the video path: ")
final_video_path = input("Enter the output folder path: ")
final_video_name = input("Enter the final video name: ")
# FIX: the prompt was a copy-paste of the previous line ("Enter the final
# video name: ") even though this line reads the audio file path.
audio_path = input("Enter the audio path: ")
watermark = input("Enter the watermark: ")

final_video_path = os.path.join(final_video_path, final_video_name)

video_clip = VideoFileClip(org_video_path)
audio_clip = AudioFileClip(audio_path)
# Use seconds 25-40 of the soundtrack for the final video.
final_audio = audio_clip.subclip(25, 40)

w, h = video_clip.size
fps = video_clip.fps

# Five-second, full-frame intro title card.
intro_duration = 5
intro_text = TextClip("Hello world!",
                      fontsize=70,
                      color='white',
                      size=video_clip.size)
intro_text = intro_text.set_duration(intro_duration)
intro_text = intro_text.set_fps(fps)
intro_text = intro_text.set_pos("center")
# to add audio to your intro:
# parse the arguments
# Pull the parsed CLI options into locals.
args = parser.parse_args()
video_file = args.video_file
audio_file = args.audio_file
start, end = args.start, args.end
composite = args.composite
volume_factor = args.volume_factor

# Log the parameters that were passed.
print(vars(args))

# Open the source video and the replacement audio track, scaling the
# track's volume by the requested factor.
video_clip = VideoFileClip(video_file)
audio_clip = AudioFileClip(audio_file).volumex(volume_factor)

# When no end time is given, default to the video clip's own end so the
# audio never outlasts the picture.
end = end if end else video_clip.end

# Keep only the [start, end] window of the audio.
audio_clip = audio_clip.subclip(start, end)

# Either layer the new track over the video's existing audio, or replace
# the existing audio entirely.
final_audio = (CompositeAudioClip([video_clip.audio, audio_clip])
               if composite else audio_clip)

# Attach the chosen audio and render the result.
final_clip = video_clip.set_audio(final_audio)
final_clip.write_videofile("final.mp4")