Example #1
    def download_Audio(self):
        yt_link = self.linkAudio.get()
        try:
            if not yt_link:
                messagebox.showerror("Notice", "Empty field")
            else:
                path_audio = 'C:/Temp/Musicas'

                yt = YouTube(yt_link)
                audio = yt.streams.filter(only_audio=True)[0]

                # Download the audio-only stream, then re-encode it as mp3.
                mp4_file = audio.download(path_audio)
                mp3_file = audio.default_filename[:-4] + ".mp3"

                audio_clip = AudioFileClip(mp4_file, fps=44100)
                audio_clip.write_audiofile(path_audio + '/' + mp3_file,
                                           fps=44100)
                audio_clip.close()

                # The downloaded mp4 is no longer needed once the mp3 exists.
                os.remove(mp4_file)

                messagebox.showinfo("Notice", "Download complete!")
                self.linkAudio.delete(0, END)
        except Exception:
            messagebox.showerror("Error", "Invalid file for download!")
Example #2
class AudioStim(Stim):
    ''' Represents an audio clip. For now, only handles wav files.
    Args:
        filename (str): Path to audio file.
        onset (float): Optional onset of the audio file (in seconds) with
            respect to some more general context or timeline the user wishes
            to keep track of.
        sampling_rate (int): Sampling rate of clip, in hertz.
        url (str): Optional url to read contents from.
        clip (AudioFileClip): Optional moviepy AudioFileClip to initialize
            from.
    '''
    def __init__(self,
                 filename=None,
                 onset=None,
                 sampling_rate=44100,
                 url=None,
                 clip=None):
        if url is not None:
            filename = url
        self.filename = filename
        self.sampling_rate = sampling_rate
        self.clip = clip

        if self.clip is None:
            self._load_clip()

        # Small default buffer isn't ideal, but moviepy has persistent issues
        # with some files otherwise; see
        # https://github.com/Zulko/moviepy/issues/246
        self.data = self.clip.to_soundarray(buffersize=1000)
        duration = self.clip.duration

        if self.data.ndim > 1:
            # Average channels to make data mono
            self.data = self.data.mean(axis=1)

        super(AudioStim, self).__init__(filename,
                                        onset=onset,
                                        duration=duration)

    def _load_clip(self):
        self.clip = AudioFileClip(self.filename, fps=self.sampling_rate)

    def __getstate__(self):
        d = self.__dict__.copy()
        d['clip'] = None
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self._load_clip()

    @contextmanager
    def get_filename(self):
        if self.filename is None or not os.path.exists(self.filename):
            tf = tempfile.mktemp() + '.wav'
            self.clip.write_audiofile(tf)
            try:
                yield tf
            finally:
                # Clean up the temporary file even if the caller raises.
                os.remove(tf)
        else:
            yield self.filename
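The __getstate__/__setstate__ pair exists because the wrapped AudioFileClip holds an ffmpeg reader subprocess and cannot be pickled; it is dropped on save and rebuilt from the filename on load. A minimal round-trip sketch (the 'speech.wav' path is a placeholder):

import pickle

stim = AudioStim(filename='speech.wav')      # placeholder file
restored = pickle.loads(pickle.dumps(stim))  # __setstate__ re-runs _load_clip()
assert restored.clip is not None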
Example #3
 def handleDLToMp4(self):
     try:
         appStatus.set('[1/2] Downloading...')
         root.update()
         getYTVideo = YouTube(ytLink.get())
         composedFilePath = f'{self.usrDownloadPath}{sep}{getYTVideo.title}.mp4'
         getYTVideo.streams.filter(adaptive=True,
                                   type='video').first().download(
                                       self.usrDownloadPath,
                                       filename='tmpVidFile')
         getYTVideo.streams.filter(adaptive=True,
                                   type='audio').first().download(
                                       self.usrDownloadPath,
                                       filename='tmpAudFile')
         tmpVideoFile = VideoFileClip(self.tmpVideoFilePath)
         tmpAudioFile = AudioFileClip(self.tmpAudioFilePath)
         appStatus.set('[2/2] Converting & mounting file...')
         ytLink.set('This step may take some minutes')
         root.update()
         mountClip = tmpVideoFile.set_audio(tmpAudioFile)
         mountClip.write_videofile(composedFilePath, fps=30)
         tmpVideoFile.close()
         tmpAudioFile.close()
         remove(self.tmpVideoFilePath)
         remove(self.tmpAudioFilePath)
         appStatus.set('Done!')
         ytLink.set('Check your "Downloads" directory.')
         root.update()
     except Exception as e:
         print(e)
         appStatus.set('Whoops, something went wrong!')
         ytLink.set(value='Invalid link!')
         root.update()
Example #4
    def run(self, outputVideoName):
        # Iterate over the break points, applying the speed-up at each one
        BgmName = 'BGM/model2-BGM.wav'
        for breakPoint in self.breakPoints:
            print(' *' * 12, 'Processing:', breakPoint, ' *' * 12)
            END = self.clip.end

            clipFirst = self.clip.subclip(self.START,
                                          breakPoint - self.M / self.speedRate)
            clipMiddle = speedx(self.clip.subclip(
                breakPoint - self.M / self.speedRate,
                breakPoint - self.M / self.speedRate + self.M),
                                factor=self.speedRate)
            clipLast = self.clip.subclip(
                breakPoint - self.M / self.speedRate + self.M, END)

            self.clip = concatenate_videoclips(
                [clipFirst, clipMiddle, clipLast])

        length = 0
        sound = AudioFileClip(BgmName)

        if self.clip.end < sound.end:
            length = self.clip.end
            sound = sound.subclip(0, length)
        else:
            length = sound.end
            self.clip = self.clip.subclip(0, length)

        self.clip = self.clip.set_audio(sound)
        self.clip.fps = 30
        self.writeClip(outputVideoName)
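Each iteration takes an M-second window of the source starting M/speedRate before the break point and plays it back speedRate times faster, so it fills only M/speedRate seconds of output. A toy check of that arithmetic (a sketch against the moviepy 1.x API; 'input.mp4' is a placeholder):

from moviepy.editor import VideoFileClip
from moviepy.video.fx.speedx import speedx

clip = VideoFileClip('input.mp4')
M, rate, bp = 4.0, 2.0, 10.0
middle = speedx(clip.subclip(bp - M / rate, bp - M / rate + M), factor=rate)
assert abs(middle.duration - M / rate) < 1e-6  # 4 s of source in 2 s of output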
Example #5
    def __init__(
        self,
        filename,
        has_mask=False,
        audio=True,
        audio_buffersize=200000,
        target_resolution=None,
        resize_algorithm="bicubic",
        audio_fps=44100,
        audio_nbytes=2,
        fps_source="tbr",
    ):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(
            filename,
            pix_fmt=pix_fmt,
            target_resolution=target_resolution,
            resize_algo=resize_algorithm,
            fps_source=fps_source,
        )

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]

            def mask_mf(t):
                return self.reader.get_frame(t)[:, :, 3] / 255.0

            self.mask = VideoClip(
                ismask=True, make_frame=mask_mf).set_duration(self.duration)
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos["audio_found"]:

            self.audio = AudioFileClip(
                filename,
                buffersize=audio_buffersize,
                fps=audio_fps,
                nbytes=audio_nbytes,
            )
Example #6
def test_audio_coreader():
    if sys.platform.startswith("win"):
        pytest.skip("Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error")

    sound = AudioFileClip("media/crunching.mp3")
    sound = sound.subclip(1, 4)
    sound2 = AudioFileClip("media/crunching.mp3")
    sound2.write_audiofile(os.path.join(TMP_DIR, "coreader.mp3"))
Example #7
def test_audiofileclip_concat():
    sound = AudioFileClip("media/crunching.mp3")
    sound = sound.subclip(1, 4)

    # Checks it works with videos as well
    sound2 = AudioFileClip("media/big_buck_bunny_432_433.webm")
    concat = concatenate_audioclips((sound, sound2))

    concat.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
Example #8
def video2Audio(video_file):
    '''Takes in any extension supported by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov, etc'''
    audio = AudioFileClip(video_file, nbytes=2, fps=16000)
    sound_array = audio.to_soundarray(fps=16000, quantize=True, nbytes=2)

    if audio.nchannels == 2:
        sound_array = sound_array.sum(axis=1) / 2
        sound_array = sound_array.astype(np.int16)

    return sound_array, audio.duration
Example #9
    def convert(self, listFiles):  # convert mp4 files to mp3

        for file in listFiles:

            root, ext = os.path.splitext(file)
            if ext == '.mp4':
                mp4_path = os.path.join(self.pathFolder, file)
                mp3_path = os.path.join(self.pathFolder, root + '.mp3')
                new_file = AudioFileClip(mp4_path)
                new_file.write_audiofile(mp3_path)
                new_file.close()  # release the ffmpeg reader before deleting the source
                os.remove(mp4_path)
Example #10
    def read_single_wav(self, path):
        clip = AudioFileClip(str(path))

        subsampled_audio = clip.set_fps(16000)
        chunk_size = 640

        audio = np.array(list(subsampled_audio.iter_frames())).mean(1)
        audio = np.pad(audio, (0, chunk_size - audio.shape[0] % chunk_size),
                       'constant')
        audio = audio.reshape(-1, chunk_size)

        return audio.astype(np.float32)
Example #11
def test_audiofileclip_concat():
    if sys.platform.startswith("win"):
        pytest.skip("Temporarily skipping on windows because otherwise test suite fails with Invalid Handle Error")

    sound = AudioFileClip("media/crunching.mp3")
    sound = sound.subclip(1, 4)

    # Checks it works with videos as well
    sound2 = AudioFileClip("media/big_buck_bunny_432_433.webm")
    concat = concatenate_audioclips((sound, sound2))

    concat.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))
Example #12
 def add_music(
     self
 ):  # Creates a new video file from the temporary output file and adds the selected audio to the background.
     my_clip = VideoFileClip(".".join(self.save_path.split(".")[:-1]) +
                             "_tmp.avi")
     music = AudioFileClip(self.mus_path)
     if my_clip.duration > music.duration:
         duration = music.duration
     else:
         duration = my_clip.duration
     my_clip = my_clip.set_audio(music.set_duration(duration))
     my_clip.write_videofile(self.save_path, fps=self.fps)
Example #13
def plot(audio_path, plot_path, prediction, sample_time=None, fps=30):
    render_animation(fps, output='temp.mp4', azim=75, prediction=prediction)
    if sample_time is not None:
        audioclip = AudioFileClip(audio_path,
                                  fps=44100).subclip(sample_time[0],
                                                     sample_time[1])
    else:
        audioclip = AudioFileClip(audio_path, fps=44100)
    videoclip = VideoFileClip('temp.mp4')
    videoclip.audio = audioclip
    videoclip.write_videofile(plot_path, fps=fps)
    os.remove('temp.mp4')
Example #14
def test_concatenate_audiofileclips():
    clip1 = AudioFileClip("media/crunching.mp3").subclip(1, 4)

    # Checks it works with videos as well
    clip2 = AudioFileClip("media/big_buck_bunny_432_433.webm")
    concat_clip = concatenate_audioclips((clip1, clip2))

    concat_clip.write_audiofile(os.path.join(TMP_DIR, "concat_audio_file.mp3"))

    assert concat_clip.duration == clip1.duration + clip2.duration

    close_all_clips(locals())
Example #15
 def mash(self, audioPath):
     readPath = self.path + "/mashup1" + ".mp4"
     mashup = VideoFileClip(readPath)
     if self.numMashups == 1:
         mashup = mashup.set_audio(AudioFileClip(audioPath))
         mashup.write_videofile(self.path + "/mashup.mp4")
     else:
         for i in range(2, self.numMashups + 1):
             readPath = self.path + "/mashup" + str(i) + ".mp4"
             tempClip = VideoFileClip(readPath)
             mashup = concatenate_videoclips([mashup, tempClip])
         mashup = mashup.set_audio(AudioFileClip(audioPath))
         mashup.write_videofile(self.path + "/mashup.mp4")
Example #16
    def add_sfx(self, clip):
        logger.info('Adding sfx...')
        new_audio = [clip.audio]
        for i in range(self.sfx_num):
            sfx_path = config.SFX_PATH + str(i) + '.mp3'
            sfx_clip = AudioFileClip(sfx_path)

            if sfx_clip.duration > clip.duration:
                sfx_clip = sfx_clip.set_duration(clip.duration)
            new_audio.append(sfx_clip)

        return clip.set_audio(CompositeAudioClip(new_audio))
Example #17
def test_issue_470():
    audio_clip = AudioFileClip("media/crunching.mp3")

    # end_time is out of bounds
    subclip = audio_clip.subclip(start_time=6, end_time=9)

    with pytest.raises(IOError):
        subclip.write_audiofile(os.path.join(TMP_DIR, "issue_470.wav"),
                                write_logfile=True)

    # but this one should work..
    subclip = audio_clip.subclip(start_time=6, end_time=8)
    subclip.write_audiofile(os.path.join(TMP_DIR, "issue_470.wav"),
                            write_logfile=True)
Example #18
    def cut_audio_from_video(self):
        """
        Extract the audio track from a video, save it as an mp3 in the
        project folder, and wrap it in an Audio object.

        @return: an Audio object containing the path to the extracted file
        """
    
        folder = Path(self.folder_path, self.folder_name)
        audio_from_video = 'audio.mp3'
        audio = AudioFileClip(self.video_data)
        audio.write_audiofile(os.path.join(folder, audio_from_video), verbose=False, logger=None)
        extracted_audio = Path(folder, audio_from_video)
        self.audio_files.append(extracted_audio)
        return Audio(extracted_audio)
Example #19
 def create_video(dependencies, targets):
     backing_track_path = output_dir_path / 'accompaniment.wav'
     with open(sync_map_path(output_dir_path), encoding='utf-8') as sync_json_file, \
         open(silences_path(output_dir_path), encoding='utf-8') as silence_json_file:
         lyric_clips = list(
             _generate_lyric_clips(
                 json.load(sync_json_file),
                 json.load(silence_json_file)
             )
         )
     backing_track_clip = AudioFileClip(str(backing_track_path))
     background_clip = ColorClip(
         size=(1024, 768), color=[0, 0, 0],
         duration=backing_track_clip.duration
     )
     karaoke = (
         CompositeVideoClip([background_clip] + lyric_clips).
         set_duration(backing_track_clip.duration).
         set_audio(backing_track_clip)
     )
     karaoke.write_videofile(
         str(targets[0]),
         fps=10,
         # Workaround for missing audio
         # https://github.com/Zulko/moviepy/issues/820
         codec='libx264',
         audio_codec='aac',
         temp_audiofile='temp-audio.m4a',
         remove_temp=True
     )
Example #20
def video():
    snd = AudioFileClip("space.mp3")
    clip = VideoClip(c.animation, duration=snd.duration / 30.)

    clip = clip.set_audio(snd).set_duration(snd.duration / 30.)
    clip.write_videofile('cam.mp4', fps=24)
Example #21
def add_voice(video_path, term):
    videoclip = VideoFileClip(video_path)
    audioclip = AudioFileClip(speech(term))
    new_audioclip = CompositeAudioClip([videoclip.audio, audioclip])
    videoclip.audio = new_audioclip
    videoclip.write_videofile(video_path)
    os.remove('/home/benjamim/PycharmProjects/Nyte/ImagesAndSounds/' + term[0:3] + '.mp3')
Example #22
def create_videoclip(frames, duration, frame_rate, audio_in=None):
    """
    Create a VideoClip object
    :param frames: a iterator returning numpy frame objects
    :param duration: Duration of clip in seconds
    :param audio_in: file name of audio file, or None
    :return:
    """
    def make_frame(t):
        nonlocal current_frame
        nonlocal current_frame_index
        required_frame_index = int(t * frame_rate)
        if required_frame_index > current_frame_index:
            current_frame = next(frames)
            current_frame_index += 1
        rgb_frame = np.empty(
            (current_frame.shape[0], current_frame.shape[1], 3),
            dtype=np.uint8)
        rgb_frame[:, :] = current_frame[:, :, 0:3]
        return rgb_frame

    current_frame = next(frames)
    current_frame_index = 0
    video_clip = VideoClip(make_frame, duration=duration)
    if audio_in:
        print("Adding audio clip", audio_in)
        audio_clip = AudioFileClip(audio_in).subclip(0, duration)
        video_clip = video_clip.set_audio(audio_clip)
    return video_clip
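One way to exercise create_videoclip is with a generator of frames; the sketch below renders two seconds of random RGBA noise at 25 fps (all names here are illustrative):

import numpy as np

def noise_frames():
    # Endless generator; make_frame() pulls one frame per frame_rate tick.
    while True:
        yield np.random.randint(0, 255, (64, 64, 4), dtype=np.uint8)

clip = create_videoclip(noise_frames(), duration=2, frame_rate=25)
clip.write_videofile('noise.mp4', fps=25)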
Example #23
    def __init__(self,
                 filename,
                 ismask=False,
                 has_mask=False,
                 audio=True,
                 audio_buffersize=200000,
                 audio_fps=44100,
                 audio_nbytes=2,
                 verbose=False):

        VideoClip.__init__(self, ismask)

        # Make a reader
        pix_fmt = "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.get_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:
            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
Example #24
    def saveVideo(self):
        count = 0
        four_cc = cv2.VideoWriter_fourcc(*'DIVX')
        name = QFileDialog.getSaveFileName(self, "Save File", "output.avi",
                                           "Videos(*.avi)")
        # if user doesn't select file directory
        if not name[0]:
            return

        out = cv2.VideoWriter(name[0], four_cc, self.fps,
                              (int(self.w), int(self.h)))

        self.progress.setMaximum(len(self.frameList))

        while count < len(self.frameList):
            (ret, frame) = self.frameList[count]

            if ret:
                count += 1
                out.write(frame)
                self.progress.setValue(count)
            else:
                break

        out.release()

        audio = AudioFileClip(self.name)
        video = VideoFileClip(name[0])
        result = video.set_audio(audio)
        result.write_videofile(name[0].replace(".avi", ".mp4"))
Example #25
def test_PR_1137_audio():
    """
    Test support for path-like objects as arguments for AudioFileClip.
    """
    with AudioFileClip(Path("media/crunching.mp3")) as audio:
        audio.write_audiofile(Path(TMP_DIR) / "pathlike.mp3")
        assert isinstance(audio.filename, str)
Example #26
def render():
    if img1 is None:
        alert = QMessageBox()
        alert.setText('Please select the 64-bit image.')
        alert.exec_()
        return

    if not pixelized:
        alert = QMessageBox()
        alert.setText('Please pixelate the image first.')
        alert.exec_()
        return

    if img2 is None:
        alert = QMessageBox()
        alert.setText('Please select the 128-bit image.')
        alert.exec_()
        return

    image = Image.open(img2)
    resize(image).save('./resource/128bit.png')
    clips = [ImageClip(f'./resource/{m}').set_duration(1) for m in ['1bit.png', '2bit.png', '4bit.png', '8bit.png']]
    clips.append(ImageClip('./resource/16bit.png').set_duration(1.6))
    clips.append(ImageClip('./resource/32bit.png').set_duration(1.8))
    clips.append(ImageClip('./resource/64bit.png').set_duration(2))
    clips.append(ImageClip('./resource/128bit.png').set_duration(1))
    concat_clip = concatenate_videoclips(clips, method="compose")
    concat_clip.audio = AudioFileClip(r"./resource/audio.mp3")
    concat_clip.write_videofile("result.mp4", fps=24)
    
    alert = QMessageBox()
    alert.setText('Rendering complete: result.mp4 has been created.')
    alert.exec_()
Example #27
def test_setaudio_with_audiofile(util):
    clip = ColorClip(size=(100, 60), color=(255, 0, 0), duration=0.5)
    audio = AudioFileClip("media/crunching.mp3").subclip(0, 0.5)
    clip = clip.with_audio(audio)
    location = os.path.join(util.TMP_DIR, "setaudiofile.mp4")
    clip.write_videofile(location, fps=24)
    assert os.path.isfile(location)
Example #28
def merge_video_audio(video_path, audio_path, outpath):
    """视频和音频合并"""
    audioclip = AudioFileClip(str(audio_path))
    videoclip = VideoFileClip(str(video_path))
    videoclip2 = videoclip.set_audio(audioclip)
    video = CompositeVideoClip([videoclip2])
    video.write_videofile(str(outpath), codec='mpeg4', fps=_fps)
Example #29
def test_audioclip(util, mono_wave):
    filename = os.path.join(util.TMP_DIR, "audioclip.mp3")
    audio = AudioClip(mono_wave(440), duration=2, fps=22050)
    audio.write_audiofile(filename, bitrate="16", logger=None)

    assert os.path.exists(filename)

    AudioFileClip(filename)
Example #30
    def openFile(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open Audio",
                                                  QDir.homePath(), "Audio(*.mp3;*.wav)")

        if fileName != '':
            self.pushButton_2.setEnabled(True)
            Window.audio_backend = AudioFileClip(fileName)
            self.pushButton_3.setEnabled(True)
Example #31
def define_audio(op):
    """ Define an audio clip from source file.

        source - absolute path to the file
    """
    clip = AudioFileClip(op.source)

    return clip
Example #32
    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize=200000,
                 target_resolution=None, resize_algorithm='bicubic',
                 audio_fps=44100, audio_nbytes=2, verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt= "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                         .set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)
Example #33
class VideoFileClip(VideoClip):

    """

    A video clip originating from a movie file. For instance: ::

        >>> clip = VideoFileClip("myHolidays.mp4")
        >>> clip.close()
        >>> with VideoFileClip("myMaskVideo.avi") as clip2:
        >>>    pass  # Implicit close called by context manager.


    Parameters
    ------------

    filename:
      The name of the video file. It can have any extension supported
      by ffmpeg: .ogv, .mp4, .mpeg, .avi, .mov etc.

    has_mask:
      Set this to 'True' if there is a mask included in the videofile.
      Video files rarely contain masks, but some video codecs enable
      that. For instance, if you have a MoviePy VideoClip with a mask you
      can save it to a videofile with a mask. (see also
      ``VideoClip.write_videofile`` for more details).

    audio:
      Set to `False` if the clip doesn't have any audio or if you do not
      wish to read the audio.

    target_resolution:
      Set to (desired_height, desired_width) to have ffmpeg resize the frames
      before returning them. This is much faster than streaming in high-res
      and then resizing. If either dimension is None, the frames are resized
      by keeping the existing aspect ratio.

    resize_algorithm:
      The algorithm used for resizing. Default: "bicubic", other popular
      options include "bilinear" and "fast_bilinear". For more information, see
      https://ffmpeg.org/ffmpeg-scaler.html

    fps_source:
      The fps value to collect from the metadata. Set by default to 'tbr', but
      can be set to 'fps', which may be helpful if importing slow-motion videos
      that get messed up otherwise.


    Attributes
    -----------

    filename:
      Name of the original video file.

    fps:
      Frames per second in the original file.
    
    
    Read docs for Clip() and VideoClip() for other, more generic, attributes.
    
    Lifetime
    --------
    
    Note that this creates subprocesses and locks files. If you construct one of these instances, you must call
    close() afterwards, or the subresources will not be cleaned up until the process ends.
    
    If copies are made, and close() is called on one, it may cause methods on the other copies to fail.  

    """

    def __init__(self, filename, has_mask=False,
                 audio=True, audio_buffersize=200000,
                 target_resolution=None, resize_algorithm='bicubic',
                 audio_fps=44100, audio_nbytes=2, verbose=False,
                 fps_source='tbr'):

        VideoClip.__init__(self)

        # Make a reader
        pix_fmt= "rgba" if has_mask else "rgb24"
        self.reader = FFMPEG_VideoReader(filename, pix_fmt=pix_fmt,
                                         target_resolution=target_resolution,
                                         resize_algo=resize_algorithm,
                                         fps_source=fps_source)

        # Make some of the reader's attributes accessible from the clip
        self.duration = self.reader.duration
        self.end = self.reader.duration

        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        self.filename = self.reader.filename

        if has_mask:

            self.make_frame = lambda t: self.reader.get_frame(t)[:, :, :3]
            mask_mf = lambda t: self.reader.get_frame(t)[:, :, 3] / 255.0
            self.mask = (VideoClip(ismask=True, make_frame=mask_mf)
                         .set_duration(self.duration))
            self.mask.fps = self.fps

        else:

            self.make_frame = lambda t: self.reader.get_frame(t)

        # Make a reader for the audio, if any.
        if audio and self.reader.infos['audio_found']:

            self.audio = AudioFileClip(filename,
                                       buffersize=audio_buffersize,
                                       fps=audio_fps,
                                       nbytes=audio_nbytes)

    def close(self):
        """ Close the internal reader. """
        if self.reader:
            self.reader.close()
            self.reader = None

        try:
            if self.audio:
                self.audio.close()
                self.audio = None
        except AttributeError:
            pass
Example #34
class AudioStim(Stim):

    ''' Represents an audio clip.

    Args:
        filename (str): Path to audio file.
        onset (float): Optional onset of the audio file (in seconds) with
            respect to some more general context or timeline the user wishes
            to keep track of.
        sampling_rate (int): Sampling rate of clip, in hertz.
        url (str): Optional url to read contents from.
        clip (AudioFileClip): Optional moviepy AudioFileClip to initialize
            from.
        order (int): Optional sequential index of the AudioStim within some
            containing context.

    '''

    _default_file_extension = '.wav'

    def __init__(self, filename=None, onset=None, sampling_rate=None, url=None,
                 clip=None, order=None):
        if url is not None:
            filename = url
        self.filename = filename

        if clip:
            self.sampling_rate = clip.fps
            self.clip = clip
        else:
            self.sampling_rate = sampling_rate
            if not self.sampling_rate:
                self.sampling_rate = self.get_sampling_rate(self.filename)
            self._load_clip()

        # Small default buffer isn't ideal, but moviepy has persistent issues
        # with some files otherwise; see
        # https://github.com/Zulko/moviepy/issues/246
        self.data = self.clip.to_soundarray(buffersize=1000)
        duration = self.clip.duration

        if self.data.ndim > 1:
            # Average channels to make data mono
            self.data = self.data.mean(axis=1)

        super(AudioStim, self).__init__(
            filename, onset=onset, duration=duration, order=order, url=url)

    def _load_clip(self):
        self.clip = AudioFileClip(self.filename, fps=self.sampling_rate)

    @staticmethod
    def get_sampling_rate(filename):
        ''' Use moviepy/FFMPEG to get the sampling rate '''
        infos = ffmpeg_parse_infos(filename)
        fps = infos.get('audio_fps', 44100)
        if fps == 'unknown':
            fps = 44100
        return fps

    def __getstate__(self):
        d = self.__dict__.copy()
        d['clip'] = None
        return d

    def __setstate__(self, d):
        self.__dict__ = d
        self._load_clip()

    def save(self, path):
        ''' Save clip data to file.

        Args:
            path (str): Filename to save audio data to.
        '''
        self.clip.write_audiofile(path, fps=self.sampling_rate)
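A hypothetical usage sketch for the static helper above, probing a file's rate before constructing the stim ('speech.wav' is a placeholder path):

rate = AudioStim.get_sampling_rate('speech.wav')  # falls back to 44100
stim = AudioStim(filename='speech.wav', sampling_rate=rate)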
Example #35
 def _load_clip(self):
     self.clip = AudioFileClip(self.filename, fps=self.sampling_rate)
Example #36
def test_normalize():
    clip = AudioFileClip('media/crunching.mp3')
    clip = audio_normalize(clip)
    assert clip.max_volume() == 1
    close_all_clips(locals())
Example #37
def test_audio_coreader():
    sound = AudioFileClip("media/crunching.mp3")
    sound = sound.subclip(1, 4)
    sound2 = sound.coreader()
    sound2.write_audiofile(os.path.join(TMP_DIR, "coreader.mp3"))
Example #38
    def sync(self, fps=11025, nbytes=2, low_memory=False, print_progress=False, convert=False):
        """
        This function calculates the shift necessary for the other cameras to
        be in sync with the first camera. It uses scipy's fftconvolve to
        compute the cross correlation.
        :param convert: if convert is True, the audio from the video file is written to a wave file. (This uses scipy to read the file if it exists.)
        """
        # first file (reference)
        if convert:
            # only use wav if convert is on
            if os.path.exists(self.filenames[0][0]+'.wav'):
                with open(self.filenames[0][0]+'.wav','rb') as f:
                    fs,data = wavfile.read(f)
                # see if settings changed
                if fs != fps:
                    data = write_audio(self.filenames[0],fps,nbytes,overwrite=True)
            else:
                data = write_audio(self.filenames[0],fps,nbytes,overwrite=True)
                
        else:
            clip = AudioFileClip(self.filenames[0][0]+self.filenames[0][1])
            data = clip.to_soundarray(fps=fps, nbytes=nbytes)[:, 0]  # one channel
            clip.reader.close_proc()  # make sure the reader process is closed
        
        if low_memory:
            reference = np.memmap(self.filenames[0][0]+'.dat', dtype='int16',
                                  mode='w+', shape=data.shape)
            reference[:] = data  # copy into the memmap rather than rebinding the name
            del data
        else:
            reference = data[:]
            del data
        
        # the rest (to sync)
        shift = [] 
        for i in range(1, len(self.filenames)):
            if print_progress:
                print("Syncing " + str(i+1) + " of " + str(len(self.filenames)))
            
            
            if convert:
                # only use wav if convert is on
                if os.path.exists(self.filenames[i][0]+'.wav'):
                    with open(self.filenames[i][0]+'.wav','rb') as f:
                        fs,data = wavfile.read(f)
                    # see if settings changed
                    if fs != fps:
                        data = write_audio(self.filenames[i],fps,nbytes,overwrite=True)
                else:
                    data = write_audio(self.filenames[i],fps,nbytes,overwrite=True)
                    
            else:
                clip = AudioFileClip(self.filenames[i][0]+self.filenames[i][1])
                data = clip.to_soundarray(fps=fps, nbytes=nbytes)[:, 0]  # one channel
                del clip.reader
                
            if low_memory:
                to_sync = np.memmap(self.filenames[i][0]+'.dat', dtype='int16',
                                    mode='w+', shape=data.shape)
                to_sync[:] = data  # copy into the memmap rather than rebinding the name
                del data
            else:
                to_sync = data[:]  # necessary?
                del data
            
            sync_time = get_shift(reference,to_sync,fps,low_memory=low_memory)
            
            if print_progress:
                print(sync_time)
            shift.append( sync_time )

        self.shift = shift
        return shift
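get_shift is not defined in this example; the sketch below shows what such a helper might look like, using scipy.signal.fftconvolve for the cross correlation as the docstring suggests (the name, signature, and behaviour are assumptions, not the original implementation):

import numpy as np
from scipy.signal import fftconvolve

def get_shift(reference, to_sync, fps, low_memory=False):
    # Convolving with the reversed signal is cross-correlation; the lag of
    # the correlation peak estimates the offset of to_sync in samples.
    # (low_memory handling is omitted in this sketch.)
    corr = fftconvolve(reference, to_sync[::-1], mode='full')
    lag = np.argmax(corr) - (len(to_sync) - 1)
    return lag / float(fps)  # offset in seconds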
Example #39
def test_normalize():
    clip = AudioFileClip('media/crunching.mp3')
    clip = audio_normalize(clip)
    assert clip.max_volume() == 1
Example #40
def poop(source, destination, midi_file, stretch, fadeout, rebuild, max_stack):
    """
    Create multiple pitchshifted versions of source video and arrange them to
    the pattern of the midi_file, also arrange the video if multiple notes play
    at the same time.
    """

    print "Reading input files"
    video = VideoFileClip(source, audio=False)
    """
    Non-main tracks are 30% the size of the main and have a white border and a
    margin around them.
    """
    smaller = video.resize(0.3)\
        .margin(mar=2, color=3*[255])\
        .margin(mar=8, opacity=0)
    audio = AudioFileClip(source, fps=44100)
    mid = MidiFile(midi_file)
    ignoredtracks = ["Percussion", "Bass"]

    print "Analysing MIDI file"
    notes = []   # the number of messages in each track
    lowest = 127 # will contain the lowest note
    highest = 0  # will contain the highest note
    for i, track in enumerate(mid.tracks):
        notes.append(0)
        #if track.name in ignoredtracks: continue
        for message in track:
            if message.type == "note_on":
                lowest = min(lowest, message.note)
                highest = max(highest, message.note)
                notes[-1] += 1
    """
    The main track is the one featured in the center. It is probably the one
    with the most notes. Also record the lowest, highest, and average note to
    generate the appropriate pitches.
    """
    maintrack = max(enumerate(notes), key=lambda x: x[1])[0]
    midpitch = int((lowest+highest)/2)
    print "Main track is probably", str(maintrack)+":", mid.tracks[maintrack].name
    mid.tracks.insert(0, mid.tracks.pop(maintrack)) # move main track to front
    notes.insert(0, notes.pop(maintrack)) # move main note count to front
    print sum(notes), "notes ranging from", lowest, "to", highest, "centering around", midpitch

    print "Transposing audio"
    sound = audio.to_soundarray(fps=44100) # source, original audio
    tones = range(lowest-midpitch, highest-midpitch) # the range of pitches we need
    pitches = [] # this will contain the final AudioFileClips
    if not os.path.exists("pitches/"):
        print "Creating folder for audio files"
        os.makedirs("pitches/")
    for n in tones:
        """
        Pitches only need to be generated if they do not already exist or if
        we force the creation of new ones. Save them in order in pitches.
        """
        name = "pitches/"+source+"_"+str(n)+".mp3"
        if not os.path.isfile(name) or rebuild:
            print "Transposing pitch", n
            splitshift(sound, n).write_audiofile(name)
        pitches.append(AudioFileClip(name, fps=44100))

    print "Adding video clips"
    clips = [video.set_duration(1)] # to set the video size
    positions = [("left", "bottom"), ("right", "bottom"), ("left", "top"),
        ("right", "top"), ("center", "bottom"), ("center", "top"),
        ("left", "center"), ("right", "center")] # non-main tracks
    """
    curpos is the current corner position on the screen and changes with each track.
    cache is used to make a unique file name whenever a new temporary file is created.
    endtime will be used at the end to set the end TextClip. It is the latest time any clip ends.
    """
    curpos = -2
    cache = endtime = 0
    for i, track in enumerate(mid.tracks):
        #if track.name in ignoredtracks: continue
        print("Processing {} notes: {}".format(notes[i], track.name))
        t = 1.0 # not 0 because we added one second of original video for size
        opennotes = [] # will contain all notes that are still playing
        curpos += 1
        for message in track:
            if not isinstance(message, MetaMessage):
                message.time *= stretch
                t += message.time
                if message.type == "note_on":
                    """
                    Add a video clip with the appropriate starting time and
                    pitch. Also add an entry to opennotes (we don't know when
                    the note ends yet).
                    """
                    part = video
                    mainvid = i == 0  # and len(opennotes) == 0
                    if not mainvid: part = smaller
                    part = part\
                        .set_audio(pitches[min(len(pitches)-1, max(0, message.note-lowest))])\
                        .set_start(t/1000)
                    opennotes.append((message.note, len(clips), t))
                    """
                    If this isn't the main track, the video will be smaller and
                    placed at the edge. We'll get a position for each track.
                    If there is more than one video playing in this track, it
                    will be placed slighly closer to the center.
                    """
                    if not mainvid:
                        stackheight = 6
                        part = part.set_position(positions[curpos % len(positions)])
                    clips.append(part)
                elif message.type == "note_off":
                    reference = message.note
                    index = 0
                    """
                    Find the note that ended in opennotes using the note.
                    Get the index and start time, remove it from opennotes.
                    """
                    for note in reversed(opennotes):
                        n, j, d = note
                        if n == reference:
                            index = j
                            opennotes.remove(note)
                            break
                    """
                    Get the clip for the open note, set its time to the
                    difference between time now and start time. Have it fade out
                    and update the endtime if needed.
                    """
                    clips[index] = clips[index].set_duration((t-d)/1000+fadeout)
                    clips[index] = clips[index].crossfadeout(fadeout)
                    endtime = max(endtime, t/1000+fadeout)
                if len(clips) == max_stack:
                    """
                    To save some memory, the clips in memory are emptied
                    whenever they reach a certain size. All clips that are closed
                    are merged into one file on disk.
                    """
                    upuntil = len(clips) # the first open note
                    if len(opennotes) > 0: _, upuntil, _ = opennotes[0]
                    stillopen = clips[upuntil:]
                    print "Stack reached", len(clips), "clips, merging", upuntil
                    """
                    Save a temporary file to disk with all clips we can safely
                    discard from clips.
                    """
                    newcache = destination+".temporary"+str(cache)+".mp4"
                    CompositeVideoClip(clips[:upuntil]).write_videofile(newcache)
                    cache += 1
                    """
                    Shift all opennotes' indices down by the number of clips
                    merged and saved to disk. Set clips to be the new, merged
                    clip and any leftover clips.
                    """
                    # Use a fresh loop variable so the track index `i` from
                    # the enclosing loop is not clobbered.
                    for k, note in enumerate(opennotes):
                        n, j, d = note
                        opennotes[k] = (n, j-upuntil+1, d)
                    clips = [VideoFileClip(newcache)]+stillopen

    end = TextClip("pitch.py", font="Arial", color="white", fontsize=70)\
        .set_pos("center")\
        .set_duration(1)\
        .set_start(endtime)
    clips.append(end) # add an ending frame

    """
    Combine all leftover clips, write them to the final file and remove
    temporary files created before.
    """
    print "Combining", len(clips), "clips"
    final = CompositeVideoClip(clips).set_start(1)
    final.write_videofile(destination)
    clips = []
    if cache == 1:
        print "Removing one temporary file"
    elif cache > 1:
        print "Removing", cache, "temporary files"
    for i in range(0, cache):
        os.remove(destination+".temporary"+str(i)+".mp4")