Пример #1
0
 def compute_thumbnail(self):
     """Lazily build a 100x100 JPEG thumbnail and cache it in self.thumbnail.

     Tries three sources in order: the EXIF-embedded thumbnail, direct
     decoding for known image extensions, and finally the first frame of
     the file treated as a video. The result is raw JPEG bytes.
     """
     if self.thumbnail is None:
         # ExifTool.get_metadata returns a 6-tuple; only the last element
         # (the embedded thumbnail) is used here.
         _, _, _, _, _, thumbnail = ExifTool.get_metadata(self.media_path)
         if thumbnail is not None:
             # Skip the first 7 characters of the value before base64
             # decoding — presumably a "base64:" prefix; TODO confirm.
             self.thumbnail = base64.b64decode(thumbnail[7:])
             thb = Image.open(io.BytesIO(self.thumbnail))
             # Downscale in place; NEAREST favors speed over quality.
             thb.thumbnail((100, 100), resample=NEAREST)
             bytes_output = io.BytesIO()
             thb.save(bytes_output, format='JPEG')
             self.thumbnail = bytes_output.getvalue()
         elif self.extension in IMAGE_TYPE:
             image = Image.open(self.media_path)
             image.thumbnail((100, 100), resample=NEAREST)
             bytes_output = io.BytesIO()
             # JPEG has no alpha/palette support; convert first.
             if image.mode in ("RGBA", "P"):
                 image = image.convert("RGB")
             image.save(bytes_output, format='JPEG')
             self.thumbnail = bytes_output.getvalue()
         else:
             # Fallback: grab the frame at t=0 from the video.
             clip = None
             try:
                 clip = VideoFileClip(self.media_path)
                 frame_at_second = 0
                 frame = clip.get_frame(frame_at_second)
                 new_image = Image.fromarray(frame)
                 new_image.thumbnail((100, 100), resample=NEAREST)
                 bytes_output = io.BytesIO()
                 new_image.save(bytes_output, format='JPEG')
                 self.thumbnail = bytes_output.getvalue()
             finally:
                 # Always release the ffmpeg reader, even on failure.
                 if clip is not None:
                     clip.close()
Пример #2
0
def clip(args):
    '''Subprogram for taking a portion of a video.

    Reads INPUT_FILE START_TIME END_TIME positionally from *args*,
    honors -o/--output, -t/--threads and -m/--mute flags, and writes the
    subclip with moviepy.
    '''

    help(
        '{script} [--help] [--threads THREADS] [--output OUTPUT_FILE] [--mute] INPUT_FILE START_TIME END_TIME'
    )
    value_flags = ['-o', '--output', '-t', '--threads']
    input_file = unnamed_value(0, value_flags, argl=args)
    start_time = unnamed_value(1, value_flags, argl=args)
    end_time = unnamed_value(2, value_flags, argl=args)
    # Bug fix: pass argl=args like every other flag lookup in this
    # function; without it --mute was read from the wrong argument list.
    mute = has_any(['-m', '--mute'], argl=args)

    output_file = value_following_any(['-o', '--output'], argl=args)
    if output_file is None:
        # Default: generated name reusing the input file's extension.
        _, ext = splitext(input_file)
        output_file = generate_filename(ext)

    threads = 8
    if has_any(['-t', '--threads'], argl=args):
        # Command-line values arrive as strings; write_videofile expects
        # an integer thread count.
        threads = int(value_following_any(['-t', '--threads'], argl=args))

    clip = VideoFileClip(input_file)
    subclip = clip.subclip(start_time, end_time)
    subclip.write_videofile(output_file, threads=threads, audio=not mute)
    clip.close()
    subclip.close()
Пример #3
0
def job(fn, wid):
    """Detect scene cuts in video *fn* using edge-based detection over
    fixed-size frame windows.

    Args:
        fn: path of the video file.
        wid: worker id, used only to position the (disabled) tqdm bar.

    Returns:
        np.ndarray of detected cut frame indices; the first element is a
        sentinel 0 from the initial accumulator.
    """
    vid = Video(fn)
    vidit = vid.iter_frames()
    data = None
    ret = [np.array([0])]
    i = 0
    buf = 100  # frames per detection window
    cnt = 0    # completed windows, used to offset window-local indices
    for fr, last in tqdm(lookahead(vidit), total=int(vid.duration*vid.fps), position=wid, desc='Thread {0}'.format(wid), leave=False, disable=True):
        if data is None:
            # Allocate the window lazily, once the frame shape is known.
            data = np.zeros((buf, *fr.shape), dtype=np.uint8)
        data[i] = fr
        i += 1
        if i == buf or last:
            data = data[:i]  # when last=True i may be <buf
            res = skv.scenedet(data, method='edges', parameter1=0.7)
            # Shift window-local indices to global frame indices; windows
            # overlap by one frame, hence the (buf-1) stride.
            ret.append(res[1:]+(buf-1)*cnt)
            cnt += 1
            if not last:
                # Seed the next window with this window's last frame so a
                # cut on the boundary is not missed.
                data_ = np.zeros((buf, *fr.shape), dtype=np.uint8)
                data_[0] = data[-1]
                i = 1
                data = data_
    vid.close()
    return np.concatenate(ret)
Пример #4
0
def delmp4(filename, time1=0, time2=0, mv=0):
    """Trim a video file: cut the intro/outro and write it into work_path.

    Args:
        filename: path of the source video.
        time1: seconds to cut from the start.
        time2: seconds to cut from the end.
        mv: absolute end time in seconds, used when time2 == 0.

    When the video is too short for the requested cut, its name is
    appended to an error log file instead.
    """
    clip = VideoFileClip(filename)
    try:
        clip_len = clip.duration
        # End time: an explicit mv wins when no outro cut was requested.
        if time2 == 0 and mv > 0:
            mvtime = mv
        else:
            mvtime = int(clip_len - time2)
        if (clip_len - time1 - time2) > 0 and time1 < clip_len and time2 < clip_len and mv < clip_len:
            mvclip = clip.subclip(time1, mvtime)
            filen = os.path.basename(filename)
            mvclip.write_videofile(work_path + "\\" + filen)
            mvclip.close()  # release the subclip's reader promptly
        else:
            print("视频长度不够!!!!")
            if not os.path.isdir(work_path):
                os.makedirs(work_path)
            # Log the offending file name for later inspection.
            with open(work_path + "/错误!视频长度不够.txt", "a+") as fo:
                fo.write(filename + "\n")
    finally:
        # Bug fix: the original closed the clip only on the error path,
        # leaking the ffmpeg reader on every successful trim.
        clip.close()
Пример #5
0
 def handleDLToMp4(self):
     """Download the first adaptive video and audio streams of the YouTube
     link in the ytLink widget, mux them into one mp4 in the user's
     download folder, and keep the GUI status labels updated.

     Errors are reported through the GUI labels rather than raised.
     """
     try:
         appStatus.set('[1/2] Downloading...')
         root.update()
         getYTVideo = YouTube(ytLink.get())
         composedFilePath = f'{self.usrDownloadPath}{sep}{getYTVideo.title}.mp4'
         # Adaptive streams carry video and audio separately; download
         # each into its own temporary file.
         getYTVideo.streams.filter(adaptive=True,
                                   type='video').first().download(
                                       self.usrDownloadPath,
                                       filename='tmpVidFile')
         getYTVideo.streams.filter(adaptive=True,
                                   type='audio').first().download(
                                       self.usrDownloadPath,
                                       filename='tmpAudFile')
         tmpVideoFile = VideoFileClip(self.tmpVideoFilePath)
         tmpAudioFile = AudioFileClip(self.tmpAudioFilePath)
         appStatus.set('[2/2] Converting & mounting file...')
         ytLink.set('This step may take some minutes')
         root.update()
         # Mux: attach the audio track and re-encode at 30 fps.
         mountClip = tmpVideoFile.set_audio(tmpAudioFile)
         mountClip.write_videofile(composedFilePath, fps=30)
         tmpVideoFile.close()
         tmpAudioFile.close()
         # Clean up the temporary single-track downloads.
         remove(self.tmpVideoFilePath)
         remove(self.tmpAudioFilePath)
         appStatus.set('Done!')
         ytLink.set('Check your "Downloads" directory.')
         root.update()
     except Exception as e:
         print(e)
         appStatus.set('Whoops, something went wrong!')
         ytLink.set(value='Invalid link!')
         root.update()
Пример #6
0
def main(source):
    """Cut the video given in source[1] into user-specified segments.

    Prompts for space-separated pairs of "H:M:S"-style start/end
    timestamps, then writes each segment next to the original named
    <basename>_<i>.mp4, as announced to the user.
    """
    print('要剪切的视频是', source[1])  # source[1] is the file to cut
    source = source[1]
    time = input('请输入开始和结束的时间:\n')  # read start/end time pairs
    time = time.split()
    # Bug fix: an odd number of tokens made the second pop() raise
    # IndexError; drop a trailing unpaired token instead of crashing.
    if len(time) % 2:
        time = time[:-1]
    times = [[time[i], time[i + 1]] for i in range(0, len(time), 2)]

    print("子视频命名方式为原名称_段数.mp4")
    temp = source.split('.')
    # Bug fix: include the underscore promised by the message above
    # (segment files are named <name>_<i>.<ext>).
    target = [temp[0] + '_' + str(i) + '.' + temp[1]
              for i in range(1, len(times) + 1)]

    video = VideoFileClip(source)
    for i, current_time in enumerate(times):
        print('cutting...\n')
        # Fold "H:M:S" components into seconds via to_seconds.
        start_time = reduce(to_seconds, map(int, current_time[0].split(":")))
        stop_time = reduce(to_seconds, map(int, current_time[1].split(":")))
        new_video = video.subclip(start_time, stop_time)  # do the cut
        new_video.to_videofile(target[i], fps=29, remove_temp=True)

    video.close()
    print('done ^_^\n')
Пример #7
0
    def download_video_to_file(self, bot: Bot, document: Document, file_object: BufferedWriter, file_object_path: str):
        """Download a Telegram video document, convert it to a GIF at
        file_object_path, and produce a gifsicle-compressed copy.

        Args:
            bot (:obj:`telegram.bot.Bot`): Telegram Api Bot Object.
            document (:obj:`telegram.document.Document`): A Telegram API Document object
            file_object (:obj:`io.BufferedWriter`): Actual existing file object
            file_object_path (:obj:`str`): The path to the file given in file_object

        Returns:
            (file_object, file_object_path, compressed_gif_path) where
            compressed_gif_path is '' if gifsicle produced no output.
        """
        video = bot.getFile(document.file_id)

        with CustomNamedTemporaryFile() as video_file:
            video.download(out=video_file)
            # Close so ffmpeg can reopen the file by name (required on
            # platforms where an open temp file cannot be reopened).
            video_file.close()
            video_clip = VideoFileClip(video_file.name, audio=False)

            video_clip.write_gif(file_object_path)
            video_clip.close()

            dirname = os.path.dirname(file_object_path)
            file_name = os.path.splitext(file_object_path)[0]
            compressed_gif_path = os.path.join(dirname, file_name + '-min.gif')

            # NOTE(review): the path is interpolated into a shell command;
            # a path with spaces or shell metacharacters would break this —
            # consider subprocess.run with a list.
            os.system('gifsicle -O3 --lossy=50 -o {dst} {src}'.format(dst=compressed_gif_path, src=file_object_path))
            # gifsicle may fail silently; signal that with an empty path.
            compressed_gif_path = compressed_gif_path if os.path.isfile(compressed_gif_path) else ''
            return file_object, file_object_path, compressed_gif_path
Пример #8
0
def main(ass):
    """Download a YouTube video, extract its audio to an mp3 tagged with
    the video thumbnail, and move both files to their destinations.

    Args:
        ass: the YouTube URL (anything convertible with str()).
    """
    yt = YouTube(str(ass))
    image = yt.thumbnail_url
    yt = yt.streams.filter(progressive=True, file_extension="mp4")
    fileName = yt.first().default_filename
    # Reshape + bidi so Arabic titles print correctly in the terminal.
    reshaped_text = arabic_reshaper.reshape(fileName)
    bidi_text = get_display(reshaped_text)
    print("Start Download: " + bidi_text)
    yt.first().download()
    sleep(2)
    print("Finish Download the video")
    video = VideoFileClip(fileName)
    # Bug fix: str.replace("mp4", "mp3") also rewrote "mp4" occurring
    # inside the title itself; only the extension should change.
    fileNameMp3 = os.path.splitext(fileName)[0] + ".mp3"
    video.audio.write_audiofile(fileNameMp3)
    video.close()
    sleep(2)
    audiofile = eyed3.load(fileNameMp3)

    if audiofile.tag is None:
        audiofile.initTag()
    response = get(image)
    # 3 = ID3 front-cover image type.
    audiofile.tag.images.set(3, response.content, 'image/jpeg')

    audiofile.tag.save()
    move("./"+fileName, "./mp4s/"+fileName)
    pathToSaveMp3 = "Music/iTunes/iTunes Media/Automatically Add to iTunes/"+fileNameMp3
    move("./"+fileNameMp3, os.path.join(os.path.expanduser("~"), pathToSaveMp3))
    return
Пример #9
0
def make_video():
    """Assemble the images in result_directory into an AVI, attach a
    music track, and write a 15-second mp4 into res_video/.
    """
    result_files = os.listdir(result_directory)
    # Frame size comes from the first image; all frames are presumably
    # the same size — TODO confirm.
    height, width, channels = cv2.imread('{0}/{1}'.format(
        result_directory, result_files[0])).shape

    out = cv2.VideoWriter("video.avi",
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                          float(1 / (15 / int(len(result_files)))),
                          (width, height))  # create the video writer

    for i in result_files:
        out.write(cv2.imread('{0}/{1}'.format(result_directory, i)))

    out.release()  # finalize the AVI
    cv2.destroyAllWindows()  # tear down any OpenCV windows

    # Re-encode with the audio track via moviepy.
    my_clip = mpe.VideoFileClip('video.avi')
    my_clip.write_videofile('res_video/result_video.mp4',
                            audio='music/sunny.mp3',
                            codec='mpeg4')

    my_clip.close()
    # NOTE(review): this re-reads and overwrites the same mp4 in place;
    # moviepy goes through a temp file, but confirm this is safe here.
    clip = VideoFileClip("res_video/result_video.mp4").subclip(0, 15)
    clip.write_videofile("res_video/result_video.mp4", codec='mpeg4')
    clip.close()
Пример #10
0
def test_release_of_file_via_close():
    """Verify that close() releases file handles so the written video can
    be deleted — this is the Windows file-locking regression test.
    """
    # Create a random video file.
    red = ColorClip((256, 200), color=(255, 0, 0))
    green = ColorClip((256, 200), color=(0, 255, 0))
    blue = ColorClip((256, 200), color=(0, 0, 255))

    red.fps = green.fps = blue.fps = 10

    # Repeat this so we can see no conflicts.
    for i in range(3):
        # Get the name of a temporary file we can use.
        local_video_filename = os.path.join(
            TMP_DIR,
            "test_release_of_file_via_close_%s.mp4" % int(time.time()))

        clip = clips_array([[red, green, blue]]).with_duration(0.5)
        clip.write_videofile(local_video_filename)

        # Open it up with VideoFileClip.
        video = VideoFileClip(local_video_filename)
        # The close() calls must precede os.remove for this test to pass
        # on Windows, where open handles lock the file.
        video.close()
        clip.close()

        # Now remove the temporary file.
        # This would fail on Windows if the file is still locked.

        # This should succeed without exceptions.
        os.remove(local_video_filename)

    red.close()
    green.close()
    blue.close()
Пример #11
0
def test_failure_to_release_file():
    """ This isn't really a test, because it is expected to fail.
        It demonstrates that there *is* a problem with not releasing resources when running on
        Windows.

        The real issue was that, as of moviepy 0.2.3.2, there was no way around it.

        See test_resourcerelease.py to see how the close() methods provide a solution.
    """

    # Get the name of a temporary file we can use.
    local_video_filename = join(
        TMP_DIR, "test_release_of_file_%s.mp4" % int(time.time())
    )

    # Repeat this so we can see that the problems escalate:
    for i in range(5):

        # Create a random video file.
        red = ColorClip((256, 200), color=(255, 0, 0))
        green = ColorClip((256, 200), color=(0, 255, 0))
        blue = ColorClip((256, 200), color=(0, 0, 255))

        red.fps = green.fps = blue.fps = 30
        video = clips_array([[red, green, blue]]).set_duration(1)

        try:
            video.write_videofile(local_video_filename)

            # Open it up with VideoFileClip.
            clip = VideoFileClip(local_video_filename)

            # Normally a client would do processing here.

            # All finished, so delete the clipS.
            clip.close()
            video.close()
            del clip
            del video

        except IOError:
            print(
                "On Windows, this succeeds the first few times around the loop"
                " but eventually fails."
            )
            print("Need to shut down the process now. No more tests in" "this file.")
            return

        try:
            # Now remove the temporary file.
            # This will fail on Windows if the file is still locked.

            # In particular, this raises an exception with PermissionError.
            # In  there was no way to avoid it.

            remove(local_video_filename)
            print("You are not running Windows, because that worked.")
        except OSError:  # More specifically, PermissionError in Python 3.
            print("Yes, on Windows this fails.")
    def _get_frames(self):
        """
        Extract frames from the video container.

        Returns:
            frames (np.ndarray or list): pre-processed frames read from the
            video file when self.read_vid_file is set, otherwise a sorted
            list of on-disk image file names to be processed later.

        Raises:
            Exception: re-raised (after logging) when the video cannot be
            loaded.
        """
        if self.read_vid_file:
            path_to_vid = (
                os.path.join(self.vid_path, self.vid_id) + self.cfg.DATA.VID_FILE_EXT
            )
            assert os.path.exists(path_to_vid), "{} file not found".format(path_to_vid)

            try:
                # Load video; audio is not needed for frame extraction.
                video_clip = VideoFileClip(path_to_vid, audio=False, fps_source="fps")
            except Exception as e:
                logger.info(
                    "Failed to load video from {} with error {}".format(path_to_vid, e)
                )
                # Bug fix: the original fell through after logging and then
                # crashed with NameError on video_clip; re-raise so callers
                # see the real failure.
                raise

            # Center-crop bounds are loop-invariant; compute them once.
            crop_wstart = int((self.sample_width - self.cfg.DATA.TEST_CROP_SIZE) / 2)
            crop_wend = crop_wstart + self.cfg.DATA.TEST_CROP_SIZE
            crop_hstart = int((self.sample_height - self.cfg.DATA.TEST_CROP_SIZE) / 2)
            crop_hend = crop_hstart + self.cfg.DATA.TEST_CROP_SIZE

            frames = []
            for in_frame in video_clip.iter_frames(fps=self.cfg.DATA.IN_FPS):
                # Resize to the sampling resolution, then center-crop.
                in_frame = cv2.resize(
                    in_frame,
                    (self.sample_width, self.sample_height),
                    interpolation=cv2.INTER_LINEAR,
                )
                frames.append(in_frame[crop_hstart:crop_hend, crop_wstart:crop_wend])

            frames = np.array(frames)
            frames = self._pre_process_frame(frames)
            video_clip.close()

            return frames

        else:
            # Frames already extracted on disk: return their names sorted
            # by the index parsed from the configured file-name format.
            path_to_frames = os.path.join(self.vid_path, self.vid_id)
            frames = sorted(
                filter(
                    lambda x: x.endswith(self.cfg.DATA.IMG_FILE_EXT),
                    os.listdir(path_to_frames),
                ),
                key=lambda x: parse(self.cfg.DATA.IMG_FILE_FORMAT, x)[0],
            )
            return frames
Пример #13
0
def job(item):
    """Dump a video into a numbered .bmp image sequence at 8 fps.

    *item* is a (filename, input_dir, output_dir) triple; the output
    directory mirrors the input path with the extension stripped.
    Extraction is skipped when the directory already exists.
    """
    fn, indir, outdir = item
    outdir = os.path.splitext(fn.replace(indir, outdir))[0]
    if os.path.isdir(outdir):
        return
    os.makedirs(outdir)
    vid = Video(fn)
    vid.write_images_sequence(os.path.join(outdir, '%06d.bmp'), fps=8, verbose=False, logger=None)
    vid.close()
Пример #14
0
def test_failure_to_release_file():
    """ This isn't really a test, because it is expected to fail.
        It demonstrates that there *is* a problem with not releasing resources when running on
        Windows.

        The real issue was that, as of moviepy 0.2.3.2, there was no way around it.

        See test_resourcerelease.py to see how the close() methods provide a solution.
    """

    # Get the name of a temporary file we can use.
    local_video_filename = join(
        TMP_DIR, "test_release_of_file_%s.mp4" % int(time.time()))

    # Repeat this so we can see that the problems escalate:
    for i in range(5):

        # Create a random video file.
        red = ColorClip((256, 200), color=(255, 0, 0))
        green = ColorClip((256, 200), color=(0, 255, 0))
        blue = ColorClip((256, 200), color=(0, 0, 255))

        red.fps = green.fps = blue.fps = 30
        video = clips_array([[red, green, blue]]).set_duration(1)

        try:
            video.write_videofile(local_video_filename)

            # Open it up with VideoFileClip.
            clip = VideoFileClip(local_video_filename)

            # Normally a client would do processing here.

            # All finished, so delete the clipS.
            clip.close()
            video.close()
            del clip
            del video

        except IOError:
            print(
                "On Windows, this succeeds the first few times around the loop"
                " but eventually fails.")
            print("Need to shut down the process now. No more tests in"
                  "this file.")
            return

        try:
            # Now remove the temporary file.
            # This will fail on Windows if the file is still locked.

            # In particular, this raises an exception with PermissionError.
            # In  there was no way to avoid it.

            remove(local_video_filename)
            print("You are not running Windows, because that worked.")
        except OSError:  # More specifically, PermissionError in Python 3.
            print("Yes, on Windows this fails.")
Пример #15
0
def scarica(link, formato):
    """Download a YouTube video into the user's Downloads folder and,
    when formato == "mp3", convert it to mp3 and delete the mp4.

    Failures are reported through the module-level flags declared below
    instead of raising.
    """

    global converto, errori_sistema, errore_testo, errore_internet

    # Connectivity probe: without network, skip the download entirely.
    try:
        urllib.request.urlopen("http://google.com")
    except OSError:
        # Narrowed from a bare except: URLError subclasses OSError.
        errore_internet = True
    else:

        path_to_download_folder = str(os.path.join(Path.home(), "Downloads"))

        try:
            video = pytube.YouTube(str(link)).streams.first()
        # Bug fix: the original had two identical `except Exception as e`
        # clauses on this try; the second was unreachable dead code and
        # has been removed.
        except Exception as e:
            errori_sistema = True
            errore_testo = str(e)
        else:
            print("Video Trovato!")
            titolo = video.title

            # Sanitize the title: keep alphanumerics, blank out the rest.
            titolo_finito = ""
            for character in titolo:
                if character.isalnum():
                    titolo_finito += character
                else:
                    titolo_finito += " "

            try:
                video.download(path_to_download_folder, str(titolo_finito))
            except Exception as e:
                errore_testo = e
                errori_sistema = True

            if formato == "mp3":

                converto = True

                path_mp4 = os.path.join(path_to_download_folder, titolo_finito + '.mp4')
                path_mp3 = path_to_download_folder + '\\' + titolo_finito + '.mp3'

                print("Converto in mp3 ...")

                # Extract the audio track, then drop the source mp4.
                video = VideoFileClip(path_mp4)
                audio = video.audio
                audio.write_audiofile(path_mp3)
                audio.close()
                video.close()
                os.remove(path_mp4)
Пример #16
0
def addlogo(file_dir,img="",time=20,X=30,Y=30):
    """Overlay an image logo onto a video.

    file_dir: path of the source video; img: path of the logo image;
    time: seconds the logo stays visible; X/Y: logo position in pixels.
    Writes the composited video into work_path under the same base name.
    """
    clip = VideoFileClip(file_dir)
    img_clip = ImageClip(img)   # logo overlay
    img_clip = img_clip.set_pos((X,Y)).set_duration(time)
    # NOTE(review): the VideoFileClip reference is rebound here; its
    # reader is presumably released via the composite's close() — confirm.
    clip = CompositeVideoClip([clip, img_clip])
    filen = os.path.basename(file_dir)
    clip.write_videofile(work_path+"\\"+filen)
    clip.close()
    def convert_to_mp3(self, video_path):
        """Convert an mp4 video to an mp3 audio file.

        Silently returns when the file's content type is not video/mp4.
        The source mp4 is removed after the audio is written.
        """
        # Check the real content type via libmagic, not the extension.
        if magic.from_file(video_path, mime=True) != 'video/mp4':
            return

        videoclip = VideoFileClip(video_path)
        audioclip = videoclip.audio
        # ".mp4" -> ".mp3": replace the trailing '4' with '3'.
        audioclip.write_audiofile(video_path[:-1] + "3")
        videoclip.close()
        os.remove(video_path)
Пример #18
0
def fhd_mp4(url, nv):  # download Full HD video on youtube
    """Download a YouTube video at 1080p and mux it with its audio.

    YouTube serves 1080p video-only streams, so the audio is taken from
    the 360p progressive stream, extracted to mp3, and merged back with
    ffmpeg. nv: "y" uses NVENC hardware encoding, "n" uses CPU h264.
    Progress and timings are reported through the GUI.
    """
    os.chdir(mp4_loc)
    start1 = time.time()
    # Serialize downloads/conversions: the temp file names are shared.
    lock.acquire()
    yt = YouTube(url, on_progress_callback=progress)  #url
    current_title.config(text=yt.title)
    window.update()
    name = "[FHD]" + yt.title + ".mp4"
    name = check_name(name)
    temp_mp4 = "temp.mp4"
    temp_mp3 = "temp.mp3"
    video = yt.streams.first()
    video.download(filename="temptemp")  #360p mp4 download

    os.rename("temptemp.mp4", temp_mp4)
    mp4_360P = VideoFileClip(temp_mp4)
    mp4_360P.audio.write_audiofile(temp_mp3)  # change the 360p mp4 file to mp3
    mp4_360P.close()
    os.remove(temp_mp4)  # now we have the mp3 file only
    full_hd = yt.streams.filter(res="1080p",
                                type="video",
                                file_extension="mp4")
    if str(full_hd) == "[]":
        # No 1080p stream available: clean up and bail out.
        print_to_gui("No video resolution for 1080p.")
        os.remove(temp_mp3)
        return None
    video = full_hd.first()
    print_to_gui("Download Full HD Video.")
    start_d = time.time()
    video.download(filename="temptemp")  # 1080p mp4 download
    end1 = time.time()
    print_to_gui("Download complete\n")
    #=============================================================#
    os.rename("temptemp.mp4", temp_mp4)
    print_to_gui("Converting files.")
    start2 = time.time()
    # Merge the 1080p video track with the extracted mp3 audio track.
    if nv == "y":
        os.system(
            "ffmpeg -loglevel quiet -hwaccel cuvid -i temp.mp4 -i temp.mp3 -map 0:v -map 1:a -c:v h264_nvenc -c:a ac3 -b:v 12M output.mp4"
        )
    if nv == "n":
        os.system(
            "ffmpeg -loglevel quiet -i temp.mp4 -i temp.mp3 -map 0:v -map 1:a -c:v h264 -c:a ac3 -s 1920x1080 output.mp4"
        )
    #=============================================================#
    os.remove(temp_mp4)
    os.remove(temp_mp3)
    os.rename("output.mp4", name)
    lock.release()
    end2 = time.time()
    print_to_gui("Convert Done.")
    print_to_gui("\n\n\n1080P video downlaod time : " +
                 str(round((end1 - start_d), 2)) + "secs.\n")
    print_to_gui("Compliation : " + str(round((end2 - start2), 2)) + "secs.\n")
    print_to_gui("Total time : " + str(round((end2 - start1), 2)) + "secs.")
Пример #19
0
def test_ffmpeg_resizing():
    """Test FFmpeg resizing, to include downscaling."""
    video_file = "media/big_buck_bunny_432_433.webm"
    # Each pair is (height, width); None means "keep aspect ratio".
    for target_resolution in [(128, 128), (128, None), (None, 128), (None, 256)]:
        video = VideoFileClip(video_file, target_resolution=target_resolution)
        frame = video.get_frame(0)
        # Compare only the dimensions that were explicitly requested.
        for target, observed in zip(target_resolution, frame.shape):
            if target is None:
                continue
            assert target == observed
        video.close()
Пример #20
0
def gif(args: Namespace):
    '''Subprogram for converting videos to gif format.

    Uses args.output as the target file name, generating one with a
    .gif extension when it is not provided.
    '''

    # Idiom fix: compare against None with `is`, not `==` (PEP 8).
    if args.output is None:
        output_file = generate_filename('.gif')
    else:
        output_file = args.output

    clip = VideoFileClip(args.input_file)
    clip.write_gif(output_file, program='ffmpeg')
    clip.close()
Пример #21
0
def uhd_mp4(url, nv):
    """Download a YouTube video at 4K (2160p webm) and mux it with audio.

    YouTube serves 4K as video-only webm, so the audio is taken from the
    360p progressive stream, extracted to mp3, then merged with ffmpeg.
    nv: "y" uses NVENC hardware encoding, "n" uses CPU h264. Progress
    and timings are reported through the GUI.
    """
    start1 = time.time()
    os.chdir(mp4_loc)
    # Serialize downloads/conversions: the temp file names are shared.
    lock.acquire()
    yt = YouTube(url, on_progress_callback=progress)  #url
    current_title.config(text=yt.title)
    window.update()
    name = "[UHD]" + yt.title + ".mp4"
    name = check_name(name)
    temp_mp4 = "temp.mp4"
    temp_mp3 = "temp.mp3"
    video = yt.streams.first()
    video.download(filename="temptemp")  #360p mp4 download
    os.rename("temptemp.mp4", temp_mp4)
    mp4_360P = VideoFileClip(temp_mp4)
    mp4_360P.audio.write_audiofile(temp_mp3)  # change the 360p mp4 file to mp3
    mp4_360P.close()
    os.remove(temp_mp4)  # now we have the mp3 file only
    uhd = yt.streams.filter(res="2160p", type="video", file_extension="webm")
    if str(uhd) == "[]":
        # No 4K stream available: clean up and bail out.
        print_to_gui("No video resolution for 4k.")
        os.remove(temp_mp3)
        return None
    video = uhd.first()
    print_to_gui("Download 4k video")
    start_d = time.time()
    video.download(filename="temptemp")  # 2160P webm download
    end1 = time.time()
    print_to_gui("Download complete\n")
    print_to_gui("================================\n" + str(end1 - start1))
    #=======================================================#
    os.rename("temptemp.webm", "temp.webm")
    print_to_gui("rename OK\n\n\n\n\n\n")
    start2 = time.time()
    # Merge the 2160p video track with the extracted mp3 audio track.
    if nv == "y":  #ffmpeg for NV or not
        os.system(
            "ffmpeg -loglevel quiet -hwaccel cuvid -i temp.webm -i temp.mp3 -map 0:v -map 1:a -c:v h264_nvenc -c:a ac3 -b:v 32M output.mp4"
        )
    if nv == "n":
        os.system(
            "ffmpeg -loglevel quiet -i temp.webm -i temp.mp3 -map 0:v -map 1:a -c:v h264 -c:a ac3 -s 3840x2160 output.mp4"
        )  #cpu
    os.rename("output.mp4", name)
    os.remove("temp.webm")
    print_to_gui("Convert Done.")
    #======================================================================#
    os.remove(temp_mp3)
    lock.release()
    end2 = time.time()
    print_to_gui("\n\n\n4K video downlaod time : " +
                 str(round((end1 - start_d), 2)) + "secs.\n")
    print_to_gui("Compliation : " + str(round((end2 - start2), 2)) + "secs.\n")
    print_to_gui("Total time : " + str(round((end2 - start1), 2)) + "secs.")
Пример #22
0
 def inter_and_build_sub_clip(self):
     """Split self.video_name into self.max_division equal-length parts,
     write each as <name>_<i>.mp4 (recording the names in
     self.file_names), and return the path of the resulting zip archive.
     """
     step = float(self.duration / self.max_division)
     previous_sub_clip = 0
     current_sub_clip = step
     for x in range(self.max_division):
         split_name = self.video_name.split('.')
         new_name = f'{split_name[0]}_{x}.mp4'
         self.file_names.append(new_name)
         clip = VideoFileClip(self.video_name).subclip(previous_sub_clip, current_sub_clip)
         clip.write_videofile(new_name)
         clip.close()
         previous_sub_clip = current_sub_clip
         # Bug fix: the original did `current_sub_clip += current_sub_clip`,
         # doubling the end point every iteration instead of advancing by a
         # fixed step, so the third segment onward ran past the duration.
         current_sub_clip += step
     zip_file_path = self.create_zip_folder()
     return zip_file_path
Пример #23
0
    def vid_to_dict(self, video_name, video_file):
        """Record the wall-clock time span covered by a video file.

        Args:
            video_name: a '%Y%m%d_%H%M%S'-formatted timestamp string.
            video_file: path of the video, used as the dict key.

        Stores [start_time, end_time] in self.dict keyed by video_file,
        where end_time = start_time + video duration.
        """
        print("video_file:  " + video_file)

        video = VideoFileClip(video_file)
        # Bug fix: the original wrote str(e) to log.txt, but no `e` was in
        # scope (leftover of a removed except block), so every call raised
        # NameError; the stray logging lines are removed.
        duration = video.duration
        video_start_time = dt.strptime(video_name, '%Y%m%d_%H%M%S')
        video_end_time = video_start_time + timedelta(seconds=duration)
        self.dict.update({video_file: [video_start_time, video_end_time]})
        video.close()
        video = None
        gc.collect()  # force prompt release of the ffmpeg reader
Пример #24
0
def cat(args):
    '''Subprogram for concatenating multiple videos.

    Joins all positional INPUT_FILE arguments in order and writes the
    result, honoring -o/--output, -e/--extension, -t/--threads and
    -m/--mute flags.
    '''

    help(
        '{script} [--help] [--threads THREADS] [--extension OUTPUT_EXTENSION] [--output OUTPUT_FILE] [--mute] INPUT_FILE [INPUT_FILE ...]'
    )

    value_flags = ['-o', '--output', '-t', '--threads', '-e', '--extension']

    # Bug fix: pass argl=args like every other flag lookup in this
    # function; without it --mute was read from the wrong argument list.
    mute = has_any(['-m', '--mute'], argl=args)

    first_input_file = unnamed_value(0, value_flags, argl=args)
    if first_input_file is None:
        exit()

    output_file = value_following_any(['-o', '--output'], argl=args)
    if output_file is None:
        # Default extension: explicit -e/--extension, else the first
        # input file's extension.
        if has_any(['-e', '--extension'], argl=args):
            out_ext = value_following_any(['-e', '--extension'], argl=args)
        else:
            _, out_ext = splitext(first_input_file)
        output_file = generate_filename(out_ext)

    threads = 8
    if has_any(['-t', '--threads'], argl=args):
        # Command-line values arrive as strings; keep threads an int.
        threads = int(value_following_any(['-t', '--threads'], argl=args))

    # Collect every positional input file (bounded scan).
    input_clips = []
    for i in range(0, 1024):
        input_file = unnamed_value(i, value_flags, argl=args)
        if input_file is None:
            break
        input_clips.append(VideoFileClip(input_file))

    # Bug fix: the original looped calling set_start() and discarding the
    # result (set_start returns a new clip), so the loop only printed
    # debug output; concatenate_videoclips chains clips itself, so the
    # no-op loop was removed.
    composition = concatenate_videoclips(input_clips)
    composition.write_videofile(output_file, audio=not mute)
    composition.close()

    # Release every source clip's reader.
    for clip in input_clips:
        clip.close()
Пример #25
0
def get_video_duration(path):
    """Return the duration (whole seconds) of the video at *path*.

    Raises:
        Exception: when the file cannot be opened as a video.
    """
    try:
        clip = VideoFileClip(path)
    except Exception:
        # Bug fix: the original tried to close `clip` here, but when the
        # constructor raises, `clip` was never bound, so the cleanup
        # itself crashed with NameError. Nothing is open yet.
        raise Exception
    duration = int(clip.duration)
    # Explicitly shut down the ffmpeg reader processes before close().
    clip.reader.close()
    clip.audio.reader.close_proc()
    clip.close()
    return duration
Пример #26
0
def clip(args: Namespace):
    '''Subprogram for taking a portion of a video'''

    # Fall back to a generated name keeping the input file's extension.
    output_file = args.output
    if output_file is None:
        _, ext = splitext(args.input_file)
        print(f'Output file using same extension as input file: {ext}')
        output_file = generate_filename(ext)

    # Cut [start_time, end_time] out of the source and write it out.
    source = VideoFileClip(args.input_file)
    portion = source.subclip(args.start_time, args.end_time)
    portion.write_videofile(output_file, threads=args.threads, audio=not args.mute)
    source.close()
    portion.close()
Пример #27
0
def gif(args):
    '''Subprogram for converting videos to gif format'''

    help('{script} gif [--help] [--output OUTPUT_FILE] INPUT_FILE')

    value_flags = ['-o', '--output']

    # The single positional argument is the input video; bail out when
    # it is missing.
    input_file = unnamed_value(0, value_flags, argl=args)
    if input_file is None:
        exit()

    # Use the explicit -o/--output value when present, else generate one.
    output_file = value_following_any(['-o', '--output'], argl=args)
    if output_file is None:
        output_file = generate_filename('.gif')

    source = VideoFileClip(input_file)
    source.write_gif(output_file, program='ffmpeg')
    source.close()
Пример #28
0
def cut_video(beginning, end, file):
    """Cut seconds off the beginning and end of the passed video.

    Writes the result next to the original (an 'n' inserted before the
    extension). Returns {'state': True} on success, {'state': False}
    on failure.
    """
    video = None
    try:
        video = VideoFileClip(file.path)
        ffmpeg_extract_subclip(file.path.replace('\\', '/'),
                               int(beginning),
                               video.duration - int(end),
                               targetname=file.path.replace('\\', '/')[:-4] +
                               'n' + file.path[len(file.path) - 4:])
        video.reader.close()
        video.audio.reader.close_proc()
        video.close()
        return {'state': True}
    except Exception:
        # Bug fix: when VideoFileClip() itself raised, `video` was
        # unbound and the cleanup below crashed with NameError instead of
        # returning the failure dict.
        if video is not None:
            video.reader.close()
            video.audio.reader.close_proc()
            video.close()
        return {'state': False}
Пример #29
0
def vid2jpg(img):
    """Dump every frame of the video at *img* into ./frames as JPEGs,
    copy frame10.jpg into the test folder, then empty the frames folder.
    """
    path = img
    vidcap = cv2.VideoCapture(path)
    success, image = vidcap.read()
    count = 0
    # Clip duration via moviepy.
    clip = VideoFileClip(path)
    # Bug fix: read the duration *before* closing; the original closed
    # the clip first and then accessed the closed clip.
    duration = int(clip.duration)
    clip.close()
    print("Seconds: {0}".format(duration))
    # Clip fps via OpenCV.
    fps = int(vidcap.get(cv2.CAP_PROP_FPS))
    print("FPS:     {0}".format(fps))

    print("Estimated images : ", fps * duration)
    print("Analyzing Video. Please be patient!")
    while success:
        cv2.imwrite("frames/frame%d.jpg" % count,
                    image)  # save frame as JPEG file
        success, image = vidcap.read()
        count += 1
    # Copy frame10 from the frames folder to the test folder.
    src = "/Users/santh/PycharmProjects/FakeImage/venv/FakeImageDetection/frames/frame10.jpg"
    dst = "/Users/santh/PycharmProjects/FakeImage/venv/FakeImageDetection/test/"
    shutil.copy(src, dst)

    # Delete the extracted images in the frames folder.
    folder = '/Users/santh/PycharmProjects/FakeImage/venv/FakeImageDetection/frames'
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))

    print("Complete!")


#vid2jpg('/Users/santh/Desktop/vid2jpeg/1.mp4')
Пример #30
0
        def find_file_to_add():
            """Let the user pick an .mp4 file and fill in the form fields.

            Opens a file-selection dialogue, writes the chosen file's base
            name into the filename field and its duration (probed with
            moviepy's VideoFileClip) into the length field.
            """
            chosen_path = filedialog.askopenfilename(
                title="Selectionnez le fichier",
                filetypes=(("mpeg files", "*.mp4"), ("all files", "*.*")))
            print("Find the file : ", chosen_path)

            # The displayed name is everything after the last '/'.
            base_name = chosen_path.split("/")[-1]
            filename_frame_value.delete(0, END)
            filename_frame_value.insert(0, base_name)

            length_frame_value.delete(0, END)
            # Probe the duration via moviepy (pygame-backed), closing the
            # reader right away to avoid a dangling file handle.
            probe = VideoFileClip(chosen_path)
            probe_length = probe.duration
            probe.close()
            length_frame_value.insert(0, probe_length)
Пример #31
0
    def generateSignLanguage(self, inputClipPaths, inputDuration):
        """Build the final sign-language video from per-sentence clips.

        For each sentence the clips in ``inputClipPaths[i]`` are merged,
        the merged clip is retimed to match the requested duration via an
        ffmpeg ``setpts`` filter, and the retimed clips are concatenated
        into ``signLanguage.mp4`` under ``settings.MEDIA_ROOT``.

        :param inputClipPaths: list of clip-path lists, one per sentence.
        :param inputDuration: raw duration data, decoded by
            ``self.getDurations``.
        :return: path of the generated file, or -1 when the sentence and
            duration counts disagree.
        """
        outputPath = "player/media/outputs/"
        finalClips = []
        duration = self.getDurations(inputDuration)

        # makedirs(exist_ok=True) avoids the check-then-create race of
        # the previous isdir() + mkdir() pair.
        os.makedirs("player/media/outputs", exist_ok=True)

        if len(inputClipPaths) != len(duration):
            print("The number of sentence and duration is not equal.")
            return -1

        # Merge clips (sentence units)
        for number in range(1, len(inputClipPaths) + 1):
            outputFile = outputPath + str(number) + ".mp4"
            inputClips = inputClipPaths[number - 1]
            self.mergeClips(inputClips, outputFile)
        print("First Merge Completed")

        # Change durations: stretch/shrink each merged clip so its length
        # matches the requested duration (audio is dropped with -an).
        for number in range(1, len(duration) + 1):
            outputFile = outputPath + "_" + str(number) + ".mp4"
            inputFile = outputPath + str(number) + ".mp4"
            clip = VideoFileClip(inputFile)
            speed = float(duration[number - 1]) / clip.duration
            clip.close()
            # NOTE(review): this command is assembled by string
            # concatenation; if any path ever comes from untrusted input
            # it is shell-injection prone -- prefer an argument list.
            speedCommand = 'ffmpeg -y -i ' + inputFile + ' -vf "setpts=(' + str(speed) + ')*PTS" -an ' + outputFile
            self.subprocessOpen(speedCommand)
        print("Change Durations Completed")

        # Make SignLanguage: concatenate the retimed per-sentence clips.
        for number in range(1, len(inputClipPaths) + 1):
            finalClips.append(outputPath + "_" + str(number) + ".mp4")

        outputFile = settings.MEDIA_ROOT + 'signLanguage.mp4'
        self.mergeClips(finalClips, outputFile)
        print("Last Merge Completed")
        # shutil.rmtree('player/media/outputs')

        return outputFile
Пример #32
0
class MovieStim3(BaseVisualStim, ContainerMixin, TextureMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """

    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            win :
                the `Window` the movie is drawn into.
            filename :
                a string giving the relative or absolute path to the movie.
            size :
                stimulus size; ``None`` uses the movie's native pixel size.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.
            noAudio : bool
                if True the audio stream is skipped entirely (avoids sound
                dependencies for silent movies).
            interpolate : bool
                use GL_LINEAR (True) or GL_NEAREST (False) texture filtering.

        ``vframe_callback`` and ``fps`` are accepted but not used anywhere
        in this constructor.
        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        # Estimate the monitor refresh interval; fall back to measuring it,
        # then to a 60 Hz default. Used to decide when to advance frames.
        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0/retraceRate
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        # noAudio/sound must be set before loadMovie(), which reads them.
        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size: default to the movie's native width/height in pixels
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                    float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

    def reset(self):
        """Forget any decoded frame/texture and flag the movie NOT_STARTED."""
        self._numpyFrame = self._nextFrameT = self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """Alias of `~MovieStim.loadMovie`, provided only for syntactic
        consistency with the other visual stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        :Raises: IOError if the file does not exist.

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                sound = self.sound
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except Exception:
                    # was a bare `except:`, which would also swallow
                    # KeyboardInterrupt/SystemExit.
                    # JWE added this as a patch for a moviepy oddity where the
                    # duration is inflated in the saved file causes the
                    # audioclip to be the wrong length, so round down and it
                    # should work
                    jwe_tmp = self._mov.subclip(0, round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del jwe_tmp
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes:
            # size, duration, fps
        # mov.audio has attributes
            # duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0/self._mov.fps
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Start playing, or continue a paused movie from its current
        position.

        :param log: when True (and autoLog is set) the state change is
            logged on the next window flip.
        """
        status = self.status
        if status != PLAYING:
            # Set status first for better audio behavior (JK).
            self.status = PLAYING
            # Extra check prevents starting the audio twice (audio
            # doubling) - JK.
            if self._audioStream is not None and self._audioStream.status is not PLAYING: 
                self._audioStream.play()
            if status == PAUSED:
                # Resuming from pause: re-seek the audio to the video's
                # current frame time; fall back to 0 for invalid
                # (negative) timestamps - JK.
                if self.getCurrentFrameTime() < 0:
                    self._audioSeek(0)
                else:
                    self._audioSeek(self.getCurrentFrameTime())
            # Re-zero the video clock so getTime() tracks movie time.
            self._videoClock.reset(-self.getCurrentFrameTime())
            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP, obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """Pause at the current movie position (sound stops, the current
        frame will not advance). If play() is called again both restart.

        Returns True when the movie was playing and is now paused,
        False otherwise.
        """
        if self.status != PLAYING:
            if log and self.autoLog:
                self.win.logOnFlip("Failed Set %s paused" %
                                   (self.name), level=logging.EXP, obj=self)
            return False
        self.status = PAUSED
        if self._audioStream:
            # Only the sounddevice backend has a real pause; other
            # backends must stop outright (JK).
            if prefs.general['audioLib'] == ['sounddevice']:
                self._audioStream.pause()
            else:
                self._audioStream.stop()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s paused" %
                               (self.name), level=logging.EXP, obj=self)
        return True

    def stop(self, log=True):
        """Stop playback and unload the movie entirely (sound stops, the
        current frame will not advance). Once stopped the movie cannot be
        restarted - it must be loaded again. Use pause() if you may need
        to restart the movie.
        """
        if self.status == STOPPED:
            return
        self.status = STOPPED
        self._unload()
        self.reset()
        if log and self.autoLog:
            self.win.logOnFlip("Set %s stopped" % (self.name),
                               level=logging.EXP, obj=self)


    def setVolume(self, volume):
        """Set the playback volume.

        Not implemented for this backend yet: the call is accepted and
        ignored.
        """
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """Mirror the movie left-to-right when set to True.

        Note that this is relative to the original, not relative to the
        current state.
        """
        self._needVertexUpdate = True  # vertices must be rebuilt next draw
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')

    def setFlipVert(self, newVal=True, log=True):
        """Mirror the movie top-to-bottom when set to True.

        Note that this is relative to the original, not relative to the
        current state.
        """
        self._needVertexUpdate = True  # vertices must be rebuilt next draw
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')

    def getFPS(self):
        """Return the movie's playback speed in frames per second."""
        return self._mov.fps

    def getCurrentFrameTime(self):
        """Return the movie-file timestamp of the currently shown frame."""
        # _nextFrameT points one frame ahead, so step back one interval.
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        """Decode the frame due at the current movie time and upload it to
        the GL texture used by draw(). Advances the frame pointer only
        while the movie status is PLAYING.
        """
        if self._nextFrameT is None or self._nextFrameT < 0:
            # movie has no current position (or invalid position -JK), 
            # need to reset the clock to zero in order to have the 
            # timing logic work otherwise the video stream would skip 
            # frames until the time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0.0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   self._retraceInterval/2.0):
                return None
        try:
            self._numpyFrame = self._mov.get_frame(self._nextFrameT) 
        except OSError:
            # Decoder hiccup: skip ahead one frame interval and retry.
            # NOTE(review): execution falls through after the recursive
            # call and re-uploads whatever frame that call produced.
            if self.autoLog:
                logging.warning("Frame {} not found, moving one frame and trying again" 
                    .format(self._nextFrameT), obj=self)
            self._nextFrameT += self._frameInterval
            self._updateFrameTexture()
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            # Lazily create the texture; a fresh texture cannot use
            # glTexSubImage2D (no storage allocated yet).
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(
            GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            # Nearest-neighbour path; NOTE(review): this branch uploads
            # as GL_BGR while the interpolated branch uses GL_RGB --
            # presumably intentional upstream, but worth confirming.
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if self.status == PLAYING:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current
        position in the movie will be determined automatically.

        This method should be called on every frame that the movie is
        meant to appear.

        :param win: target window; defaults to the window the stimulus
            was created with.
        """

        # Auto-start (or restart when looping); a finished non-looping
        # movie draws nothing.
        if (self.status == NOT_STARTED or
                (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures: clear unit 1, put the movie frame on unit 0
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        # Interleaved (u, v, x, y, z) quad covering the stimulus verts.
        array = (GL.GLfloat * 32)(
            1, 1,  # texture coords
            vertsPix[0, 0], vertsPix[0, 1], 0.,  # vertex
            0, 1,
            vertsPix[1, 0], vertsPix[1, 1], 0.,
            0, 0,
            vertsPix[2, 0], vertsPix[2, 1], 0.,
            1, 0,
            vertsPix[3, 0], vertsPix[3, 1], 0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Jump both the video and audio streams to time ``t`` (seconds)."""
        # Video side: point at the frame due at t and rebase the clock.
        self._nextFrameT = t
        self._videoClock.reset(t)
        # Audio side is handled by its own helper.
        self._audioSeek(t)

    def _audioSeek(self, t):
        """Reposition the audio stream at ``t`` seconds, if audio exists."""
        if self._audioStream is None:
            return  # silent movie - nothing to do
        if prefs.general['audioLib'] == ['sounddevice']:
            # The sounddevice backend supports true seeking.
            self._audioStream.seek(t)
            return
        # Other backends cannot seek: stop, then rebuild the stream from
        # the sound array starting at the requested offset.
        self._audioStream.stop()
        samples = self._mov.audio.to_soundarray()
        offset = int(t * self._mov.audio.fps)
        self._audioStream = self.sound.Sound(
            samples[offset:, :], sampleRate=self._mov.audio.fps)
        if self.status != PAUSED:  # allows for seeking while paused - JK
            self._audioStream.play()

    def _getAudioStreamTime(self):
        # NOTE(review): self._audio_stream_clock is never assigned anywhere
        # in the visible class -- presumably set elsewhere (or dead code);
        # confirm before relying on this helper.
        return self._audio_stream_clock.getTime()

    def _unload(self):
        """Release GPU textures, the movie reader and any audio stream."""
        # Remove textures from the graphics card first to prevent a crash.
        self.clearTextures()
        mov, self._mov = self._mov, None
        if mov is not None:
            mov.close()
        self._numpyFrame = None
        stream, self._audioStream = self._audioStream, None
        if stream is not None:
            stream.stop()
        self.status = FINISHED

    def _onEos(self):
        """End-of-stream handler: loop back to 0, or finish and stop."""
        if not self.loop:
            self.status = FINISHED
            self.stop()
        else:
            self.seek(0.0)

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP, obj=self)

    def __del__(self):
        # Best-effort cleanup on garbage collection; _unload is also called
        # from stop(), so this mainly matters when the stimulus is dropped
        # while still loaded.
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        # Playback follows the autodraw state: start when added, pause
        # when removed.
        (self.play if val else self.pause)(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)