def cut_video(recording_path, datapack_dir):

    # Read the start/end pattern
    sr1, pattern_wav = wav.read('pattern.wav')

    workingdir = tempfile.mkdtemp()

    # Open the video file
    clip = VideoFileClip(recording_path)

    # Save its audio track temporarily on disk
    clip.audio.write_audiofile(os.path.join(workingdir, "temp_audio.wav"))

    # Read the audio samples, mix down to mono (if necessary), and delete the temporary audio track
    sr2, recording_wav = wav.read(os.path.join(workingdir, "temp_audio.wav"))
    # A mono track comes back as a 1-D array, so check the number of dimensions
    if recording_wav.ndim > 1:
        recording_wav = numpy.mean(recording_wav, 1)

    shutil.rmtree(workingdir)
    # Detect the start and end audio pattern
    start, end = detect_start_end_times(pattern_wav, recording_wav, sr2, 4)

    # Cut the video and write it into two separate video and audio files
    clip.subclip(start + 0.4,
                 end).write_videofile(os.path.join(datapack_dir, 'video.mp4'),
                                      codec='libx264')
    clip.subclip(start + 0.4, end).audio.write_audiofile(
        os.path.join(datapack_dir, 'audio.wav'))
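
A hedged sketch of the imports and call shape the cut_video snippet above assumes (detect_start_end_times and the file paths come from the surrounding project and are placeholders here):

import os
import shutil
import tempfile

import numpy
import scipy.io.wavfile as wav            # provides the wav.read used above
from moviepy.editor import VideoFileClip

# detect_start_end_times(pattern, recording, sample_rate, n) is assumed to be defined
# elsewhere in the project and to return (start_seconds, end_seconds).

# cut_video('session_recording.mp4', 'datapack/')  # example call with placeholder paths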
Example #3
def cutVideo(filepaths):
    ############################################ Initializing ########################################################
    for filepath in filepaths:
        video = VideoFileClip(filepath)
        duration = int(video.duration)
        size = int(duration / 15)
        real_size = duration / 15
        if (size > real_size):
            size = size - 1
        end_custom = (15 * size) + (duration % 15)
        start_time = 0
        end_time = 15
        j = 0
        ################################## Check if Folder Already Exists ###############################################
        for j in range(100):
            if (os.path.isdir(os.getcwd() + '/Stories' + '0' +
                              str(j + 1)) == False):
                os.mkdir(os.getcwd() + '/Stories' + '0' + str(j + 1))
                j += 1
                break
        path = (os.getcwd() + '/Stories' + '0' + str(j))
        ################################## If duration is divisible by 15 ###############################################
        if (duration % 15 == 0):
            for i in range(size):
                #if (abort == None):
                if (end_time > duration + (i - 1 * 0.01)):
                    break
                else:
                    new = video.subclip(start_time, end_time)
                    new.write_videofile(path + "/stories" + '_0' + str(i + 1) +
                                        ".mp4",
                                        threads=8,
                                        preset="veryfast",
                                        audio_codec='aac')
                    start_time += 15.01
                    end_time += 15.01

################################## If duration is NOT divisible by 15 ###############################################
        else:
            i = 0
            while i < size:
                new = video.subclip(start_time, end_time)
                new.write_videofile(path + "/stories" + '_0' + str(i + 1) +
                                    ".mp4",
                                    threads=8,
                                    preset="veryfast",
                                    audio_codec='aac')
                start_time += 15.01
                end_time += 15.01
                i += 1
            if (i == size):
                new = video.subclip(start_time + 0.01, end_custom)
                new.write_videofile(path + "/stories" + '_0' + str(size + 1) +
                                    ".mp4",
                                    threads=8,
                                    preset="veryfast",
                                    audio_codec='aac')
Example #4
def clip(args):
    '''Subprogram for taking a portion of a video'''

    help(
        '{script} [--help] [--threads THREADS] [--output OUTPUT_FILE] [--mute] INPUT_FILE START_TIME END_TIME'
    )
    value_flags = ['-o', '--output', '-t', '--threads']
    input_file = unnamed_value(0, value_flags, argl=args)
    start_time = unnamed_value(1, value_flags, argl=args)
    end_time = unnamed_value(2, value_flags, argl=args)
    mute = has_any(['-m', '--mute'], argl=args)

    output_file = value_following_any(['-o', '--output'], argl=args)
    if output_file is None:
        _, ext = splitext(input_file)
        output_file = generate_filename(ext)

    threads = 8
    if has_any(['-t', '--threads'], argl=args):
        threads = value_following_any(['-t', '--threads'], argl=args)

    clip = VideoFileClip(input_file)
    subclip = clip.subclip(start_time, end_time)
    subclip.write_videofile(output_file, threads=threads, audio=not mute)
    clip.close()
    subclip.close()
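
The clip() subprogram above leans on a few small argv helpers from its project; a hypothetical reconstruction (names and behavior inferred only from the calls above, not from the original source) might look like this:

import sys
import uuid

def has_any(flags, argl=None):
    # True if any of the given flags appears in the argument list.
    argl = sys.argv[1:] if argl is None else argl
    return any(flag in argl for flag in flags)

def value_following_any(flags, argl):
    # Value that directly follows the first matching flag, or None.
    for i, arg in enumerate(argl[:-1]):
        if arg in flags:
            return argl[i + 1]
    return None

def unnamed_value(index, value_flags, argl):
    # index-th positional argument, skipping flags and the values of value-taking flags.
    positional, skip = [], False
    for arg in argl:
        if skip:
            skip = False
        elif arg in value_flags:
            skip = True
        elif not arg.startswith('-'):
            positional.append(arg)
    return positional[index]

def generate_filename(ext):
    # Random output name that keeps the input extension, e.g. "clip_1a2b3c4d.mp4".
    return 'clip_' + uuid.uuid4().hex[:8] + ext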
Example #5
File: ve.py Project: huvipg/python
def delmp4(filename, time1=0, time2=0, mv=0):
    # Cut a video file: path/filename, seconds to trim from the start (time1),
    # seconds to trim from the end (time2), or an absolute end time (mv)
    clip = VideoFileClip(filename)
    clip_len=clip.duration
    if time2==0 and mv>0:
         mvtime=mv
    else:
         mvtime=int(clip_len-time2)  
    if (clip_len-time1-time2)>0 and time1<clip_len and time2<clip_len and mv<clip_len:
    #if ((clip_len-time1-time2)>0 and mvtime==mv)  or (mv>time1 and mv<clip_len and mv>time2) :
        mvclip=clip.subclip(time1,mvtime)
        file_name = os.path.splitext(filename)[0]
        filen= os.path.basename(filename)
        mvclip.write_videofile(work_path+"\\"+filen)
        mvclip.close()
        # Close when finished to avoid running out of memory!!!
    else:
        print("The video is not long enough!!!!")
        if not os.path.isdir(work_path):
            os.makedirs(work_path)
        fo = open(work_path+"/错误!视频长度不够.txt", "a+")  # file name means "Error! Video is not long enough"
        fo.write( filename+"\n")
        fo.close()
        #shutil.copy(file_name,len_path) 
        clip.close()    
Example #6
def summarize(filepath, new_filename, hotclips):
    """
    Inputs a filepath for a video and generates a new shorter video
    in that same filepath.
    """
    # Only open the file once!
    video = VideoFileClip(filepath)

    chunks = [video.subclip(start, end)
              for (start, end) in hotclips]

    final_clip = concatenate(chunks)

    # txt_clip = ( TextClip("Generated by vSummarize",
    #                      fontsize=20, color='white')
    #             .set_pos('bottom')
    #             .set_duration(5))
    # final_clip = CompositeVideoClip([summarized_video, txt_clip])

    # Use the to_videofile default codec, libx264
    # libx264 is much better than mpeg4, and still writes .mp4
    # Use the fps of the original video.
    final_clip.to_videofile(new_filename,
                            fps=video.fps,
                            audio_codec='mp3')
Example #7
    def process(self):
        '''
        Process video
        '''
        video_clip = VideoFileClip(self.video_path)
        if self.start or self.end:
            video_clip = video_clip.subclip(self.start, self.end)

        video_clip = video_clip.set_fps(video_clip.fps // 10)

        total_frames = round(video_clip.fps * video_clip.duration)
        print(
            f'Processing video {self.video_path}, {total_frames} frames, size: {video_clip.size}'
        )

        for frame in tqdm(video_clip.iter_frames()):
            # It's better to resize before cropping so the image keeps more of the scene
            h, w, _ = frame.shape
            aspect_ratio = w / h
            ratio = h / (self.image_size * 2)
            w /= ratio
            h = w / aspect_ratio

            frame = cv2.resize(frame[..., ::-1], (int(h), int(w)))
            if self.crop_and_save(frame):
                break

        print(f'Saved {self.counter} images to {self.save_dir}')
Example #8
def getVCItem(clipIndex, meanPreCutLen):
    vc = VideoFileClip("movie/clip" + str(clipIndex) + ".mp4")
    vcLen = vc.duration
    startPoint = vcLen / 2 - meanPreCutLen
    endPoint = vcLen / 2
    vcItem = vc.subclip(int(startPoint), int(endPoint))
    return vcItem
Example #9
def main(source):
    print('The video to cut is', source[1])  # Get the file that needs cutting
    source = source[1]
    time = input('Please enter the start and end times:\n')  # Get the cut start/end times
    time = time.split()
    times = []
    while time != []:

        start_time = time.pop(0)
        stop_time = time.pop(0)
        times.append([start_time, stop_time])

    print("子视频命名方式为原名称_段数.mp4")
    temp = source.split('.')
    target = []
    num_of_cuts = len(times)
    for i in list(range(1, num_of_cuts + 1)):
        target.append(temp[0] + str(i) + '.' + temp[1])

    # print('**--**',target)
    video = VideoFileClip(source)
    for i in list(range(0, num_of_cuts, 1)):
        current_time = times[i]
        print('cutting...\n')

        temp = list(map(int, current_time[0].split(":")))
        start_time = reduce(to_seconds, temp)
        temp = list(map(int, current_time[1].split(":")))
        stop_time = reduce(to_seconds, temp)
        new_video = video.subclip(start_time, stop_time)  # Perform the cut
        new_video.to_videofile(target[i], fps=29, remove_temp=True)  # Write the output file

    video.close()
    print('done ^_^\n')
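
The reduce(to_seconds, ...) calls above assume a small folding helper; a hedged reconstruction (hypothetical, not part of the original listing) could be:

from functools import reduce

def to_seconds(accumulated, value):
    # Folds an [hours, minutes, seconds] (or [minutes, seconds]) list into seconds,
    # e.g. reduce(to_seconds, [1, 2, 3]) -> (1 * 60 + 2) * 60 + 3 = 3723.
    return accumulated * 60 + value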
Example #10
def generateAudioClips(clip: VideoFileClip, segments: List[Segment],
                       output_dir):
    # this needs to be done in two loops to avoid assertions from the sub clipping
    for seg in segments:
        seg.video = clip.subclip(seg.startTime, seg.endTime)
    for seg in segments:
        seg.audioPath = output_dir + "/clip_" + str(seg.startTime) + ".mp3"
        seg.video.audio.write_audiofile(seg.audioPath)
Example #11
def video_frames(path: Path, t_start=0, t_end=None) -> Iterator[np.ndarray]:
    """Video frames iterator that releases the resources correctly."""
    clip = VideoFileClip(str(path))
    try:
        subclip = clip.subclip(t_start=t_start, t_end=t_end)
        yield from subclip.iter_frames()
    finally:
        # https://stackoverflow.com/questions/43966523/getting-oserror-winerror-6-the-handle-is-invalid-in-videofileclip-function
        clip.reader.close()
        if clip.audio is not None:  # clips without an audio track have no audio reader
            clip.audio.reader.close_proc()
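
A minimal hedged usage sketch for video_frames above (assumes a local "sample.mp4"; each yielded frame is an H x W x 3 numpy array):

from pathlib import Path

for i, frame in enumerate(video_frames(Path("sample.mp4"), t_start=1, t_end=3)):
    print(i, frame.shape)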
Example #12
 def cut_one(self,row):
     start = self.get_seconds_start(row)
     end = start + row['toAdd']
     self.video_id = f'_{start}{end}.mp4'
     name = f'{self.highlights_location}/video{self.video_id}'
     fajl = VideoFileClip(self.mc["src"])
     new = fajl.subclip(start,end)
     new.write_videofile(name,logger= None)
     progress = self.progress_part * (self.highlights.index(row)+1)
     self.update_cut_progress(progress)
Example #13
 def addNum(self):
     source = self.source_le.text().strip()  # Get the file to cut
     target = self.target_le.text().strip()  # Get the file to save the cut video to
     start_time = self.start_le.text().strip()  # Get the cut start time
     stop_time = self.stop_le.text().strip()  # Get the cut end time
     video = VideoFileClip(source)  # Load the video file
     video = video.subclip(int(start_time), int(stop_time))  # Perform the cut
     video.to_videofile(target, fps=20, remove_temp=True)  # Write the output file
     self.result_le.setText("ok!")  # Show OK in the UI once the file is written
     self.result_le.setStyleSheet(
         "color:red;font-size:40px")  # Make the OK text red, 40 px
     self.result_le.setAlignment(Qt.AlignCenter)  # Center OK inside its box
Example #14
def clip(args: Namespace):
    '''Subprogram for taking a portion of a video'''

    output_file = args.output
    if output_file is None:
        _, ext = splitext(args.input_file)
        print(f'Output file using same extension as input file: {ext}')
        output_file = generate_filename(ext)

    audio = not args.mute

    clip = VideoFileClip(args.input_file)
    subclip = clip.subclip(args.start_time, args.end_time)
    subclip.write_videofile(output_file, threads=args.threads, audio=audio)
    clip.close()
    subclip.close()
Example #15
def getIntroVid(vidTitle, origCropFrac, sampleHeight, origVidDir, username):
    if len(username)==0:
        username="******"
    titleParts = [vidTitle,
                  " ",
                  "by \n " + username]
    sizes = [50,
             30,
             80]

    introVid = VideoFileClip(origVidDir)
    (w, h) = introVid.size
    length = introVid.duration

    times = [[0, int(length/3)],
             [int(length/3), int(length*2/3)],
             [int(length*2/3), int(length)]]

    # times = [[0, 3],
    #          [3, 9],
    #          [9, 14]]

    introVid = introVid.crop(height=int(round((1 - origCropFrac*2) * sampleHeight, 0)), width = w, x_center=w/2, y_center=h/2)
    clips=[]
    iPart=0
    while iPart < len(titleParts):
        title=titleParts[iPart]
        if times[iPart][1]>introVid.duration:
            times[iPart][1] = introVid.duration
        vidClip = introVid.subclip(times[iPart][0], times[iPart][1])
        text = TextClip(title, font="Amiri-Bold", fontsize=sizes[iPart], color="white", align = 'center').set_position(("center",0.3), relative=True)
        text_clip = mpe.CompositeVideoClip([vidClip, text]).set_duration(vidClip.duration)
        clips.append(text_clip)
        iPart = iPart + 1

    final_clip = concatenate_videoclips(clips, method='compose')

    final_clip1 = fadeout(final_clip, 1, final_color=None)

    final_clip2 = fadein(final_clip1, 1, initial_color=None)

    return final_clip2
Example #16
def test_FramesMatches_select_scenes(
    filename,
    subclip,
    match_threshold,
    min_time_span,
    nomatch_threshold,
    expected_result,
):
    video_clip = VideoFileClip(filename)
    if subclip is not None:
        video_clip = video_clip.subclip(subclip[0], subclip[1])
    clip = concatenate_videoclips([video_clip.fx(time_mirror), video_clip])
    result = FramesMatches.from_clip(clip, 10, 3, logger=None).select_scenes(
        match_threshold,
        min_time_span,
        nomatch_threshold=nomatch_threshold,
    )

    assert len(result) == len(expected_result)
    assert result == expected_result
Example #17
def crop_word(found_word: str, start: float, end: float, videoid: str,
              save_loc: Tuple[str]):
    # pylint: disable=too-many-arguments, too-many-locals
    '''crops a video into a small segment, including padding, face detection
    and face bounding.'''
    # get full video
    full_video = VideoFileClip(f'videos/{videoid}.mp4')

    # pad video
    pad = 0.25
    start, end = apply_padding(start, end, pad, full_video.duration)

    # get subclip
    word_subclip = full_video.subclip(start, end)

    # get frame
    start_frame = word_subclip.get_frame(t=0)
    end_frame = word_subclip.get_frame(t=end - start)

    # detect faces
    start_faces = get_faces(start_frame)
    end_faces = get_faces(end_frame)

    if len(start_faces) == 1 and len(end_faces) == 1:
        bound_face = get_face_bounds(start_faces[0], end_faces[0])

        final_word = crop(
            word_subclip,
            x1=bound_face['left'],
            x2=bound_face['right'],
            y1=bound_face['top'],
            y2=bound_face['bottom'],
        )

        filename = f'{videoid}_{start:.2f}'
        save_to_file(final_word, found_word, filename, save_loc)

    elif len(start_faces) == 0 or len(end_faces) == 0:
        print('No faces found in either the start or end frame.')
    else:
        print('Multiple faces found')
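
crop_word above assumes several project helpers (apply_padding, get_faces, get_face_bounds, save_to_file). As one hedged illustration, apply_padding is presumably just a clamped widening of the word's time window (hypothetical reconstruction, not from the original project):

def apply_padding(start, end, pad, duration):
    # Widen [start, end] by `pad` seconds on each side, clamped to the clip bounds.
    return max(0.0, start - pad), min(duration, end + pad)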
Example #19
 def video_clip(path, time_start=0, clip_seconds=10, unique_id=None):
     assert isinstance(time_start, int) and time_start >= 0
     assert isinstance(clip_seconds, int) and clip_seconds > 0
     clip = VideoFileClip(path)
     time_end = time_start + clip_seconds
     if time_start > clip.duration:
         time_start = clip.duration
     if time_end > clip.duration:
         time_end = clip.duration
     if time_start - time_end == 0:
         raise core_exceptions.ZeroLengthError()
     if unique_id is None:
         path = os.path.abspath(path)
         unique_id = FNV64.hash(path)
     output_name = f"{unique_id}_{time_start}_{clip_seconds}.mp4"
     print("Clip from", time_start, "to", time_end, "sec in:", output_name)
     sub_clip = clip.subclip(time_start, time_end)
     sub_clip.write_videofile(output_name)
     sub_clip.close()
     clip.close()
     del clip
     del sub_clip
     return output_name
Example #20
    def summarize(self):
        print('Fetching features...')
        self.__fetch_features()
        print('Features fetched...')
        self.__get_n_frames()
        print('Getting `n` frames...')
        self.__set_scene_detector()
        print('Scene detector set; detecting scenes ...')
        self.__detect_scenes()
        print('Scenes detected, selecting scenes ...')
        selected_scenes = self.__knapsack()
        print('Scenes selected, generating summary ...')
        original = VideoFileClip(self.F.video_filepath)
        clips = []
        for i in range(1, len(selected_scenes)):
            start = self.__timecodes[selected_scenes[i - 1]]
            end = self.__timecodes[selected_scenes[i - 1] + 1]
            clip = original.subclip(start, end)
            clips.append(clip)

        summ = concatenate_videoclips(clips)
        summ.write_videofile(self.__outpath)
        return self.__outpath, "Success"
Example #21
    def trim_silences(self, silences: List[Tuple[float, float, float]]):
        video = VideoFileClip(self.in_file)
        full_duration = video.duration
        # print(f"pre_silences: {silences}")
        _silences = self.rescale_all_silences(silences, min_duration=self.min_duration, max_duration=self.max_duration)
        # print(f"post_silences: {_silences}")
        non_silent_periods = list(
            [(end1, start2 - end1, start2) for (_, _, end1), (start2, _, _) in zip(_silences[:-1], _silences[1:])])
        print(non_silent_periods)
        input_dir, input_file = os.path.split(self.in_file)
        fname, fext = os.path.splitext(input_file)
        output_fname = os.path.join(input_dir, f"{fname}_NOSILENCE_{self.min_duration}s{fext}")
        tmp_audio_fname = f"{fname}.TEMP_MPY_wvf_snd_custom.ogg"
        tmp_video_fname = f"{fname}.TEMP_MPY_vid_custom{fext}"
        print(f"writing output to {output_fname}")
        clips = list([video.subclip(s, e) for s, d, e in non_silent_periods if d >= 1])
        print(f"got list of clips")
        # comp = mpy.CompositeVideoClip(clips)
        comp = concatenate_videoclips(clips)
        print(f"make composite video clip (no sound yet)")
        comp.write_videofile(tmp_video_fname, codec=self.codec, preset='ultrafast',
                             threads=os.cpu_count() + 1, audio_codec=self.audio_codec)
        print(f"done writing out ${tmp_video_fname}")

        video.close()
        print(f"closed video")
        # print(f"preparing to write audio")
        # comp.audio.write_audiofile(tmp_audio_fname, buffersize=2000000, codec=self.audio_codec)
        # print(f"wrote audio")
        # comp.audio = None
        # print(f"wrote out video, now combining")
        # output_video = VideoFileClip(tmp_video_fname, audio=False)
        # output_video.set_audio(AudioFileClip(tmp_audio_fname))
        # output_video.write_videofile(output_fname)
        # output_video.write_videofile(output_fname, preset='ultrafast', codec=self.codec, threads=os.cpu_count() + 1,
        #                              fps=video.fps)
        print(f"wrote video out")
Example #22
    def process_video(self,
                      video_file,
                      file_out,
                      t_start=None,
                      t_end=None,
                      process_pool=None):

        input_clip = VideoFileClip(video_file)

        if t_start is not None:
            input_clip = input_clip.subclip(t_start=t_start, t_end=t_end)

        if self.debug:
            self.processed_frames = []

            stage_idx = 0

            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame_stage(frame, stage_idx,
                                                       process_pool))
            output_clip.write_videofile(file_out, audio=False)

            if len(self.processed_frames) > 0:
                out_file_path = os.path.split(file_out)
                out_file_name = out_file_path[1].split('.')
                for _ in range(len(self.processed_frames[0]) - 1):
                    self.frame_count = 0
                    stage_idx += 1
                    stage_file = '{}.{}'.format(
                        os.path.join(out_file_path[0], out_file_name[0]) +
                        '_' + str(stage_idx), out_file_name[1])
                    output_clip.write_videofile(stage_file, audio=False)
        else:
            output_clip = input_clip.fl_image(
                lambda frame: self.process_frame(frame, process_pool))
            output_clip.write_videofile(file_out, audio=False)
Example #23
def instaTV(url, firefox, button, bot, chat_id):
    bot.send_message(chat_id=chat_id,
                     text='This may take a while',
                     timeout=60)
    temp_dir = tempfile.mkdtemp()
    if (auth(url, firefox, button, bot) == True):
        firefox.get(url)
        time.sleep(1)
        html = firefox.page_source
        time.sleep(1)
        encodedVideo = re.search(r'"video_url":"(.*?)",', html)
        if (encodedVideo == None):
            decoded = ''
            return decoded
        else:
            if (encodedVideo != None):
                path = temp_dir + '/IGTV.mp4'
                decoded = (encodedVideo.group(1).replace(r"\u0026", "&"))
                urllib.request.urlretrieve(decoded, path)
                size = os.path.getsize(path) / 1000000
                if size <= 49.9999:
                    bot.send_video(chat_id=chat_id,
                                   video=open(path, 'rb'),
                                   timeout=3600)
                    shutil.rmtree(temp_dir)
                    return path
                else:
                    video = VideoFileClip(path)
                    duration = int(video.duration)
                    start_time = 0
                    end_time = 600
                    parts = int(duration / 600)
                    videopart = int(duration / 600) + 1
                    end_custom = (600 * parts) + (duration % 600)
                    igtvPath = []
                    if (parts > (video.duration / 600)):
                        parts = parts - 1
                    if (duration <= 630):
                        bot.send_message(chat_id=chat_id,
                                         text='Finishing ' + runner,
                                         timeout=60)
                        subvideo = video.subclip(0, duration)
                        subvideo.write_videofile(temp_dir + '/IGTV' + '01' +
                                                 '.mp4',
                                                 threads=8,
                                                 preset="veryfast",
                                                 logger=None,
                                                 audio_codec='aac',
                                                 rewrite_audio=False,
                                                 fps=30,
                                                 bitrate='520k')
                        time.sleep(1)
                        bot.send_video(chat_id=chat_id,
                                       video=open(
                                           temp_dir + '/IGTV' + '01' + '.mp4',
                                           'rb'),
                                       timeout=3600)
                        igtvPath = temp_dir + '/IGTV01.mp4'
                        shutil.rmtree(temp_dir)
                        return igtvPath
                    else:
                        if (duration % 600 == 0):
                            c = 1
                            for i in range(parts):
                                if (end_time > duration):
                                    break
                                subvideo = video.subclip(start_time, end_time)
                                bot.send_message(chat_id=chat_id,
                                                 text='Processing ' +
                                                 'part ' + str(c) + '/' +
                                                 str(videopart) + '' + load,
                                                 timeout=60)
                                subvideo.write_videofile(temp_dir + '/IGTV' +
                                                         '0' + str(i + 1) +
                                                         '.mp4',
                                                         threads=8,
                                                         preset="veryfast",
                                                         logger=None,
                                                         audio_codec='aac',
                                                         rewrite_audio=False,
                                                         fps=30,
                                                         bitrate='550k')
                                time.sleep(1)
                                bot.send_video(chat_id=chat_id,
                                               video=open(
                                                   temp_dir + '/IGTV' + '0' +
                                                   str(i + 1) + '.mp4', 'rb'),
                                               timeout=3600)
                                c += 1
                                start_time += 600.00
                                end_time += 600.00
                        elif (duration % 600 > 0):
                            i = 0
                            c = 1
                            while i < parts:
                                subvideo = video.subclip(start_time, end_time)
                                bot.send_message(chat_id=chat_id,
                                                 text='Processing ' +
                                                 'part ' + str(c) + '/' +
                                                 str(videopart) + '' + load,
                                                 timeout=60)
                                subvideo.write_videofile(temp_dir + '/IGTV' +
                                                         '0' + str(i + 1) +
                                                         '.mp4',
                                                         threads=8,
                                                         preset="veryfast",
                                                         logger=None,
                                                         audio_codec='aac',
                                                         rewrite_audio=False,
                                                         fps=30,
                                                         bitrate='550k')
                                time.sleep(1)
                                bot.send_video(chat_id=chat_id,
                                               video=open(
                                                   temp_dir + '/IGTV' + '0' +
                                                   str(i + 1) + '.mp4', 'rb'),
                                               timeout=3600)
                                c += 1
                                start_time += 600.00
                                end_time += 600.00
                                i += 1
                            if (i == parts):
                                subvideo = video.subclip(
                                    start_time, end_custom)
                                bot.send_message(chat_id=chat_id,
                                                 text='Processing ' +
                                                 'part ' + str(c) + '/' +
                                                 str(videopart) + '' + load,
                                                 timeout=60)
                                subvideo.write_videofile(temp_dir + '/IGTV' +
                                                         '0' + str(i + 1) +
                                                         '.mp4',
                                                         threads=8,
                                                         preset="veryfast",
                                                         logger=None,
                                                         audio_codec='aac',
                                                         rewrite_audio=False,
                                                         fps=30,
                                                         bitrate='550k')
                                time.sleep(1)
                                bot.send_video(chat_id=chat_id,
                                               video=open(
                                                   temp_dir + '/IGTV' + '0' +
                                                   str(i + 1) + '.mp4', 'rb'),
                                               timeout=3600)
                            igtvPath = temp_dir
                            shutil.rmtree(temp_dir)
                            return igtvPath
            else:
                decoded = ''
                return decoded
Example #24
                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()

        media_path = self._video_path({ 'name' : video_file })
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)
            
            # Resize clip before processing
            if width:
                video = video.resize(width = width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2 = video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:
                def add_caption(caption, clip):
Example #25
class MovieStim3(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """

    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0 / retraceRate
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                    float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

    def reset(self):
        self._numpyFrame = None
        self._nextFrameT = None
        self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other visual
        stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except:
                    # JWE added this as a patch for a moviepy oddity where the duration is inflated in the saved file
                    # causes the audioclip to be the wrong length, so round down and it should work
                    jwe_tmp = self._mov.subclip(0,round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del(jwe_tmp)
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes:
            # size, duration, fps
        # mov.audio has attributes
            # duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0 / self._mov.fps
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if self._audioStream is not None:
            self._audioStream.play()
        if status != PLAYING:
            self.status = PLAYING
            self._videoClock.reset(-self.getCurrentFrameTime())

            if status == PAUSED:
                self._audioSeek(self.getCurrentFrameTime())

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP, obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" %
                                   (self.name), level=logging.EXP, obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %
                               (self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted -
        it must be loaded again. Use pause() if you may need to restart
        the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._unload()
            self._reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP, obj=self)

    def setVolume(self, volume):
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')
        self._needVertexUpdate = True

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')
        self._needVertexUpdate = True

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._mov.fps

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        if self._nextFrameT is None:
            # movie has no current position, need to reset the clock
            # to zero in order to have the timing logic work
            # otherwise the video stream would skip frames until the
            # time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   self._retraceInterval / 2.0):
                return None
        self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(
            GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if not self.status == PAUSED:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current
        position in the movie will be determined automatically.

        This method should be called on every frame that the movie is
        meant to appear.
        """

        if (self.status == NOT_STARTED or
                (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        array = (GL.GLfloat * 32)(
            1, 1,  # texture coords
            vertsPix[0, 0], vertsPix[0, 1], 0.,  # vertex
            0, 1,
            vertsPix[1, 0], vertsPix[1, 1], 0.,
            0, 0,
            vertsPix[2, 0], vertsPix[2, 1], 0.,
            1, 0,
            vertsPix[3, 0], vertsPix[3, 1], 0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        # video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        # for sound we need to extract the array again and just begin at new
        # loc
        if self._audioStream is None:
            return  # do nothing
        self._audioStream.stop()
        sndArray = self._mov.audio.to_soundarray()
        startIndex = int(t * self._mov.audio.fps)
        self._audioStream = sound.Sound(
            sndArray[startIndex:, :], sampleRate=self._mov.audio.fps)
        self._audioStream.play()

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        try:
            # remove textures from graphics card to prevent crash
            self.clearTextures()
        except Exception:
            pass
        self._mov = None
        self._numpyFrame = None
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP, obj=self)

    def __del__(self):
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
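
A minimal hedged usage sketch for the MovieStim3 class above, following the usual PsychoPy draw/flip loop (the window size and 'demo.mp4' path are placeholders):

from psychopy import core, visual
from psychopy.constants import FINISHED

win = visual.Window((800, 600), units='pix')
mov = visual.MovieStim3(win, 'demo.mp4', noAudio=True)  # placeholder movie path
while mov.status != FINISHED:
    mov.draw()   # draw the current frame
    win.flip()   # flip every screen refresh so playback keeps pace
win.close()
core.quit()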
Example #26
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False):

        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"

        # -----------------------------------------------------------
        # FIXING THE BUG with Model loading:
        # model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        # TypeError: __init__() takes exactly 3 arguments (2 given)
        # -----------------------------------------------------------

        # tmp_1 = PluginLoader.get_model(model_name)
        # tmp_1 = PluginLoader._import("Model_LIVE", "Model_LIVE") # that works (crutch however)
        # tmp_2 = Path(self._model_path(use_gan)) # models/emma_to_jade
        # print('\n\n\n{}\n{}\n{}\n{}\n\n\n'.format(tmp_1, type(tmp_1), tmp_2, type(tmp_2)))
        # sys.exit(0)

        # values in faceit_live module:
        # plugins.Model_Original.Model
        # <type 'classobj'>
        # models/emma_to_jade
        # <class 'pathlib.PosixPath'>

        # values here:
        # plugins.Model_Original.Model.Model
        # <type 'classobj'>
        # models/emma_to_jade
        # <class 'pathlib.PosixPath'>
        # -----------------------------------------------------------

        # model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan))) # ==> crash
        model = PluginLoader._import("Model_LIVE", "Model_LIVE")(Path(
            self._model_path(use_gan)))

        # print('\n\n\n{}\n\n\n'.format(self._model_path(use_gan))) # e.g. models/test_2_faces
        # sys.exit(0)

        if not model.load(swap_model):
            print(
                'model Not Found! A valid model must be provided to continue!')
            exit(1)

        print('Checkpoint_1 ... Model loaded')

        # -----------------------------------------------------------
        # FIXING THE BUG with Converter loading:
        # -----------------------------------------------------------

        # tmp_1 = PluginLoader.get_converter(converter_name)
        # tmp_1 = PluginLoader._import("Convert", "Convert_Masked_LIVE")
        # print('\n\n\n{}\n{}\n\n\n'.format(tmp_1, type(tmp_1)))
        # sys.exit(0)

        # faceit_live module:
        # plugins.Convert_Masked.Convert
        # <type 'classobj'>

        # here:
        # plugins.Convert_Masked.Convert
        # <type 'classobj'>
        # -----------------------------------------------------------

        # Load converter
        # converter = PluginLoader.get_converter(converter_name) # ==> crash
        converter = PluginLoader._import("Convert", "Convert_Masked_LIVE")
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        print('Checkpoint_2 ... Converter loaded')

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter_LIVE(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            # if convert_colors:
            # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV

            DEBUG_MODE = 0
            for face in detect_faces_LIVE(frame, "cnn"):

                if DEBUG_MODE:
                    print('Got face!')
                    # print(dir(face)) # image, x, y, w, h, landmarks
                    print('Face geometry: ({},{},{},{})'.format(
                        face.x, face.y, face.w, face.h))
                    print('Face landmarks: {}'.format(face.landmarks))

                    cv2.imshow('Face', face.image)
                    continue

                if (not face_filter) or (face_filter and filter.check(face)):

                    # if 1:
                    #     print(dir(face.landmarks))
                    #     face.landmarks = []

                    frame = converter.patch_image(frame, face)
                    if not live:
                        frame = frame.astype(numpy.float32)

            # if convert_colors:
            # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV

            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        # ===================================================
        if live:

            print('Starting live mode ...')
            print('Press "Q" to Quit')

            PATH_TO_VIDEO = './data/videos/emma_360_cut.mp4'

            if TEST_2_FACES_FLAG:
                # PATH_TO_VIDEO = './_data/videos/pair_360p_original.mp4'
                PATH_TO_VIDEO = './data/videos/pair_360p_cut.mp4'

            video_capture = cv2.VideoCapture(PATH_TO_VIDEO)

            width = video_capture.get(3)  # float
            height = video_capture.get(4)  # float
            print("video dimensions = {} x {}".format(width, height))

            while 1:

                ret, frame = video_capture.read()
                # print(frame.shape, frame.dtype) # (360, 640, 3), uint8

                # frame = cv2.resize(frame, (640, 480))

                print('HANDLING NEW FRAME ...')

                if CROP_HALF_OF_FRAME == 'left':
                    frame[:, 0:frame.shape[1] //
                          2] = 0  # ~ cropping the left half of the image
                # elif CROP_HALF_OF_FRAME == 'right':
                # pass

                if not ret:
                    print("RET IS NONE ... I'M QUIT")
                    video_capture.release()
                    break

                # block without try/except -  to catch actual errors:
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                print('GOT AN IMAGE!')
                frame = cv2.flip(frame, 1)
                image = cv2.flip(image, 1)

                try:  # with flip:

                    # flip image, because webcam inverts it and we trained the model the other way!
                    frame = cv2.flip(frame, 1)

                    image = _convert_frame(frame, convert_colors=False)
                    print('GOT AN IMAGE!')

                    # flip it back
                    frame = cv2.flip(frame, 1)
                    image = cv2.flip(image, 1)

                except:

                    try:  # without flip:

                        image = _convert_frame(frame, convert_colors=False)
                        print('GOT AN IMAGE!')

                    except:

                        print("HMM ... CONVERTATION FAILED ... I'M QUIT")
                        continue
                        # video_capture.release()
                        # break

                cv2.imshow('Video', image)
                cv2.imshow('Original', frame)

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    print("KEYBOARD INTERRUPT ... I'M QUIT")
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()
        # ===================================================

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(self.OUTPUT_PATH):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
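The video branch above follows one pattern: open the clip with MoviePy, map a per-frame function over it with fl, and re-encode. Below is a self-contained, hedged sketch of just that pattern; the file names are placeholders and a trivial red-channel boost stands in for converter.patch_image.

import numpy
from moviepy.editor import VideoFileClip

def _boost_red(frame):
    # Stand-in for the face converter: any function that maps an RGB uint8
    # frame to an RGB uint8 frame of the same shape will work here.
    out = frame.copy()
    out[:, :, 0] = numpy.clip(out[:, :, 0].astype(numpy.int16) + 40, 0, 255)
    return out.astype(numpy.uint8)

clip = VideoFileClip('input.mp4')  # placeholder path
converted = clip.fl(lambda get_frame, t: _boost_red(get_frame(t)))
converted.write_videofile('output.mp4', codec='libx264')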
Example #27
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))
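        # (Aside, not part of the original example: ConfigProto/Session is
        #  TensorFlow 1.x API. On TensorFlow 2.x the rough equivalent, as a
        #  hedged sketch, would be:
        #      for gpu in tf.config.list_physical_devices('GPU'):
        #          tf.config.experimental.set_memory_growth(gpu, True)
        #  which likewise lets GPU memory grow on demand.)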

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # swap the channels back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
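The side-by-side branch of the convert() method above is compact enough to run on its own. The sketch below is a hedged, standalone version: the input path is a placeholder, a horizontal flip stands in for the face swap, and TextClip requires ImageMagick with the 'Amiri-regular' font (the font name is copied from the example).

from moviepy.editor import (VideoFileClip, TextClip, CompositeVideoClip,
                            clips_array)

def add_caption(caption, clip):
    # semi-transparent caption banner, as in the helper above
    text = (TextClip(caption, font='Amiri-regular', color='white', fontsize=80)
            .margin(40)
            .set_duration(clip.duration)
            .on_color(color=(0, 0, 0), col_opacity=0.6))
    return CompositeVideoClip([clip, text])

original = VideoFileClip('input.mp4')
swapped = original.fl_image(lambda frame: frame[:, ::-1].copy())  # stand-in transform
stacked = clips_array([[add_caption("Original", original)],
                       [add_caption("Swapped", swapped)]])
stacked.write_videofile('side_by_side.mp4', codec='libx264')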
Example #28
class MovieStim3(BaseVisualStim, ContainerMixin, TextureMixin):
    """A stimulus class for playing movies.

    This class uses MoviePy and FFMPEG as a backend for loading and decoding
    video data from files.

    Parameters
    ----------
    filename : str
        A string giving the relative or absolute path to the movie.
    flipVert : True or *False*
        If True then the movie will be top-bottom flipped
    flipHoriz : True or *False*
        If True then the movie will be right-left flipped
    volume :
        The nominal level is 100, and 0 is silence.
    loop : bool, optional
        Whether to start the movie over from the beginning if draw is called and
        the movie is done.

    Examples
    --------
    See Movie2Stim.py for demo.

    """
    def __init__(self,
                 win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 anchor="center",
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win,
                                         units=units,
                                         name=name,
                                         autoLog=False)

        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = 1.0 / retraceRate
        self.filename = pathToString(filename)
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.anchor = anchor
        self.depth = depth
        self.opacity = opacity
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h], float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()

    @property
    def interpolate(self):
        """Enable linear interpolation (`bool').

        If `True` linear filtering will be applied to the video making the image
        less pixelated if scaled.
        """
        return self._interpolate

    @interpolate.setter
    def interpolate(self, value):
        self._interpolate = value
        self._texFilterNeedsUpdate = True

    @property
    def duration(self):
        """Duration of the video clip in seconds (`float`). Only valid after
        loading a clip, always returning `0.0` if not.
        """
        if self._mov is None:
            return 0.0

        return self._mov.duration

    @property
    def frameInterval(self):
        """Time in seconds each frame is to be presented on screen (`float`).
        Value is `0.0` if no movie is loaded.
        """
        if self._mov is None:
            return 0.0

        return 1. / self._mov.fps

    def reset(self):
        self._numpyFrame = None
        self._nextFrameT = 0.0
        self._texID = None
        self.status = NOT_STARTED
        self.nDroppedFrames = 0

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other visual
        stimuli.

        Parameters
        ----------
        filename : str
            The name of the file, including path if necessary.
        log : bool
            Log this event.

        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file.

        After the file is loaded `MovieStim.duration` is updated with the movie
        duration (in seconds).

        Parameters
        ----------
        filename : str
            The name of the file, including path if necessary.
        log : bool
            Log this event.

        """
        filename = pathToString(filename)
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                sound = self.sound
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except Exception:
                    # JWE added this as a patch for a moviepy oddity where the
                    # duration is inflated in the saved file causes the
                    # audioclip to be the wrong length, so round down and it
                    # should work
                    jwe_tmp = self._mov.subclip(0, round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del (jwe_tmp)
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes:
        # size, duration, fps
        # mov.audio has attributes
        # duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = 1.0 / self._mov.fps
        # self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if status != PLAYING:
            self.status = PLAYING  # moved this to get better audio behavior - JK
            # Added extra check to prevent audio doubling - JK
            if self._audioStream is not None and self._audioStream.status != PLAYING:
                self._audioStream.play()
            if status == PAUSED:
                # Check for valid timestamp, correct if needed -JK
                if self.getCurrentFrameTime() < 0:
                    self._audioSeek(0)
                else:
                    self._audioSeek(self.getCurrentFrameTime())
            self._videoClock.reset(-self.getCurrentFrameTime())
            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP,
                                   obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                if prefs.hardware['audioLib'] in ['sounddevice', 'PTB']:
                    # sounddevice and PTB have a "pause" function -JK
                    self._audioStream.pause()
                else:
                    self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" % (self.name),
                                   level=logging.EXP,
                                   obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" % (self.name),
                               level=logging.EXP,
                               obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted -
        it must be loaded again. Use pause() if you may need to restart
        the movie.
        """
        if self.status != STOPPED:
            self._unload()
            self.reset()
            self.status = STOPPED  # set status to STOPPED after _unload
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP,
                                   obj=self)

    def setVolume(self, volume):
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')
        self._needVertexUpdate = True

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')
        self._needVertexUpdate = True

    def getFPS(self):
        """Get the movie frames per second.

        Returns
        -------
        float
            Frames per second.

        """
        return float(self._mov.fps)

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        return self._nextFrameT - self.frameInterval

    def _updateFrameTexture(self):
        """Update texture pixel store to contain the present frame. Decoded
        frame image samples are streamed to the texture buffer.

        """
        if self._nextFrameT is None or self._nextFrameT < 0:
            # movie has no current position (or invalid position -JK),
            # need to reset the clock to zero in order to have the
            # timing logic work otherwise the video stream would skip
            # frames until the time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0.0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   self._retraceInterval / 2.0):
                return None

        while self._nextFrameT <= (self._videoClock.getTime() -
                                   self._frameInterval * 2):
            self.nDroppedFrames += 1
            if self.nDroppedFrames <= reportNDroppedFrames:
                logging.warning(
                    "{}: Video catchup needed, advancing self._nextFrameT from"
                    " {} to {}".format(self._videoClock.getTime(),
                                       self._nextFrameT,
                                       self._nextFrameT + self._frameInterval))
            if self.nDroppedFrames == reportNDroppedFrames:
                logging.warning(
                    "Max reportNDroppedFrames reached, will not log any more dropped frames"
                )

            self._nextFrameT += self._frameInterval

        try:
            self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        except OSError:
            if self.autoLog:
                logging.warning(
                    "Frame {} not found, moving one frame and trying again".
                    format(self._nextFrameT),
                    obj=self)
            self._nextFrameT += self._frameInterval
            self._updateFrameTexture()
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        GL.glActiveTexture(GL.GL_TEXTURE0)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP)
        GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               GL.GL_LINEAR)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0, GL.GL_RGB,
                                GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0], GL.GL_RGB,
                                   GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,
                               GL.GL_NEAREST)
            GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,
                               GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0, GL.GL_BGR,
                                GL.GL_UNSIGNED_BYTE, self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0], GL.GL_BGR,
                                   GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if self.status == PLAYING:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current position in
        the movie will be determined automatically.

        This method should be called on every frame that the movie is meant to
        appear.

        Parameters
        ----------
        win : :class:`~psychopy.visual.Window` or None
            Window the video is being drawn to. If `None`, the window specified
            by property `win` will be used. Default is `None`.

        """
        if (self.status == NOT_STARTED
                or (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        array = (GL.GLfloat * 32)(
            1,
            1,  # texture coords
            vertsPix[0, 0],
            vertsPix[0, 1],
            0.,  # vertex
            0,
            1,
            vertsPix[1, 0],
            vertsPix[1, 1],
            0.,
            0,
            0,
            vertsPix[2, 0],
            vertsPix[2, 1],
            0.,
            1,
            0,
            vertsPix[3, 0],
            vertsPix[3, 1],
            0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        # video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        sound = self.sound
        if self._audioStream is None:
            return  # do nothing
        # check if sounddevice or PTB is being used. If so we can use seek. If not we
        # have to reload the audio stream and begin at the new loc
        if prefs.hardware['audioLib'] in ['sounddevice', 'PTB']:
            self._audioStream.seek(t)
        else:
            self._audioStream.stop()
            sndArray = self._mov.audio.to_soundarray()
            startIndex = int(t * self._mov.audio.fps)
            self._audioStream = sound.Sound(sndArray[startIndex:, :],
                                            sampleRate=self._mov.audio.fps)
            if self.status != PAUSED:  # Allows for seeking while paused - JK
                self._audioStream.play()

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        # remove textures from graphics card to prevent crash
        self.clearTextures()
        if self._mov is not None:
            self._mov.close()
        self._mov = None
        self._numpyFrame = None
        if self._audioStream is not None:
            self._audioStream.stop()
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP,
                               obj=self)

    def __del__(self):
        try:
            self._unload()
        except (ImportError, ModuleNotFoundError, TypeError):
            pass  # has probably been garbage-collected already

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip.

        Parameters
        ----------
        val : bool
            True to add the stimulus to the draw list, False to remove it.

        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
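The MovieStim3 docstring above only points at an external demo (Movie2Stim.py), so here is a minimal, hedged playback sketch. It assumes PsychoPy is installed and that a local file named movie.mp4 exists; FINISHED comes from psychopy.constants.

from psychopy import visual, core
from psychopy.constants import FINISHED

win = visual.Window((800, 600), units='pix')
mov = visual.MovieStim3(win, filename='movie.mp4', size=(640, 360),
                        loop=False, noAudio=True)
while mov.status != FINISHED:
    mov.draw()   # draws the current frame and starts playback on the first call
    win.flip()   # present it on the next screen refresh
win.close()
core.quit()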
Example #29
                if file_.endswith('mp4'):
                    files_to_upload = []

                    # 3) Get video size
                    video_path = join(conf.VIDEO_DIR, file_)
                    size_in_bytes = getsize(video_path)
                    if size_in_bytes > conf.MAX_TRANSACTION_SIZE:
                        # 4) If the size exceeds the transaction limit split it into chunks
                        num_splits = ceil(size_in_bytes /
                                          conf.MAX_TRANSACTION_SIZE)
                        video = VideoFileClip(video_path)
                        for i, (t_start, t_end) in enumerate(
                                get_next_interval(video.duration, num_splits)):
                            video_clip_path = join(
                                conf.VIDEO_DIR, f'{file_[0:-4]}_part{i}.mp4')
                            video_clip = video.subclip(t_start, t_end)
                            video_clip.write_videofile(video_clip_path,
                                                       codec='libx265')
                            files_to_upload.append(video_clip_path)
                    else:
                        files_to_upload.append(video_path)

                    # 5) Upload the video and after doing so move it to the uploaded folder
                    for file_to_upload in files_to_upload:

                        print(f'Uploading {path.basename(file_to_upload)}...')

                        # upload video to IPFS
                        res = ipfs_client.add(file_to_upload)

                        # outputs IPFS hash (=hash of file content)
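The snippet above is cut off, and the helper get_next_interval it relies on is not shown in this excerpt. A plausible sketch of it (an assumption, not the original implementation) would yield contiguous (t_start, t_end) pairs covering the whole duration:

def get_next_interval(duration, num_splits):
    """Yield (t_start, t_end) pairs splitting `duration` seconds into
    `num_splits` roughly equal, contiguous intervals."""
    step = duration / num_splits
    for i in range(num_splits):
        t_start = i * step
        # pin the final interval to the exact duration to avoid rounding gaps
        t_end = duration if i == num_splits - 1 else (i + 1) * step
        yield t_start, t_end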
Example #30
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False,
                webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # OpenCV frames are BGR; convert to RGB for the model
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if (not live and not webcam):
                        frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # swap the channels back to BGR for OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if (live):
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Staring live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
            print("webcam dimensions = {} x {}".format(width, height))

            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if (webcam):
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print("Fake webcam created, try using appear.in on Firefox")

            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                if not ret:
                    break
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way!
                frame = cv2.flip(frame, 1)
                image = _convert_frame(frame, convert_colors=False)
                # flip it back
                image = cv2.flip(image, 1)

                if (webcam):
                    time.sleep(1 / 30.0)
                    # firefox needs RGB
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    # chrome and skype UYUV - not working at the moment
                    # image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

                    camera.schedule_frame(image)
                    #print("writing to stream")

                else:
                    cv2.imshow('Video', image)
                    #print("writing to screen")

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(self.OUTPUT_PATH):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, rewrite_audio=True)

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
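The live/webcam branch above boils down to: read frames from a real camera, run them through the converter, then either display them or push them into a v4l2loopback device. A stripped-down, hedged sketch of that output path (assuming pyfakewebcam is installed and /dev/video1 is a v4l2loopback device; the face conversion itself is omitted):

import time
import cv2
import pyfakewebcam

cap = cv2.VideoCapture(0)                           # real webcam
fake = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.resize(frame, (640, 480))
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)    # FakeWebcam expects RGB
    fake.schedule_frame(rgb)
    time.sleep(1 / 30.0)                            # pace at roughly 30 fps

cap.release()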
Example #31
class MovieStim3(BaseVisualStim, ContainerMixin):
    """A stimulus class for playing movies (mpeg, avi, etc...) in PsychoPy
    that does not require avbin. Instead it requires the cv2 python package
    for OpenCV. The VLC media player also needs to be installed on the
    psychopy computer.

    **Example**::

        See Movie2Stim.py for demo.
    """

    def __init__(self, win,
                 filename="",
                 units='pix',
                 size=None,
                 pos=(0.0, 0.0),
                 ori=0.0,
                 flipVert=False,
                 flipHoriz=False,
                 color=(1.0, 1.0, 1.0),
                 colorSpace='rgb',
                 opacity=1.0,
                 volume=1.0,
                 name='',
                 loop=False,
                 autoLog=True,
                 depth=0.0,
                 noAudio=False,
                 vframe_callback=None,
                 fps=None,
                 interpolate=True):
        """
        :Parameters:

            filename :
                a string giving the relative or absolute path to the movie.
            flipVert : True or *False*
                If True then the movie will be top-bottom flipped
            flipHoriz : True or *False*
                If True then the movie will be right-left flipped
            volume :
                The nominal level is 100, and 0 is silence.
            loop : bool, optional
                Whether to start the movie over from the beginning if draw is
                called and the movie is done.

        """
        # what local vars are defined (these are the init params) for use
        # by __repr__
        self._initParams = dir()
        self._initParams.remove('self')
        super(MovieStim3, self).__init__(win, units=units, name=name,
                                         autoLog=False)

        retraceRate = win._monitorFrameRate
        if retraceRate is None:
            retraceRate = win.getActualFrameRate()
        if retraceRate is None:
            logging.warning("FrameRate could not be supplied by psychopy; "
                            "defaulting to 60.0")
            retraceRate = 60.0
        self._retraceInterval = old_div(1.0, retraceRate)
        self.filename = filename
        self.loop = loop
        self.flipVert = flipVert
        self.flipHoriz = flipHoriz
        self.pos = numpy.asarray(pos, float)
        self.depth = depth
        self.opacity = float(opacity)
        self.interpolate = interpolate
        self.noAudio = noAudio
        self._audioStream = None
        self.useTexSubImage2D = True

        if noAudio:  # to avoid dependency problems in silent movies
            self.sound = None
        else:
            from psychopy import sound
            self.sound = sound

        self._videoClock = Clock()
        self.loadMovie(self.filename)
        self.setVolume(volume)
        self.nDroppedFrames = 0

        # size
        if size is None:
            self.size = numpy.array([self._mov.w, self._mov.h],
                                    float)
        else:
            self.size = val2array(size)
        self.ori = ori
        self._updateVertices()
        # set autoLog (now that params have been initialised)
        self.autoLog = autoLog
        if autoLog:
            logging.exp("Created %s = %s" % (self.name, str(self)))

    def reset(self):
        self._numpyFrame = None
        self._nextFrameT = None
        self._texID = None
        self.status = NOT_STARTED

    def setMovie(self, filename, log=True):
        """See `~MovieStim.loadMovie` (the functions are identical).

        This form is provided for syntactic consistency with other visual
        stimuli.
        """
        self.loadMovie(filename, log=log)

    def loadMovie(self, filename, log=True):
        """Load a movie from file

        :Parameters:

            filename: string
                The name of the file, including path if necessary

        After the file is loaded MovieStim.duration is updated with the movie
        duration (in seconds).
        """
        self.reset()  # set status and timestamps etc

        # Create Video Stream stuff
        if os.path.isfile(filename):
            self._mov = VideoFileClip(filename, audio=(1 - self.noAudio))
            if (not self.noAudio) and (self._mov.audio is not None):
                sound = self.sound
                try:
                    self._audioStream = sound.Sound(
                        self._mov.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                except Exception:
                    # JWE added this as a patch for a moviepy oddity where the
                    # duration is inflated in the saved file causes the
                    # audioclip to be the wrong length, so round down and it
                    # should work
                    jwe_tmp = self._mov.subclip(0, round(self._mov.duration))
                    self._audioStream = sound.Sound(
                        jwe_tmp.audio.to_soundarray(),
                        sampleRate=self._mov.audio.fps)
                    del(jwe_tmp)
            else:  # make sure we set to None (in case prev clip had audio)
                self._audioStream = None
        else:
            raise IOError("Movie file '%s' was not found" % filename)
        # mov has attributes:
        #     size, duration, fps
        # mov.audio has attributes:
        #     duration, fps (aka sampleRate), to_soundarray()
        self._frameInterval = old_div(1.0, self._mov.fps)
        self.duration = self._mov.duration
        self.filename = filename
        self._updateFrameTexture()
        logAttrib(self, log, 'movie', filename)

    def play(self, log=True):
        """Continue a paused movie from current position.
        """
        status = self.status
        if status != PLAYING:
            if self._audioStream is not None:
                self._audioStream.play()
            if status == PAUSED:
                if self.getCurrentFrameTime() < 0:
                    self._audioSeek(0)
                else:
                    self._audioSeek(self.getCurrentFrameTime())
            self.status = PLAYING
            self._videoClock.reset(-self.getCurrentFrameTime())

            if log and self.autoLog:
                self.win.logOnFlip("Set %s playing" % (self.name),
                                   level=logging.EXP, obj=self)
            self._updateFrameTexture()

    def pause(self, log=True):
        """
        Pause the current point in the movie (sound will stop, current frame
        will not advance).  If play() is called again both will restart.
        """
        if self.status == PLAYING:
            self.status = PAUSED
            if self._audioStream:
                self._audioStream.stop()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s paused" %
                                   (self.name), level=logging.EXP, obj=self)
            return True
        if log and self.autoLog:
            self.win.logOnFlip("Failed Set %s paused" %
                               (self.name), level=logging.EXP, obj=self)
        return False

    def stop(self, log=True):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted -
        it must be loaded again. Use pause() if you may need to restart
        the movie.
        """
        if self.status != STOPPED:
            self.status = STOPPED
            self._unload()
            self.reset()
            if log and self.autoLog:
                self.win.logOnFlip("Set %s stopped" % (self.name),
                                   level=logging.EXP, obj=self)

    def setVolume(self, volume):
        pass  # to do

    def setFlipHoriz(self, newVal=True, log=True):
        """If set to True then the movie will be flipped horizontally
        (left-to-right). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipHoriz = newVal
        logAttrib(self, log, 'flipHoriz')
        self._needVertexUpdate = True

    def setFlipVert(self, newVal=True, log=True):
        """If set to True then the movie will be flipped vertically
        (top-to-bottom). Note that this is relative to the original,
        not relative to the current state.
        """
        self.flipVert = newVal
        logAttrib(self, log, 'flipVert')
        self._needVertexUpdate = True

    def getFPS(self):
        """
        Returns the movie frames per second playback speed.
        """
        return self._mov.fps

    def getCurrentFrameTime(self):
        """Get the time that the movie file specified the current
        video frame as having.
        """
        return self._nextFrameT - self._frameInterval

    def _updateFrameTexture(self):
        if self._nextFrameT is None:
            # movie has no current position, need to reset the clock
            # to zero in order to have the timing logic work
            # otherwise the video stream would skip frames until the
            # time since creating the movie object has passed
            self._videoClock.reset()
            self._nextFrameT = 0

        # only advance if next frame (half of next retrace rate)
        if self._nextFrameT > self.duration:
            self._onEos()
        elif self._numpyFrame is not None:
            if self._nextFrameT > (self._videoClock.getTime() -
                                   old_div(self._retraceInterval, 2.0)):
                return None
        self._numpyFrame = self._mov.get_frame(self._nextFrameT)
        useSubTex = self.useTexSubImage2D
        if self._texID is None:
            self._texID = GL.GLuint()
            GL.glGenTextures(1, ctypes.byref(self._texID))
            useSubTex = False

        # bind the texture in openGL
        GL.glEnable(GL.GL_TEXTURE_2D)
        # bind that name to the target
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        # makes the texture map wrap (this is actually default anyway)
        GL.glTexParameteri(
            GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_REPEAT)
        # data from PIL/numpy is packed, but default for GL is 4 bytes
        GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
        # important if using bits++ because GL_LINEAR
        # sometimes extrapolates to pixel vals outside range
        if self.interpolate:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_RGB, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        else:
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
            GL.glTexParameteri(
                GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
            if useSubTex is False:
                GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB8,
                                self._numpyFrame.shape[1],
                                self._numpyFrame.shape[0], 0,
                                GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                self._numpyFrame.ctypes)
            else:
                GL.glTexSubImage2D(GL.GL_TEXTURE_2D, 0, 0, 0,
                                   self._numpyFrame.shape[1],
                                   self._numpyFrame.shape[0],
                                   GL.GL_BGR, GL.GL_UNSIGNED_BYTE,
                                   self._numpyFrame.ctypes)
        GL.glTexEnvi(GL.GL_TEXTURE_ENV, GL.GL_TEXTURE_ENV_MODE,
                     GL.GL_MODULATE)  # ?? do we need this - think not!

        if not self.status == PAUSED:
            self._nextFrameT += self._frameInterval

    def draw(self, win=None):
        """Draw the current frame to a particular visual.Window (or to the
        default win for this object if not specified). The current
        position in the movie will be determined automatically.

        This method should be called on every frame that the movie is
        meant to appear.
        """

        if (self.status == NOT_STARTED or
                (self.status == FINISHED and self.loop)):
            self.play()
        elif self.status == FINISHED and not self.loop:
            return
        if win is None:
            win = self.win
        self._selectWindow(win)
        self._updateFrameTexture()  # will check if it's needed

        # scale the drawing frame and get to centre of field
        GL.glPushMatrix()  # push before drawing, pop after
        # push the data for client attributes
        GL.glPushClientAttrib(GL.GL_CLIENT_ALL_ATTRIB_BITS)

        self.win.setScale('pix')
        # move to centre of stimulus and rotate
        vertsPix = self.verticesPix

        # bind textures
        GL.glActiveTexture(GL.GL_TEXTURE1)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, self._texID)
        GL.glEnable(GL.GL_TEXTURE_2D)

        # sets opacity (1,1,1 = RGB placeholder)
        GL.glColor4f(1, 1, 1, self.opacity)

        array = (GL.GLfloat * 32)(
            1, 1,  # texture coords
            vertsPix[0, 0], vertsPix[0, 1], 0.,  # vertex
            0, 1,
            vertsPix[1, 0], vertsPix[1, 1], 0.,
            0, 0,
            vertsPix[2, 0], vertsPix[2, 1], 0.,
            1, 0,
            vertsPix[3, 0], vertsPix[3, 1], 0.,
        )

        # 2D texture array, 3D vertex array
        GL.glInterleavedArrays(GL.GL_T2F_V3F, 0, array)
        GL.glDrawArrays(GL.GL_QUADS, 0, 4)
        GL.glPopClientAttrib()
        GL.glPopAttrib()
        GL.glPopMatrix()
        # unbind the textures
        GL.glActiveTexture(GL.GL_TEXTURE0)
        GL.glBindTexture(GL.GL_TEXTURE_2D, 0)
        GL.glEnable(GL.GL_TEXTURE_2D)  # implicitly disables 1D

    def seek(self, t):
        """Go to a specific point in time for both the audio and video streams
        """
        # video is easy: set both times to zero and update the frame texture
        self._nextFrameT = t
        self._videoClock.reset(t)
        self._audioSeek(t)

    def _audioSeek(self, t):
        sound = self.sound
        # for sound we need to extract the array again and just begin at new
        # loc
        if self._audioStream is None:
            return  # do nothing
        self._audioStream.stop()
        sndArray = self._mov.audio.to_soundarray()
        startIndex = int(t * self._mov.audio.fps)
        self._audioStream = sound.Sound(
            sndArray[startIndex:, :], sampleRate=self._mov.audio.fps)
        self._audioStream.play()

    def _getAudioStreamTime(self):
        return self._audio_stream_clock.getTime()

    def _unload(self):
        try:
            # remove textures from graphics card to prevent crash
            self.clearTextures()
        except Exception:
            pass
        self._mov = None
        self._numpyFrame = None
        self._audioStream = None
        self.status = FINISHED

    def _onEos(self):
        if self.loop:
            self.seek(0.0)
        else:
            self.status = FINISHED
            self.stop()

        if self.autoLog:
            self.win.logOnFlip("Set %s finished" % self.name,
                               level=logging.EXP, obj=self)

    def __del__(self):
        self._unload()

    def setAutoDraw(self, val, log=None):
        """Add or remove a stimulus from the list of stimuli that will be
        automatically drawn on each flip

        :parameters:
            - val: True/False
                True to add the stimulus to the draw list, False to remove it
        """
        if val:
            self.play(log=False)  # set to play in case stopped
        else:
            self.pause(log=False)
        # add to drawing list and update status
        setAttribute(self, 'autoDraw', val, log)
Example #32
    def add_word(self,
                 word,
                 collection,
                 start,
                 end,
                 name,
                 add_type,
                 word_type,
                 group,
                 word_id='',
                 wordset_id=''):
        clean_word = word.strip()
        puresave_filename = name.split('.')[0] + "~" + clean_word
        # row = {
        #     "videaname":puresave_filename,
        #     "wordbase_type":"video"
        # }
        # w = WordbaseHelper()
        # w.init_word(row,clean_word)
        # w.insert(row,collection)

        data = {
            'filename': puresave_filename,
            'wordbase_collection': collection,
            'word': word,
            'add_type': add_type,
            'word_type': word_type,
            'group': group,
            'word_id': word_id,
            'wordset_id': wordset_id,
        }

        work_dir = "D:\BaiduYunDownload"
        file_path = ""
        parent_path = ""
        double_loop_flag = False
        for parent, dirnames, filenames in os.walk(work_dir, followlinks=True):
            for filename in filenames:
                if filename == name:
                    parent_path = parent
                    file_path = os.path.join(parent, filename)
                    double_loop_flag = True
                    break
            if double_loop_flag:
                break
        start_time = float(start)
        end_time = float(end)
        pure_filename = name.split('.')[0]

        subfile_path = os.path.join(parent_path, pure_filename + ".srt")
        video_clip = VideoFileClip(file_path)
        clip = video_clip.subclip(start_time, end_time)
        target = "D:\BaiduYunDownload\\videos\\" + puresave_filename + ".mp4"
        clip.write_videofile(target,
                             codec='libx264',
                             verbose=False,
                             audio=True)
        video_clip.close()

        subtitle = SSAFile.load(subfile_path)
        text = '''
        1
        00:00:00,000 --> 00:00:00,000
        
        '''
        temp = SSAFile.from_string(text)
        for sub in subtitle:
            if sub.start >= start_time * 1000 and sub.end <= end_time * 1000:
                text = sub.text.replace(
                    clean_word, '<c.video-heightlight>' + clean_word + '</c>')
                sub.text = text
                sub.shift(s=-start_time)
                temp.append(sub)
        sub_target = "D:\BaiduYunDownload\\videos\\" + puresave_filename
        temp.save(sub_target + '.srt')
        vtt = WebVTT().from_srt(sub_target + '.srt')
        vtt.save(sub_target + '.vtt')

        files = {
            "video": open(target, "rb"),
            "subtitle": open(sub_target + '.vtt', "rb")
        }
        # print(files)

        # r = requests.post('http://127.0.0.1:5000/video', data=data,files=files)
        r = requests.post('http://' + server_ip + '/video',
                          data=data,
                          files=files)
        # print(r.request)

        return "true"
Example #33
    def transform_video(self,
                        input_path,
                        output_path,
                        batch_size=4,
                        start=0,
                        end=0):
        '''
        Transform a video to animation version
        https://github.com/lengstrom/fast-style-transfer/blob/master/evaluate.py#L21
        '''
        # Force to None
        end = end or None

        if not os.path.isfile(input_path):
            raise FileNotFoundError(f'{input_path} does not exist')

        output_dir = "/".join(output_path.split("/")[:-1])
        os.makedirs(output_dir, exist_ok=True)
        is_gg_drive = '/drive/' in output_path
        temp_file = ''

        if is_gg_drive:
            # Writing directly into google drive can be inefficient
            temp_file = f'tmp_anime.{output_path.split(".")[-1]}'

        def transform_and_write(frames, count, writer):
            anime_images = denormalize_input(self.transform(frames),
                                             dtype=np.uint8)
            for i in range(0, count):
                img = np.clip(anime_images[i], 0, 255)
                writer.write_frame(img)

        video_clip = VideoFileClip(input_path, audio=False)
        if start or end:
            video_clip = video_clip.subclip(start, end)

        video_writer = ffmpeg_writer.FFMPEG_VideoWriter(
            temp_file or output_path,
            video_clip.size,
            video_clip.fps,
            codec="libx264",
            preset="medium",
            bitrate="2000k",
            audiofile=input_path,
            threads=None,
            ffmpeg_params=None)

        total_frames = round(video_clip.fps * video_clip.duration)
        print(
            f'Transforming video {input_path}, {total_frames} frames, size: {video_clip.size}'
        )

        batch_shape = (batch_size, video_clip.size[1], video_clip.size[0], 3)
        frame_count = 0
        frames = np.zeros(batch_shape, dtype=np.float32)
        for frame in tqdm(video_clip.iter_frames()):
            try:
                frames[frame_count] = frame
                frame_count += 1
                if frame_count == batch_size:
                    transform_and_write(frames, frame_count, video_writer)
                    frame_count = 0
            except Exception as e:
                print(e)
                break

        # The last frames
        if frame_count != 0:
            transform_and_write(frames, frame_count, video_writer)

        if temp_file:
            # move to output path
            shutil.move(temp_file, output_path)

        print(f'Animation video saved to {output_path}')
        video_writer.close()
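For reference, the frame-writer pattern used above, stripped of the model and the batching, is small enough to show on its own. A hedged sketch with placeholder file names and an identity transform standing in for the animation model:

import numpy as np
from moviepy.video.io.VideoFileClip import VideoFileClip
from moviepy.video.io import ffmpeg_writer

def identity(frame):
    return frame  # stand-in for the per-frame model inference

clip = VideoFileClip('input.mp4', audio=False)
writer = ffmpeg_writer.FFMPEG_VideoWriter('output.mp4',
                                          clip.size,
                                          clip.fps,
                                          codec='libx264')
for frame in clip.iter_frames():
    out = np.clip(identity(frame), 0, 255).astype(np.uint8)
    writer.write_frame(out)
writer.close()
clip.close()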