Example no. 1
0
 def saturate_clip(self, clip, fps=None):
     """Return a copy of *clip* whose frames were passed through
     ``self.saturate_frame``.

     Parameters
     ----------
     clip : moviepy clip exposing ``iter_frames``/``duration``/``audio``.
     fps : float, optional
         Frame rate used both to extract frames and for the output clip.
         Falls back to ``clip.fps`` when omitted (the original code raised
         a TypeError on ``clip.duration * None`` if *fps* was left unset).

     Returns
     -------
     A new ``ImageSequenceClip`` carrying the original audio track.
     """
     if fps is None:
         fps = clip.fps
     new_frames = []
     # +1 so the tqdm total also covers the final partial frame
     total = int(clip.duration * fps) + 1
     for frame in tqdm(clip.iter_frames(fps), total=total):
         new_frames.append(img_as_ubyte(self.saturate_frame(frame)))
     # create and return a new clip from the saturated frames
     new_clip = ImageSequenceClip(new_frames, fps=fps)
     new_clip = new_clip.set_audio(clip.audio)
     return new_clip
Example no. 2
0
def write_video(images):
    """Render *images* into the final video file, attach the recorded
    audio track (trimmed to the video length), and exit the program."""
    normalize_images_in_library()
    convert_to_mp3()

    print("Beginning video render.")
    # Build the silent image sequence first, then marry it to the
    # recording, cut so the audio never outlasts the picture.
    video = ImageSequenceClip(images, fps=ANIMATION_FRAME_RATE)
    audio = AudioFileClip(TEMP_RECORDING_MP3).subclip(0, video.duration)
    video = video.set_audio(audio)
    video.write_videofile(VIDEO_PATH, fps=ANIMATION_FRAME_RATE,
                          codec='libx264', audio_codec="aac")

    print("Video rendered and saved with audio.")
    exit_program()
Example no. 3
0
    def wipe_from_video(self, src_clip, threshold, alpha, keep_audio):
        """Run ``wipe_from_image`` over every frame of *src_clip* and
        return the processed frames as a new clip; the source audio is
        copied over only when *keep_audio* is true."""
        processed = [
            np.asarray(self.wipe_from_image(Image.fromarray(raw),
                                            threshold=threshold,
                                            alpha=alpha))
            for raw in tqdm(src_clip.iter_frames())
        ]

        out_clip = ImageSequenceClip(processed, fps=src_clip.fps,
                                     with_mask=False)
        return out_clip.set_audio(src_clip.audio) if keep_audio else out_clip
    def build_movie(self):
        """Concatenate self._full_frames images into a video file and add
        back the original music track."""
        from moviepy.editor import AudioFileClip, ImageSequenceClip

        # Output path mirrors the input wav path: input -> output, wav -> mp4.
        out_path = (self.decomposer.wav_file
                    .replace('input', 'output')
                    .replace('wav', 'mp4'))

        # One keyboard frame per chromagram column, assembled at half the
        # output frame rate.
        frames = [self._generate_keyboard(t)[0]
                  for t in range(self.decomposer.chromagram_raw.shape[1])]
        movie = ImageSequenceClip(frames, fps=self.fps_out / 2)
        movie = movie.cutout(0, 1)  # trim to compensate for FFT lag
        movie = movie.set_audio(AudioFileClip(self.decomposer.wav_file))
        movie.write_videofile(
            out_path,
            fps=self.fps_out,
            temp_audiofile="temp-audio.m4a",
            remove_temp=True,
            codec="libx264",
            audio_codec="aac"
        )
Example no. 5
0
def makeVideo(ssdir, audiodir, outputdir, duration):
    """Build a video from the image sequence in *ssdir*, overlay the audio
    file *audiodir*, and write the result to *outputdir*.

    Parameters
    ----------
    ssdir : str
        Directory of frames handed to ImageSequenceClip.
    audiodir : str
        Path to the audio file.
    outputdir : str
        Path of the video file to write.
    duration : float
        Desired clip duration in seconds.
        NOTE(review): *duration* is also passed as the second positional
        argument of ImageSequenceClip, which is ``fps`` in moviepy --
        confirm the intended frame rate before relying on this.
    """
    clip = ImageSequenceClip(ssdir, duration)
    # Bug fix: moviepy clips are immutable -- set_duration returns a NEW
    # clip. The original discarded the return value, making it a no-op.
    clip = clip.set_duration(duration)
    myaudio = AudioFileClip(audiodir)
    final_clip = clip.set_audio(myaudio)
    final_clip.write_videofile(outputdir)
    # Video mode: run the model over every frame of VIDEO_IN and rebuild
    # the clip; webcam mode: process live frames from the default camera.
    if args["video"]:
        # Frames and audio are loaded separately: the model only needs the
        # images, and the untouched audio track is re-attached at the end.
        video_clip = VideoFileClip(VIDEO_IN, audio=False)
        audio_clip = AudioFileClip(VIDEO_IN)
        current_directory = os.getcwd()
        tmp_directory = os.path.join(current_directory, TMP_DIR)
        if not os.path.exists(tmp_directory):
            os.makedirs(tmp_directory)

        # Run every frame through the model and dump the predictions as
        # zero-padded PNGs so the sequence is picked up in order below.
        for i, frame in enumerate(video_clip.iter_frames()):
            frame = get_numpy_transform(frame).unsqueeze(0)
            pred = model(frame.to(device)).cpu().detach().numpy()[0]
            save_image(tmp_directory + "/" + str(i).zfill(5) + ".png", pred)

        # Reassemble the processed frames at the source frame rate,
        # restore the original audio, then drop the temp frame folder.
        video = ImageSequenceClip(sequence=tmp_directory + "/",
                                  fps=video_clip.fps)
        video = video.set_audio(audio_clip)
        video.write_videofile(VIDEO_OUT, audio=True)
        shutil.rmtree(tmp_directory + "/")

    elif args["webcam"]:
        # webcam mode, process frames in webcam stream
        cv2.startWindowThread()
        cv2.namedWindow("frame")
        while True:
            # Read an image.
            # NOTE(review): the capture device is opened and released on
            # EVERY iteration -- opening it once before the loop would be
            # far cheaper; confirm before changing.
            cap = cv2.VideoCapture(0)
            ret, frame = cap.read()
            cap.release()
            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = load_image(None, Image.fromarray(image))
            image = resize(image.to(device))
Example no. 7
0
    # Split the input image list into 6 roughly-equal chunks; each chunk
    # is handled by its own worker process.
    n = math.ceil(len(__l) / 6)
    print('正在产生图片...')
    # Background worker that updates transformation progress over the
    # generated input PNGs.
    tmp_process = multiprocessing.Process(target=update_transform,
                                          args=('storage/' + rand_key +
                                                '/input/*.png', n_frames))
    tmp_process.start()
    process_list.append(tmp_process)
    # One sub_process worker per chunk of at most n items.
    for v in [__l[i:i + n] for i in range(0, len(__l), n)]:
        tmp_process = multiprocessing.Process(target=sub_process,
                                              args=(v, rand_key))
        tmp_process.start()
        process_list.append(tmp_process)
    # Wait for the progress updater and all chunk workers to finish.
    for __process__ in process_list:
        __process__.join()
    print('产生完图片!')

    # 3. Convert the images (style transfer input -> output)
    style('storage/' + rand_key + '/input', 'storage/' + rand_key + '/output')

    # 4. Produce the video: stitch the styled frames at 24 fps and put the
    # original audio track back on.
    clip = ImageSequenceClip('storage/' + rand_key + '/output', fps=24)
    out = clip.set_audio(AudioFileClip('data/' + file_name))
    out.write_videofile(filename='data/results/' + target_name,
                        fps=24,
                        codec='libx264',
                        audio_codec='aac',
                        audio=True)

    # Remove the per-request working folder.
    shutil.rmtree('storage/' + rand_key)
Example no. 8
0
    def makeMovie(self,moviePath:str,outputFolder="/frvtMovieMaker/",deleteIntermediateFiles=True):
        """Render an annotated output movie for *moviePath*.

        Pipeline (each heavy stage is skipped when its output folder
        already exists): optionally download the video if *moviePath* is a
        YouTube URL, encode face templates for sub-clips in parallel
        worker processes, draw per-frame identification hit lists and eye
        markers for every configured algorithm, then stitch the frames
        back into ``outputMovie/output.mp4`` with the original audio.

        Parameters
        ----------
        moviePath : str
            Local file path or a YouTube URL.
        outputFolder : str
            Root folder for intermediate and final artifacts.
        deleteIntermediateFiles : bool
            When True, frames/templates/input folders are removed at the end.
        """
        self.movieOutputFolder = os.path.join(outputFolder,"outputMovie")
        if not os.path.exists(self.movieOutputFolder):
            os.makedirs(self.movieOutputFolder)

        # One template-encoding worker process per CPU core.
        cpuCount = multiprocessing.cpu_count()

        # BGR marker colors, index-matched to self.algoInfos.
        eyeColors = [(0,0,255), (255,0,0)]

        # Generic URL matcher used to decide "download" vs "local file".
        regex_url = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        #encode referenceImage
        if re.match(regex_url,moviePath) is not None:
            if 'youtube' in moviePath:
                print("Downloading video...")
                yt = YouTube(moviePath)
                # Highest-resolution progressive mp4 stream; the file lands in
                # self.inputFolder and moviePath is re-pointed at it.
                yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first().download(output_path=self.inputFolder)
                moviePath = list(filter(lambda x: x.endswith(".mp4"),os.listdir(self.inputFolder)))[0]
                moviePath = os.path.join(self.inputFolder,moviePath)

            else:
                # NOTE(review): non-YouTube URLs only print a warning and fall
                # through; VideoFileClip below will then fail on the raw URL.
                print("Please provide a valid youtube link or file path")

        clip = VideoFileClip(moviePath)
        # Each worker encodes an equal-length time slice of the clip.
        secondsPerWorker = clip.duration/cpuCount

        self.templateOutputFolder = os.path.join(outputFolder,"templates")
        if not os.path.exists(self.templateOutputFolder):
            os.makedirs(self.templateOutputFolder)
            #code templates by multiprocessing
            for currentAlgoInfo in self.algoInfos:
                startTime = 0
                processList = []
                for currentWokerIndex in range(cpuCount):
                    endTime = min(startTime + secondsPerWorker,clip.duration)
                    #codeTemplatesForSubClip(moviePath,startTime,endTime,clip.fps,self.libdir,self.libname)
                    p = Process(target = codeTemplatesForSubClip, args =(moviePath,startTime,endTime,clip.fps,currentAlgoInfo), kwargs={"templateOutputFolder":self.templateOutputFolder})
                    p.start()
                    print(f"Starting worker with start time {startTime} and end time {endTime}")
                    processList.append(p)
                    startTime = endTime
            # NOTE(review): this join loop sits OUTSIDE the algo loop, so only
            # the workers of the LAST algorithm are joined; earlier algorithms'
            # workers may still be running past this point -- confirm intent.
            for currentProcess in processList:
                currentProcess.join()
        else:
            print("Using existing template folder")

        self.frameOutputFolder =  os.path.join(outputFolder,"frames")
        if not os.path.exists(self.frameOutputFolder):
            os.makedirs(self.frameOutputFolder)
            # Load every reference image; keys ("ref_0", "ref_1", ...) double
            # as gallery template ids below.
            self.referenceImageDict = {}
            for referenceImageIndex,referenceImagePath in enumerate(self.referenceImagePaths):
                referenceImage = cv2.imread(referenceImagePath)
                self.referenceImageDict[f"ref_{referenceImageIndex}"] = referenceImage

            #iterate over all algorithms and edbs and generate a hitlist for each algo/edb setting
            for currentAlgoIndex,currentAlgoInfo in enumerate(self.algoInfos):
                currentEyeColor = eyeColors[currentAlgoIndex]
                templateFolderForAlgo = os.path.join(self.templateOutputFolder,currentAlgoInfo.algorithmName)
                # Fresh FRVT library/wrapper per algorithm.
                libraryLoader = FRVTLibraryLoader()
                libraryLoader.loadLibrary(currentAlgoInfo.libName,libDir=currentAlgoInfo.libDir)
                wrapper = FRVTWrapper(libraryLoader)
                wrapper.initializeTemplateCreation()
                # Enroll every reference image; any failure aborts the run.
                referenceTemplates = []
                for referenceImage in self.referenceImageDict.values():
                    frvtImage = FRVTImage(libraryLoader,referenceImage)
                    multiFace = FRVTMultiface(libraryLoader,frvtImage)
                    (retCode,templateData,isLeftAssigned,isRightAssigned,leftX,rightX,leftY,rightY) = wrapper.encodeTemplate(multiFace)
                    if retCode == 0:
                        print("Reference image successfully enrolled")
                    else:
                        print("Enrollment of reference image was not successful!")
                        raise RuntimeError("Enrollment of reference image failed!")
                    referenceTemplates.append(templateData)
                edbs = self.findEdbsForAlgorithm(currentAlgoInfo)
                for currentEdb in edbs:
                    print(f"Processing edb {currentEdb}")
                    currentManifestFile = os.path.splitext(currentEdb)[0]+".manifest"
                    if not os.path.exists(currentAlgoInfo.enrollmentDir):
                        print("Enrollment dir does not exist. Creating it...")
                        os.makedirs(currentAlgoInfo.enrollmentDir)
                    retCode = wrapper.finalizeEnrolment(currentAlgoInfo.configDir,currentAlgoInfo.enrollmentDir,currentEdb,currentManifestFile, 0)
                    print(f"Finalize enrollment returned ret code {retCode}")
                    retCode = wrapper.initializeIdentification(currentAlgoInfo.configDir,currentAlgoInfo.enrollmentDir)
                    print(f"Initialize identification returned ret code {retCode}")
                    # Insert the reference templates into the search gallery.
                    for templateIndex,templateData in enumerate(referenceTemplates):
                        wrapper.insertTemplate(templateData, f"ref_{templateIndex}")
                    self.placeholderImage = cv2.imread("placeholder.jpg")

                    # Walk the movie frame by frame; each frame either gets a
                    # hit list + eye markers drawn (template exists) or is
                    # padded with a white border so all frames share a height.
                    currentFrameNumber = 0
                    for currentFrame in clip.iter_frames(clip.fps):
                        currentFrameNumberAsString = str(currentFrameNumber).zfill(5)
                        frameName = os.path.join(self.frameOutputFolder,currentFrameNumberAsString+".jpg")
                        templateFile = currentFrameNumberAsString+".template"
                        templateFile_full = os.path.join(templateFolderForAlgo,templateFile)
                        eyesFile = currentFrameNumberAsString + ".eyes"
                        eyesFile_full = os.path.join(templateFolderForAlgo,eyesFile)
                        # moviepy yields RGB; reverse the channel axis for
                        # OpenCV's BGR drawing/writing functions.
                        copiedFrame = copy.copy(currentFrame)
                        copiedFrame_bgr = copiedFrame[:,:,::-1]
                        copiedFrame_bgr = np.array(copiedFrame_bgr)
                        if os.path.exists(templateFile_full):
                            templateData = np.fromfile(templateFile_full,dtype=np.int8)

                            candidateList,decisionValue = wrapper.identifyTemplate(templateData,10)
                            #check if already a frame exists and add the new hitlist in this case

                            if os.path.exists(frameName):
                                frameToUse = cv2.imread(frameName)
                                imageWithHitList = self.drawHitListToImage(frameToUse,candidateList.toList(),currentAlgoInfo.algorithmName,currentEdb)
                            else:
                                imageWithHitList = self.drawHitListToImage(copiedFrame_bgr,candidateList.toList(),currentAlgoInfo.algorithmName,currentEdb)

                            # First line of the .eyes file: assigned flag plus
                            # left/right eye coordinates (space-separated).
                            eyeFileHandle = open(eyesFile_full,"r")
                            firstLine = eyeFileHandle.readline()
                            firstLine_splitted = firstLine.split(" ")
                            eyeFileHandle.close()
                            #draw eyes if they are assigned
                            # NOTE(review): bool() of any non-empty string is
                            # True (including "False"/"0"), so this only guards
                            # against an empty first field -- confirm intent.
                            if bool(firstLine_splitted[0]):
                                leftX = int(firstLine_splitted[2])
                                rightX = int(firstLine_splitted[3])
                                leftY = int(firstLine_splitted[4])
                                rightY = int(firstLine_splitted[5])
                                imageWithHitList = cv2.circle(imageWithHitList, (leftX,leftY), 3,  currentEyeColor,-1)
                                imageWithHitList = cv2.circle(imageWithHitList, (rightX,rightY), 3,  currentEyeColor,-1)
                            #save frame
                            cv2.imwrite(frameName,imageWithHitList)
                        else:
                            # No template for this frame: just extend the frame
                            # with a white bottom border to match annotated ones.
                            if os.path.exists(frameName):
                                frameToUse = cv2.imread(frameName)
                                enlargedImage = cv2.copyMakeBorder(frameToUse, 0, 200, 0, 0, cv2.BORDER_CONSTANT, None, (255,255,255))
                            else:
                                enlargedImage = cv2.copyMakeBorder(copiedFrame_bgr, 0, 200, 0, 0, cv2.BORDER_CONSTANT, None, (255,255,255))
                            #save frame
                            cv2.imwrite(frameName,enlargedImage)
                        currentFrameNumber +=1
        else:
            print("Using existing frame folder")
        # Stitch the annotated frames (sorted by zero-padded name) back into a
        # movie at the source frame rate, restoring the original audio.
        framesToCombine = [os.path.join(self.frameOutputFolder,x) for x in sorted(os.listdir(self.frameOutputFolder))]
        newClip = ImageSequenceClip(framesToCombine,fps=clip.fps)
        newClip = newClip.set_audio(clip.audio)
        newClip.write_videofile(os.path.join(self.movieOutputFolder,"output.mp4"))
        if deleteIntermediateFiles:
            shutil.rmtree(self.frameOutputFolder)
            shutil.rmtree(self.templateOutputFolder)
            shutil.rmtree(self.inputFolder)