Example #1
0
class Thread(QtCore.QThread):
    """Background thread that decodes video frames with OpenCV and plays the
    matching audio track with ffpyplayer's MediaPlayer, emitting each decoded
    frame as a QImage through the ``changePixmap`` signal.
    """

    changePixmap = QtCore.Signal(QtGui.QImage)

    def __init__(self, parent=None, file_path="d:/extractSection/test.mp4"):
        """Open *file_path* and start in the playing state.

        ``file_path`` is now a parameter (previously hard-coded) with the old
        value as its default, so existing callers are unaffected.
        """
        QtCore.QThread.__init__(self, parent=parent)
        self.file_path = file_path
        self.openVideo()
        self.play = True

    def run(self):
        # Poll roughly every 25 ms; frames are only decoded while playing.
        while True:
            if self.play:
                # Unpausing the audio player on every tick keeps it in sync
                # with the play flag (playVideo() itself does not unpause it).
                self.player.set_pause(False)
                ret, frame = self.cap.read()
                if ret:
                    rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    h, w, ch = rgbImage.shape
                    bytesPerLine = ch * w
                    convertToQtFormat = QtGui.QImage(
                        rgbImage.data, w, h, bytesPerLine,
                        QtGui.QImage.Format_RGB888)
                    p = convertToQtFormat.scaled(
                        640, 480, QtCore.Qt.KeepAspectRatio)
                    self.changePixmap.emit(p)
                else:
                    # End of stream (or read error): pause instead of
                    # busy-spinning on a capture that will never yield
                    # another frame.  Previously this looped forever.
                    self.pauseVideo()
            time.sleep(0.025)

    def pauseVideo(self):
        """Pause both the frame loop and the audio player."""
        self.play = False
        self.player.set_pause(True)

    def playVideo(self):
        """Resume playback; run() unpauses the audio on its next tick."""
        self.play = True

    def stopVideo(self):
        """Stop playback and rewind to the first frame (was a no-op)."""
        self.pauseVideo()
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 1)

    def openVideo(self):
        """Create the OpenCV capture and a paused ffpyplayer audio player."""
        self.cap = cv2.VideoCapture(self.file_path)
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 1)
        self.player = MediaPlayer(self.file_path)
        self.player.set_pause(True)
Example #2
0
class MainWindow(QMainWindow):
    """Minimal window that decodes a video with ffpyplayer and blits each
    frame into a centered QLabel, pumping the Qt event loop manually.
    """

    def __init__(self):
        super().__init__()
        self.player = None
        # Initialized here so closeEvent() cannot hit an AttributeError if
        # the window is closed before timerEvent() ever runs.  (Also removed
        # a stray `pass` that sat between the class line and __init__.)
        self.running = False
        self.setWindowTitle("FFPyPlayer Test")

    def showEvent(self, e):
        # A 1 ms one-shot timer defers playback until the window is visible.
        self.timer_id = self.startTimer(1)
        self.lbl = QLabel(self)
        self.lbl.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.setCentralWidget(self.lbl)

    def timerEvent(self, event) -> None:
        """Run the decode/display loop.

        Blocks inside the event handler on purpose, calling
        QApplication.processEvents() each iteration so the UI stays alive.
        """
        self.killTimer(self.timer_id)
        ff_opts = {'paused': False, 'autoexit': True}
        self.player = MediaPlayer('../example_data/sample.mp4', ff_opts=ff_opts, lib_opts={})
        # self.player = MediaPlayer('http://localhost:1441/sample_stream.mp4', ff_opts=ff_opts, lib_opts={})
        self.running = True
        while self.running:
            time.sleep(0.01)
            frame, val = self.player.get_frame()
            if val == 'eof':
                break
            if frame is None:
                time.sleep(0.01)
            else:
                img, t = frame
                data = img.to_bytearray()[0]
                width, height = img.get_size()
                # the technical name for the 'rgb24' default pixel format is RGB888,
                # which is QImage.Format_RGB888 in the QImage format enum
                qimage = QImage(data, width, height, QImage.Format_RGB888)
                pixmap = QPixmap.fromImage(qimage)
                pixmap = pixmap.scaled(self.lbl.width(), self.lbl.height(),
                                       Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.lbl.setPixmap(pixmap)
                # When a frame is returned, val is the presentation delay
                # in seconds before the next frame should be shown.
                time.sleep(val)
            QApplication.processEvents()

    def closeEvent(self, event) -> None:
        """Stop the decode loop and release the player on window close."""
        self.running = False
        if self.player is not None:
            self.player.set_pause(True)
            self.player.close_player()
Example #3
0
class AudioPlayer:
    """Plays one widget's audio at a time through ffpyplayer.

    Audio and video playback are mutually exclusive: starting audio always
    pauses the video plugin's player first.
    """

    def __init__(self):
        self._widget = None
        self._player = None
        self._timer = None

    def toggle_playback(self, widget):
        """Start or pause audio for *widget*; a new widget restarts playback
        from that widget's saved position."""
        if self._widget != widget:
            # A different widget was selected: silence video, park any
            # current audio, then build a fresh player paused at the
            # widget's last known position.
            plugins.video.video_player.pause_playback()
            if self._widget is not None:
                self.pause_playback()
            self._widget = widget
            widget.audio_state = 'play'
            self._player = MediaPlayer(filename=widget.audio_source,
                                       ff_opts={
                                           'paused': True,
                                           'ss': widget.audio_pos
                                       })
            Clock.schedule_interval(self._start_playback, .1)
            return
        # Same widget: flip between playing and paused.
        if not self._player.get_pause():
            self.pause_playback()
            return
        plugins.video.video_player.pause_playback()
        self._player.set_pause(False)
        self._widget.audio_state = 'play'
        self._timer = Clock.schedule_interval(self._playback_update, .1)

    def _start_playback(self, dt):
        """Clock callback: unpause once stream metadata has loaded.
        Returning False cancels the polling interval."""
        if self._player.get_metadata()['duration'] is None:
            return
        self._player.set_pause(False)
        self._timer = Clock.schedule_interval(self._playback_update, .1)
        return False

    def pause_playback(self):
        """Pause the active player and mark the widget paused.
        Safe to call when nothing is playing."""
        timer = self._timer
        if timer is not None:
            timer.cancel()
        player = self._player
        if player is not None and not player.get_pause():
            player.set_pause(True)
        if self._widget is not None:
            self._widget.audio_state = 'pause'

    def _playback_update(self, dt):
        """Clock callback: mirror the playback position onto the widget and
        stop/rewind at the end of the track."""
        position = self._player.get_pts()
        if position < self._widget.audio_length:
            self._widget.audio_pos = position
            return
        # Reached the end: pause, rewind to the start, reset the widget.
        self._player.set_pause(True)
        self._player.seek(pts=0, relative=False, accurate=True)
        self._widget.audio_state = 'pause'
        self._widget.audio_pos = 0
        return False

    def update_audio_pos(self, widget, pts):
        """Seek the active player (if *widget* owns it) and record the new
        position on the widget either way."""
        if self._widget == widget and self._player is not None:
            self._player.seek(pts=pts, relative=False, accurate=True)
        widget.audio_pos = pts
Example #4
0
class Application(tk.Frame):
    """Tkinter GUI that downloads YouTube videos/playlists (pytube) and plays
    downloaded files with ffpyplayer, streaming frames into a tk.Label.

    Playback runs on a daemon thread (Video_data_stream) that is controlled
    through the module-level globals ``stop_thread`` and ``pause_thread``.
    """

    def __init__(self, master=None):
        """Build the main window, initialise all state, create the widgets."""
        super().__init__(master)
        self.master = master
        self.master.geometry("640x420")
        self.winfo_toplevel().title("YouTube downloader")
        #Create icon from base64 string
        icon_file = io.BytesIO(base64.b64decode(icon))
        img = Image.open(icon_file, mode='r')
        self.master.iconphoto(True, ImageTk.PhotoImage(image=img))

        self.video_link = tk.StringVar()  #Link to the youtube video
        self.download_path = tk.StringVar()  #Folder to download videos to
        self.video_folder = tk.StringVar(
        )  #Represents the folder to play videos from
        self.selected_video = 0  #Current playing video, idx
        self.playlist = []  #Array of video's to play
        # [downloaded_so_far, total_to_download] for the status text.
        self.downloadLeft = [0, 0]
        self.download_count = None  #Download count text widget
        self.video_list = None
        self.video_embed = None
        #Styles
        self.mainBgColor = "#121212"
        self.labelBgColor = "#1E1E1E"
        self.fontColor = "white"
        self.btnHighlight = "#FF00FF"

        #Video player
        self.video_player = None
        #Create blank image for video player
        img_str = io.BytesIO(base64.b64decode(blank))
        img = Image.open(img_str, mode='r')
        self.blank_img = ImageTk.PhotoImage(image=img)

        #Play/Stop video buttons
        self.playButton = None
        self.stopButton = None
        self.pauseButton = None
        self.playlistChanged = False  #Flag for seeing whether playlist has changed
        self.songChanged = False  #Flag for seeing if user pressed next button
        self.playback_buttons_frame = None  #Bind the playback buttons frame for swapping buttons inside of it
        self.isPlaying = False  #Flag used seeing if video is being streamed (Used in next/prev song, not in thread termination!)
        self.curVolume = 50
        self.curBassBoost = 0  #Maybe used one day
        self.now_playing = ""

        self.master.resizable(False, False)
        self.create_widgets()
        # NOTE(review): self.fps is set but not read anywhere in this class.
        self.fps = 30

    def create_widgets(self):
        """Lay out every widget: download controls, playback buttons,
        volume slider, video embed, playlist listbox and scrollbar."""
        self.master.config(bg=self.mainBgColor)

        ##DOWNLOADING
        download_frame = LabelFrame(self.master, bg=self.mainBgColor, width=20)
        download_frame.grid(row=0, column=0, pady=10)

        #Input/Link frame
        input_frame = LabelFrame(download_frame, bg=self.mainBgColor, bd=0)
        input_frame.grid(row=0, column=0, padx=5)

        link_lable = tk.Label(input_frame,
                              text="YouTube link: ",
                              width=10,
                              bg=self.mainBgColor,
                              fg=self.fontColor)
        link_lable.grid(row=1, column=0, pady=5, padx=20)

        self.master.linkText = tk.Entry(input_frame,
                                        width=54,
                                        textvariable=self.video_link,
                                        bg=self.labelBgColor,
                                        fg=self.fontColor)
        self.master.linkText.grid(row=1, column=1, padx=2)

        #destination frame (for better button positioning)
        destination_frame = LabelFrame(download_frame,
                                       bg=self.mainBgColor,
                                       bd=0)
        destination_frame.grid(row=2, column=0)

        destination_label = tk.Label(destination_frame,
                                     text="Destination: ",
                                     width=10,
                                     bg=self.mainBgColor,
                                     fg=self.fontColor)
        destination_label.grid(row=0, column=0, padx=20)

        self.master.destinationText = tk.Entry(destination_frame,
                                               width=40,
                                               textvariable=self.download_path,
                                               bg=self.labelBgColor,
                                               fg=self.fontColor)
        self.master.destinationText.grid(row=0, column=1, padx=(4, 0))

        browse_B = tk.Button(destination_frame,
                             text="Browse",
                             command=self.BrowseDestination,
                             width=10,
                             bg=self.labelBgColor,
                             fg=self.fontColor,
                             activebackground=self.btnHighlight,
                             activeforeground="black")
        browse_B.grid(row=0, column=2, padx=4)

        #Download frame
        download_btn_frame = LabelFrame(download_frame,
                                        bg=self.mainBgColor,
                                        bd=0)
        download_btn_frame.grid(row=3, column=0)

        download_b = tk.Button(download_btn_frame,
                               text="Download",
                               command=self.Download,
                               width=20,
                               bg=self.labelBgColor,
                               fg="white",
                               activebackground=self.btnHighlight,
                               activeforeground="black")
        download_b.grid(row=0, column=0)

        self.download_count = tk.Text(download_btn_frame,
                                      width=22,
                                      height=1,
                                      bg=self.mainBgColor,
                                      fg="white",
                                      bd=0)
        self.download_count.grid(row=0, column=1, padx=5)
        self.download_count.insert(tk.END, "Download status: ")
        self.download_count.insert(tk.END, self.downloadLeft[0])
        self.download_count.insert(tk.END, " / ")
        self.download_count.insert(tk.END, self.downloadLeft[1])

        ##VIDEO PLAYER

        #Playback buttons and volume sliders container
        playback_container_frame = LabelFrame(self.master,
                                              bg=self.mainBgColor,
                                              bd=0)
        playback_container_frame.grid(row=1, column=0, pady=5)

        #Container that has bottom and top frame for playback buttons
        buttons_container = LabelFrame(playback_container_frame,
                                       bg=self.mainBgColor,
                                       bd=0)
        buttons_container.grid(row=0, column=0)

        #Top frame (Empty)
        top_frame = LabelFrame(buttons_container,
                               bg=self.mainBgColor,
                               height=22,
                               bd=0)
        top_frame.grid(row=0, column=0)

        self.playback_buttons_frame = LabelFrame(buttons_container,
                                                 bg=self.mainBgColor,
                                                 bd=0)
        self.playback_buttons_frame.grid(row=1,
                                         column=0,
                                         padx=(50, 0),
                                         pady=(8, 0))

        prevVid = tk.Button(self.playback_buttons_frame,
                            text="Prev",
                            command=self.PreviousVideo,
                            width=10,
                            bg=self.labelBgColor,
                            fg="white",
                            activebackground=self.btnHighlight,
                            activeforeground="black")

        prevVid.grid(row=0, column=0)

        self.playButton = tk.Button(self.playback_buttons_frame,
                                    text="Play",
                                    command=self.PlayVideo,
                                    width=10,
                                    bg=self.labelBgColor,
                                    fg="white",
                                    activebackground=self.btnHighlight,
                                    activeforeground="black")
        self.playButton.grid(row=0, column=1)

        nextVid = tk.Button(self.playback_buttons_frame,
                            text="Next",
                            command=self.NextVideo,
                            width=10,
                            bg=self.labelBgColor,
                            fg="white",
                            activebackground=self.btnHighlight,
                            activeforeground="black")
        nextVid.grid(row=0, column=2)

        self.pauseButton = tk.Button(self.playback_buttons_frame,
                                     text="Pause",
                                     command=self.PauseVideo,
                                     width=10,
                                     bg=self.labelBgColor,
                                     fg="white",
                                     activebackground=self.btnHighlight,
                                     activeforeground="black")

        self.pauseButton.grid(row=0, column=3)

        #Volume slider and bass EQ
        eq_frame = LabelFrame(playback_container_frame,
                              bg=self.mainBgColor,
                              bd=0)
        eq_frame.grid(row=0, column=1, padx=50)

        volumeText = tk.Text(eq_frame,
                             width=10,
                             height=1,
                             bg=self.mainBgColor,
                             fg="white",
                             bd=0)
        volumeText.tag_configure("center", justify="center")
        volumeText.insert("1.0", "Volume")
        volumeText.tag_add("center", "1.0", "end")
        volumeText.grid(row=0, column=0, padx=(30, 0))
        volume_slider = Scale(eq_frame,
                              from_=0,
                              to=100,
                              orient=tk.HORIZONTAL,
                              bg=self.mainBgColor,
                              bd=0,
                              fg="white",
                              troughcolor=self.labelBgColor,
                              highlightbackground=self.mainBgColor,
                              activebackground="#FF00FF",
                              command=self.VolumeSlider,
                              length=150)

        volume_slider.grid(row=1, column=0, padx=(30, 0))
        volume_slider.set(self.curVolume)

        #EQ/Bass boost slider for future use
        #TODO:: Re-compile ffpyplayer module with custom sdl_audio_callback function call, which will take use DSP for changing the pitch of the audio
        #Bind this slider to callback the sdl_audio_callback custom function
        # bassText = tk.Text(eq_frame, width=10, height=1, bg=self.mainBgColor, fg="white", bd=0)
        # bassText.insert(tk.END, "Bass boost")
        # bassText.grid(row=0, column=1)
        # bass_slider = Scale(eq_frame, from_=0, to=100, orient=tk.HORIZONTAL,
        #                     bg=self.mainBgColor, bd=0, fg="white",
        #                     troughcolor=self.labelBgColor, highlightbackground=self.mainBgColor,
        #                     activebackground="#FF00FF")
        # bass_slider.grid(row=1, column=1)
        # bass_slider.set(self.curBassBoost)

        #Video stream frame
        video_player_frame = LabelFrame(self.master,
                                        bg=self.labelBgColor,
                                        bd=1)
        video_player_frame.grid(row=2, column=0, padx=17)

        self.video_embed = tk.Label(video_player_frame,
                                    text="Video",
                                    image=self.blank_img,
                                    bg=self.labelBgColor)
        self.video_embed.grid(row=0, column=0)

        self.now_playing = Text(video_player_frame,
                                width=50,
                                height=1,
                                bg=self.mainBgColor,
                                fg="white")
        self.now_playing.insert(tk.END, "Now playing: ")
        self.now_playing.grid(row=1, column=0)

        #Video queue frame
        queue_frame = LabelFrame(video_player_frame,
                                 bg=self.labelBgColor,
                                 bd=0)
        queue_frame.grid_rowconfigure(0, weight=0)
        queue_frame.grid_columnconfigure(0, weight=1)
        queue_frame.grid(row=0, column=1)

        queue_buttons = LabelFrame(queue_frame, bg=self.labelBgColor)
        queue_buttons.grid(row=0, column=0)

        browse_In = tk.Button(queue_buttons,
                              text="Browse",
                              command=self.BrowseInputFolder,
                              width=10,
                              bg=self.labelBgColor,
                              fg=self.fontColor,
                              bd=1,
                              activebackground=self.btnHighlight,
                              activeforeground="black")
        browse_In.grid(row=0, column=1)

        playAll = tk.Button(queue_buttons,
                            text="Select all",
                            command=self.SelectAll,
                            width=10,
                            bg=self.labelBgColor,
                            fg=self.fontColor,
                            bd=1,
                            activebackground=self.btnHighlight,
                            activeforeground="black")
        playAll.grid(row=0, column=2)

        self.video_list = tk.Listbox(queue_frame,
                                     font=("Helvetica", 12),
                                     selectmode=tk.EXTENDED,
                                     exportselection=0,
                                     height=9,
                                     bg=self.labelBgColor,
                                     fg=self.fontColor,
                                     bd=0,
                                     selectbackground=self.btnHighlight)
        self.video_list.grid(row=1, column=0)
        self.video_list.bind("<<ListboxSelect>>", self.listbox_sel_callback)
        #self.video_list.bind('<Double-Button>', self.PlayVideo)            #Double clicking video causes thread exceptions for some reason

        scrollbar = Scrollbar(queue_frame,
                              orient="vertical",
                              command=self.video_list.yview,
                              bg=self.labelBgColor,
                              highlightcolor=self.btnHighlight,
                              bd=0)
        self.video_list.config(yscrollcommand=scrollbar.set)
        scrollbar.grid(row=1, column=1, sticky='ns')

        self.curVolume = 50
        #Search bar for videos
        # search_bar = tk.Entry(queue_frame, bd=0, )
        # search_bar.grid(row=2,column=0)

    def listbox_sel_callback(self, event):
        """Rebuild self.playlist from the listbox selection and flag the
        change for the playback logic."""
        self.playlist = []
        indices = self.video_list.curselection()
        for i in indices:
            self.playlist.append(self.video_list.get(i))
        self.playlistChanged = True

    def BrowseInputFolder(self):
        """Pick the playback folder and fill the listbox with every file
        found under it (recursively)."""
        video_dir = filedialog.askdirectory(initialdir="C:\\YoutubeVideos")
        self.video_folder.set(video_dir)
        self.video_list.delete(0, tk.END)
        for root, dirs, files in os.walk(self.video_folder.get()):
            for filename in files:
                self.video_list.insert(tk.END, filename)

    def PlayVideo(self):
        """Start streaming the selected video on a daemon thread and swap
        the Play button for a Stop button."""
        global stop_thread
        stop_thread = True
        time.sleep(0.05)  #Dangerous way of waiting for thread lol
        stop_thread = False
        self.isPlaying = True
        self.playlistChanged = False

        # if self.selected_video >= 0 and self.selected_video < len(self.playlist):
        self.start_videostream()
        #self.video_player.set_volume(float(self.curVolume)/100)
        thread = threading.Thread(target=self.Video_data_stream)
        # NOTE(review): truthy int used where a bool is expected; works, but
        # `thread.daemon = True` is the conventional spelling.
        thread.daemon = 1
        thread.start()
        self.playButton.grid_forget()
        self.stopButton = tk.Button(self.playback_buttons_frame,
                                    text="Stop",
                                    command=self.StopVideo,
                                    width=10,
                                    bg="#FF00FF",
                                    fg="black")
        self.stopButton.grid(row=0, column=1)

        #Change the "now playing"
        self.changeNowPlaying()

    def changeNowPlaying(self):
        """Refresh the "Now playing:" text with the current video's name
        (or clear it when playback is stopped)."""
        self.now_playing.delete("1.0", tk.END)
        self.now_playing.insert(tk.END, "Now playing: ")
        if self.isPlaying:
            self.now_playing.insert(tk.END, self.playlist[self.selected_video])

    def StopVideo(self):
        """Signal the stream thread to stop, pause the player, and swap the
        Stop button back to a Play button."""
        global stop_thread
        global pause_thread
        self.isPlaying = False
        stop_thread = True
        pause_thread = True
        self.PauseVideo()

        self.stopButton.grid_forget()
        self.playButton = tk.Button(self.playback_buttons_frame,
                                    text="Play",
                                    command=self.PlayVideo,
                                    width=10,
                                    bg=self.labelBgColor,
                                    fg=self.fontColor)
        self.playButton.grid(row=0, column=1)
        self.changeNowPlaying()

    def PauseVideo(self):
        """Toggle the pause flag and player state, restyling the Pause
        button to reflect the new state."""
        global pause_thread
        if pause_thread:
            #Why isn't this done in play/stop aswell lol
            self.pauseButton.config(text="Pause",
                                    bg=self.labelBgColor,
                                    fg="white")
            pause_thread = False
            self.video_player.set_pause(False)
        else:
            self.pauseButton.config(text="Unpause",
                                    bg=self.btnHighlight,
                                    fg="black")
            pause_thread = True
            self.video_player.set_pause(True)

    def start_videostream(self):
        """Replace any existing MediaPlayer with a fresh one for the current
        playlist entry, restore the user volume, and unpause it."""
        #Start new instance of player
        if self.video_player:
            self.video_player.close_player()
        cVol = float(self.curVolume) / 100
        print(cVol)
        # Player starts paused at a low volume; the real volume is applied
        # after the short settling sleep below.
        self.video_player = MediaPlayer(self.video_folder.get() + "\\" +
                                        self.playlist[self.selected_video],
                                        ff_opts={
                                            'paused': True,
                                            'volume': 0.03
                                        })
        self.video_player.set_size(400, 200)
        #while not self.video_player:
        #    continue
        time.sleep(0.1)
        if self.video_player:
            self.video_player.set_volume(cVol)
        self.video_player.set_pause(False)

    def NextVideo(self):
        """Advance to the next playlist entry (wrapping to 0 at the end)
        and restart the stream.  No-op when nothing is playing."""
        if self.isPlaying == False:
            return
        #Destroy current player if there's one
        self.video_player.close_player()

        #Inform the video stream that video was changed
        self.songChanged = True

        #If playlist was changed, reset the index to 0
        if self.playlistChanged:
            self.selected_video = 0
            self.playlistChanged = False
        #Other wise just increment idx or start from 0 idx
        elif self.selected_video < len(self.playlist) - 1:
            self.selected_video += 1
        else:
            self.selected_video = 0

        self.start_videostream()
        #self.video_player.set_volume(float(self.curVolume)/100)
        self.changeNowPlaying()

    def PreviousVideo(self):
        """Step back to the previous playlist entry (wrapping to the last)
        and restart the stream.  No-op when nothing is playing."""
        if self.isPlaying == False:
            return

        #Destroy current player if there's one
        self.video_player.close_player()

        self.songChanged = True

        if self.playlistChanged:
            self.selected_video = 0
            self.playlistChanged = False
        elif self.selected_video > 0:
            self.selected_video -= 1
        else:
            self.selected_video = len(self.playlist) - 1

        self.start_videostream()
        self.changeNowPlaying()

    def SelectAll(self):
        """Select every listbox entry and rebuild the playlist to match."""
        #Select every line in listbox / Every video from list
        for i in range(0, self.video_list.size()):
            self.video_list.selection_set(i)
        #Since manual selection doesn't call callback functions, just add them to playlist manually
        self.playlist = []
        indices = self.video_list.curselection()
        for i in indices:
            self.playlist.append(self.video_list.get(i))
        self.playlistChanged = True

    def VolumeSlider(self, value):
        """Scale callback: apply the 0-100 slider value to the player."""
        if self.video_player:
            self.video_player.set_volume(float(value) / 100)
        self.curVolume = value

    def Video_data_stream(self):
        """Daemon-thread loop: pull frames from the player and push them
        into the video_embed label until stop_thread is set.

        Controlled via the module-level stop_thread / pause_thread globals.
        """
        global stop_thread
        global pause_thread
        stop_thread = False
        pause_thread = False

        #Start video/audio stream
        #todo:: len(self.playlist will change)
        while True:
            try:

                frame, val = self.video_player.get_frame()
                if val == 'eof':
                    self.video_player.close_player()
                    self.NextVideo()  #Increment the video index
                    self.video_player.set_volume(float(self.curVolume) / 100)
                    #If we still have videos left in playlist, play another one
                    # if self.selected_video < len(self.playlist):
                    #     self.video_player = MediaPlayer(self.video_folder.get() + "\\" + self.playlist[self.selected_video])
                    #     self.video_player.set_size(400, 200)
                elif frame is None:
                    time.sleep(0.01)
                else:
                    image, t = frame
                    w, h = image.get_size()
                    img = np.asarray(image.to_bytearray()[0]).reshape(h, w, 3)
                    the_frame = ImageTk.PhotoImage(Image.fromarray(img))
                    self.video_embed.config(image=the_frame)
                    self.video_embed.image = the_frame
                    if stop_thread:
                        self.video_player.close_player()
                        #Reset the embed image
                        self.video_embed.config(image=self.blank_img)
                        return
                    # NOTE(review): busy-wait with no sleep — burns a CPU
                    # core while paused; a short time.sleep would help.
                    while pause_thread:
                        #Do nothing
                        if stop_thread:
                            pause_thread = False
                            return
                        continue
                    if val <= 1:
                        time.sleep(val)
            except:
                # NOTE(review): bare except hides real bugs; deliberate here
                # because cross-thread player swaps can raise mid-iteration,
                # but narrowing and logging would be safer.
                #Exception (e.g outside thread changes to player can cause exception)
                continue

    def BrowseDestination(self):
        """Pick the folder downloads are saved into."""
        download_directory = filedialog.askdirectory(
            initialdir="C:\\YoutubeVideos")
        self.download_path.set(download_directory)

    def Download(self):
        """Dispatch the entered link to a playlist or single-video download
        worker thread ("list" in the URL means playlist)."""
        self.Update_Download_Status()

        link = self.video_link.get()
        download_folder = self.download_path.get()
        if "list" in link:
            playlist = Playlist(link)
            thread = threading.Thread(target=self.Download_Playlist,
                                      args=(
                                          playlist,
                                          download_folder,
                                      ))
            thread.daemon = 1
            thread.start()
        else:
            thread = threading.Thread(target=self.Download_Single,
                                      args=(
                                          link,
                                          download_folder,
                                      ))
            thread.daemon = 1
            thread.start()

    def Download_Playlist(self, playlist, folder):
        """Worker: download every video of *playlist* into *folder*,
        updating the status counter as it goes."""
        self.downloadLeft = [0, len(playlist.video_urls)]
        for url in playlist.video_urls:
            self.Update_Download_Status()
            try:
                getVideo = YouTube(url)
                video_stream_buffer = getVideo.streams.first()
                video_stream_buffer.download(folder)
                self.downloadLeft[0] += 1
            except:
                # NOTE(review): bare except — an unavailable video just
                # shrinks the total, but real errors are swallowed too.
                if self.downloadLeft[1] > 0:
                    self.downloadLeft[1] -= 1
                continue
        self.Update_Download_Status()
        messagebox.showinfo("Download complete!",
                            "Downloaded videos from playlist to:\n" + folder)

    def Download_Single(self, link, folder):
        """Worker: download a single video at *link* into *folder* and show
        a completion (or failure) dialog."""
        self.downloadLeft = [0, 1]
        try:
            self.Update_Download_Status()
            getVideo = YouTube(link)

            video_stream_buffer = getVideo.streams.first()
            video_stream_buffer.download(folder)
            self.downloadLeft = [1, 1]
            messagebox.showinfo("Download complete!",
                                "Downloaded video to:\n" + folder)
        except:
            # NOTE(review): bare except treats every failure as "video not
            # available", including network and disk errors.
            messagebox.showinfo("Download failed!",
                                "Video not available:\n" + folder)
        self.Update_Download_Status()

    def Update_Download_Status(self):
        """Rewrite the "Download status: x / y" text from downloadLeft."""
        self.download_count.delete('1.0', tk.END)
        self.download_count.insert(tk.END, "Download status: ")
        self.download_count.insert(tk.END, self.downloadLeft[0])
        self.download_count.insert(tk.END, " / ")
        self.download_count.insert(tk.END, self.downloadLeft[1])
Example #5
0
class VideoPlayer:
    """Plays one video at a time inside a Kivy widget using ffpyplayer.

    toggle_playback() selects/switches the active widget; decoded frames are
    pumped by Clock callbacks (_next_frame) and drawn by a Clock trigger
    (_redraw). Audio from the separate audio plugin is paused whenever video
    playback starts.
    """

    def __init__(self):
        self._widget = None    # widget that currently owns playback
        self._player = None    # ffpyplayer MediaPlayer instance
        self._timer = None     # scheduled event, cancelled in pause_playback (assigned elsewhere — TODO confirm)
        self._frame = None     # last decoded (img, pts) pair awaiting redraw
        self._texture = None   # cached GPU texture, rebuilt per video
        self._trigger = Clock.create_trigger(self._redraw)

    def toggle_playback(self, widget):
        """Play/pause the video of *widget*; switching widgets pauses the old one."""
        if self._widget == widget:
            if self._player.get_pause():
                # Resuming: silence the audio plugin first, then unpause and
                # restart the frame pump.
                plugins.audio.audio_player.pause_playback()
                self._player.set_pause(False)
                self._widget.video_state = 'play'
                Clock.schedule_once(self._next_frame)
            else:
                self.pause_playback()
        else:
            # A different widget was tapped: stop current playback and open
            # its video paused at the widget's remembered position.
            plugins.audio.audio_player.pause_playback()
            if self._widget is not None:
                self.pause_playback()
            self._widget = widget
            self._widget.video_state = 'play'
            self._texture = None
            self._player = MediaPlayer(filename=self._widget.video_source,
                                       ff_opts={
                                           'paused': True,
                                           'ss': self._widget.video_pos
                                       })
            # Poll every 100 ms until the file has loaded enough to start.
            Clock.schedule_interval(self._start_playback, .1)

    def _start_playback(self, dt):
        """Clock callback: wait until metadata is available, then unpause.

        Returning False stops the schedule_interval polling.
        """
        if self._player.get_metadata()['duration'] is None:
            return
        if self._player.get_pause():
            self._player.set_pause(False)
        Clock.schedule_once(self._next_frame, 0)
        return False

    def pause_playback(self):
        """Pause the player and drop cached frame/texture state."""
        if self._timer is not None:
            self._timer.cancel()
        if self._player is not None:
            self._player.set_pause(True)
        self._frame = None
        self._texture = None
        if self._widget is not None:
            self._widget.video_state = 'pause'

    def update_video_pos(self, widget, pts):
        """Seek to *pts* seconds (only if *widget* owns playback) and remember it."""
        if self._widget == widget and self._player is not None:
            self._player.seek(pts=pts, relative=False, accurate=True)
        widget.video_pos = pts

    def _next_frame(self, dt):
        """Frame pump: fetch the next frame and reschedule itself.

        get_frame() returns (frame, val) where val is 'eof', 'paused', or the
        delay in seconds until the next frame is due.
        """
        frame, val = self._player.get_frame()
        if val == 'eof':
            # Rewind to the start and show the cover image again.
            self._player.set_pause(True)
            self._player.seek(pts=0, relative=False, accurate=True)
            self._widget.video_image.texture = self._widget.video_cover_image_texture
            self._widget.video_state = 'pause'
            self._widget.video_pos = 0
        elif val == 'paused':
            return
        elif frame is None:
            # No frame decoded yet; retry shortly.
            Clock.schedule_once(self._next_frame, 1 / 100)
        else:
            val = val if val else 1 / 30  # fall back to ~30 fps pacing
            self._frame = frame
            self._trigger()
            Clock.schedule_once(self._next_frame, val)

    def _redraw(self, dt):
        """Trigger callback: blit the pending frame into the widget's texture."""
        if self._player.get_pause() is None or self._frame is None:
            return
        img, pts = self._frame
        if self._texture is None:
            # Lazily create the texture at the video's native size; ffpyplayer
            # frames are bottom-up, so flip once here.
            self._texture = Texture.create(size=img.get_size(), colorfmt='rgb')
            self._texture.flip_vertical()
        self._texture.blit_buffer(img.to_memoryview()[0])
        # Reassign to force Kivy to notice the texture content change.
        self._widget.video_image.texture = None
        self._widget.video_image.texture = self._texture
        self._widget.video_pos = pts
예제 #6
0
class PlayerThread(QThread):
    """Worker thread that decodes a video with ffpyplayer and publishes
    frames, pause state, and progress to the GUI thread via Qt signals."""

    image_sig = pyqtSignal(QtGui.QImage)   # decoded frame, scaled to the label
    status_sig = pyqtSignal(bool)          # pause state (True = paused)
    progress_sig = pyqtSignal(float)       # playback position as 0..1 fraction

    def __init__(self, parent):
        """Wire signals to the parent's slots; playback starts later via
        set_video_name()."""
        super().__init__(parent)
        self.label = parent.label
        self.image_sig.connect(parent.set_image)
        self.status_sig.connect(parent.set_status)
        self.progress_sig.connect(parent.set_progress)
        self.player = None       # ffpyplayer MediaPlayer, created on demand
        self.duration = None     # media duration in seconds, read lazily in run()
        self.progress = 0        # last presented timestamp in seconds
        self.ratio_mode = Qt.KeepAspectRatio
        self.config = {}         # chunk table: {'total': int, 'chunks': [(start_ms, ...), ...]}

    def set_video_name(self, video_name):
        """Open *video_name* (closing any current player) and start decoding."""
        if self.player is not None:
            self.player.close_player()
        self.player = MediaPlayer(video_name)
        self.status_sig.emit(self.player.get_pause())
        self.start()

    def set_config(self, config):
        """Install the chunk table used by next_prev()/find_chunk()."""
        self.config = config

    def close(self):
        """Release the player and stop the thread's event loop."""
        if self.player is not None:
            self.player.close_player()
        self.quit()

    def pause(self):
        """Force-pause playback and notify the GUI."""
        if self.player is not None:
            self.player.set_pause(True)
            self.status_sig.emit(True)

    def toggle_pause(self):
        """Flip the pause state and notify the GUI of the new state."""
        if self.player is not None:
            self.player.toggle_pause()
            self.status_sig.emit(self.player.get_pause())

    def next_prev(self, is_forward):
        """Jump to the next/previous chunk boundary relative to the current
        position. Chunk start times are stored in milliseconds."""
        if self.player is not None:
            chunk_position = self.find_chunk(self.progress)
            if is_forward:
                if chunk_position < self.config['total'] - 1:
                    chunk_position += 1
                    self.player.seek(self.config['chunks'][chunk_position][0] / 1000, relative=False, accurate=False)
            else:
                if chunk_position > 0:
                    chunk_position -= 1
                self.player.seek(self.config['chunks'][chunk_position][0] / 1000, relative=False, accurate=False)

    def find_chunk(self, pts):
        """Binary-search the index of the chunk containing *pts* (seconds);
        returns 0 when no chunk table has been configured."""
        if self.config:
            pts_ms = int(1000 * pts)
            front = 0
            rear = self.config['total'] - 1
            chunks = self.config['chunks']
            while front != rear:
                middle = (front + rear) // 2
                if pts_ms > chunks[middle][0]:
                    if pts_ms < chunks[middle + 1][0]:
                        break
                    else:
                        front = middle + 1
                else:
                    rear = middle
            return (front + rear) // 2
        else:
            return 0

    def seek(self, ratio):
        """Seek to *ratio* (0..1) of the media duration, once it is known."""
        if self.duration is not None:
            pts = ratio * self.duration
            self.player.seek(pts, relative=False, accurate=False)

    def image_stretch(self, is_stretch):
        """Choose whether frames are stretched to fill the label or keep aspect."""
        self.ratio_mode = Qt.IgnoreAspectRatio if is_stretch else Qt.KeepAspectRatio

    def run(self):
        """Decode frames until EOF, emitting each one as a scaled QImage.

        get_frame() returns (frame, val) where val is 'eof', 'paused', or the
        delay in seconds until the next frame is due.
        """
        val = ''
        while val != 'eof':
            frame, val = self.player.get_frame()
            if self.duration is None:
                self.duration = self.player.get_metadata()['duration']
            if val != 'eof' and frame is not None:
                img, t = frame
                if img is not None:
                    byte = img.to_bytearray()[0]
                    width, height = img.get_size()
                    # Pass an explicit bytesPerLine (3 bytes/pixel for RGB888):
                    # without it QImage assumes 32-bit-aligned scanlines and
                    # shears frames whose width is not a multiple of 4.
                    convert_to_qt_format = QtGui.QImage(byte, width, height, width * 3, QImage.Format_RGB888)
                    p = convert_to_qt_format.scaled(self.label.width(), self.label.height(), self.ratio_mode)
                    self.image_sig.emit(p)
                    self.progress = t
                    if self.duration is not None:
                        self.progress_sig.emit(t / self.duration)
                    time.sleep(val)
예제 #7
0
def video(request):
    """Django view: capture screenshots from a video (YouTube URL or an
    uploaded file) via an interactive OpenCV window, then run the OCR
    pipeline (line/word/char detection) on each screenshot and render the
    recognized text.

    Window keys: s/S = pause (scrub via the trackbar), w/W (YouTube branch)
    or p/P (upload branch) = play, ESC = exit.

    NOTE(review): relies on module-level globals defined elsewhere in this
    file: d (base path), j (screenshot counter), cropping/refPt (mouse
    state), flick, click_and_crop, line_detect, word_detect, char_detect,
    document. TODO confirm their contracts against the rest of the module.
    """
    # Start each request with a clean screenshots directory.
    if (os.path.exists(str(d) + '/media/output/screenshots/')):
        shutil.rmtree(str(d) + '/media/output/screenshots/')
    if not os.path.exists(str(d) + '/media/output/screenshots/'):
        os.mkdir(str(d) + '/media/output/screenshots/')

    global cropping, refPt, rect, waitTime, j
    refPt = []
    if 'y_submitted' in request.POST:
        # --- Branch 1: play a YouTube URL via pafy ---
        import pafy, youtube_dl
        url2 = request.POST.get('youtubeurl')
        vPafy = pafy.new(url2)
        play = vPafy.getbest(preftype="mp4")

        cv2.namedWindow('Video')
        cv2.moveWindow('Video', 250, 150)

        vidcap = cv2.VideoCapture(play.url)

        tots = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
        i = 0
        # Trackbar 'S' lets the user scrub; flick is its callback.
        cv2.createTrackbar('S', 'Video', 0, int(tots) - 1, flick)
        cv2.setTrackbarPos('S', 'Video', 0)
        status = 'stay'
        # Separate ffpyplayer instance provides the audio track.
        player = MediaPlayer(str(play.url))
        while (vidcap.isOpened()):

            try:
                # Loop the video when the last frame is reached.
                if i == tots - 1:
                    i = 0
                vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
                success, frame = vidcap.read()
                # Scale down to 750px wide (and cap very tall frames).
                r = 750.0 / frame.shape[1]
                dim = (750, int(frame.shape[0] * r))
                frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
                if frame.shape[0] > 600:
                    frame = cv2.resize(frame, (500, 500))

                audio_frame, val = player.get_frame()

                if val != 'eof' and audio_frame is not None:
                    img, t = audio_frame

                cv2.setMouseCallback('Video', click_and_crop)

                # cropping == 3 means click_and_crop finished a selection:
                # save the selected region as a screenshot.
                if cropping == 3:
                    roi = frame[refPt[0][1]:refPt[1][1],
                                refPt[0][0]:refPt[1][0]]
                    cv2.rectangle(frame, refPt[0], refPt[1], (0, 255, 0), 0)
                    print(refPt)

                    cv2.imwrite(
                        d + '/media/output/screenshots/' + str(j) + '.jpg',
                        roi)
                    j = j + 1
                    cropping = 1

                cv2.imshow('Video', frame)

                # Key dispatch; -1 (no key) keeps the current status, and an
                # unmapped key raises KeyError (handled below).
                status = {
                    ord('s'): 'stay',
                    ord('S'): 'stay',
                    ord('w'): 'play',
                    ord('W'): 'play',
                    -1: status,
                    27: 'exit'
                }[cv2.waitKey(28)]

                if status == 'play':
                    i += 1
                    cv2.setTrackbarPos('S', 'Video', i)
                    player.set_pause(False)
                    continue
                if status == 'stay':
                    # Paused: follow the trackbar so the user can scrub.
                    i = cv2.getTrackbarPos('S', 'Video')

                    player.set_pause(True)
                if status == 'exit':
                    break

                if (cv2.waitKey(1) & 0xFF == ord('q')):
                    break
            except KeyError:
                print("Invalid Key was pressed")
        cv2.destroyWindow('Video')
        vidcap.release()
        cv2.destroyAllWindows()
    elif 'v_submitted' in request.POST:
        # --- Branch 2: play an uploaded video file ---

        myfile = request.FILES['video']
        print(myfile)

        fs = FileSystemStorage()
        #filename = fs.save(myfile.name, myfile)
        filename = fs.save('video', myfile)

        uploaded_file_url = fs.url(filename)

        cv2.namedWindow('Video')
        cv2.moveWindow('Video', 250, 150)

        vidcap = cv2.VideoCapture(d + uploaded_file_url)
        tots = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)

        i = 0
        cv2.createTrackbar('S', 'Video', 0, int(tots) - 1, flick)
        cv2.setTrackbarPos('S', 'Video', 0)

        status = 'stay'

        player = MediaPlayer(d + uploaded_file_url)

        while (vidcap.isOpened()):

            try:
                if i == tots - 1:
                    i = 0
                vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
                success, frame = vidcap.read()

                r = 750.0 / frame.shape[1]
                dim = (750, int(frame.shape[0] * r))
                frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
                if frame.shape[0] > 600:
                    frame = cv2.resize(frame, (500, 500))

                audio_frame, val = player.get_frame()

                if val != 'eof' and audio_frame is not None:
                    img, t = audio_frame

                cv2.setMouseCallback('Video', click_and_crop)

                if cropping == 3:

                    # NOTE(review): this branch offsets the ROI by +1 pixel,
                    # unlike the YouTube branch — confirm which is intended.
                    roi = frame[refPt[0][1] + 1:refPt[1][1],
                                refPt[0][0] + 1:refPt[1][0]]
                    cv2.rectangle(frame, refPt[0], refPt[1], (0, 255, 0), 0)
                    print(refPt)

                    cv2.imwrite(
                        d + '/media/output/screenshots/' + str(j) + '.jpg',
                        roi)
                    j = j + 1
                    cropping = 1

                cv2.imshow('Video', frame)
                status = {
                    ord('s'): 'stay',
                    ord('S'): 'stay',
                    ord('P'): 'play',
                    ord('p'): 'play',
                    -1: status,
                    27: 'exit'
                }[cv2.waitKey(28)]

                if status == 'play':
                    i += 1
                    cv2.setTrackbarPos('S', 'Video', i)
                    player.set_pause(False)
                    continue
                if status == 'stay':
                    i = cv2.getTrackbarPos('S', 'Video')

                    player.set_pause(True)
                if status == 'exit':
                    break
                if (cv2.waitKey(1) & 0xFF == ord('q')):
                    break
            except KeyError:
                print("Invalid Key was pressed")
        cv2.destroyWindow('Video')
        vidcap.release()
        cv2.destroyAllWindows()
    # --- OCR pipeline: run detection on every saved screenshot ---
    context = {}
    # NOTE(review): these file handles are never closed ('file.txt' is opened
    # for write here and reopened for read in the loop) — consider `with`.
    file = open('file.txt', 'w')
    file.write("")
    for img in os.listdir(d + '/media/output/screenshots'):
        # Recreate intermediate output folders fresh for each screenshot.
        if os.path.exists(str(d) + '/media/line/'):
            shutil.rmtree(str(d) + '/media/line/')
        if os.path.exists(str(d) + '/media/output/wordfiles/'):
            shutil.rmtree(str(d) + '/media/output/wordfiles/')
        if os.path.exists(str(d) + '/media/output/charfiles/'):
            shutil.rmtree(str(d) + '/media/output/charfiles/')

        if not os.path.exists(str(d) + '/media/line'):
            os.mkdir(str(d) + '/media/line')
        if not os.path.exists(str(d) + '/media/output/wordfiles'):
            os.mkdir(str(d) + '/media/output/wordfiles')
        if not os.path.exists(str(d) + '/media/output/charfiles'):
            os.mkdir(str(d) + '/media/output/charfiles')

        line_detect(d + '/media/output/screenshots/' + img)
        word_detect(d + '/media/line')
        char_detect()
        document()
        file = open("file.txt", 'r')
        data = file.read()
        context = {'data': data}
    return render(request, 'app/index.html', context)
예제 #8
0
class CustomImage(KivyImage):
    """Custom image display widget.
    Enables editing operations, displaying them in real-time using a low resolution preview of the original image file.
    All editing variables are watched by the widget and it will automatically update the preview when they are changed.
    """

    # --- Source/media metadata ---
    exif = ''
    pixel_format = ''
    length = NumericProperty(0)            # video duration in seconds
    framerate = ListProperty()             # [numerator, denominator]
    video = BooleanProperty(False)         # True when source is a movie file
    player = ObjectProperty(None, allownone=True)  # ffpyplayer MediaPlayer
    position = NumericProperty(0.0)        # playback position, fraction 0..1
    start_point = NumericProperty(0.0)     # trim start, fraction 0..1
    end_point = NumericProperty(1.0)       # trim end, fraction 0..1
    original_image = ObjectProperty()
    photoinfo = ListProperty()
    original_width = NumericProperty(0)
    original_height = NumericProperty(0)
    # --- Orientation adjustments ---
    flip_horizontal = BooleanProperty(False)
    flip_vertical = BooleanProperty(False)
    mirror = BooleanProperty(False)
    angle = NumericProperty(0)
    rotate_angle = NumericProperty(0)
    fine_angle = NumericProperty(0)
    # --- Tone/color adjustments ---
    brightness = NumericProperty(0)
    shadow = NumericProperty(0)
    contrast = NumericProperty(0)
    gamma = NumericProperty(0)
    saturation = NumericProperty(0)
    temperature = NumericProperty(0)
    tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    curve = ListProperty()
    # --- Cropping state (pixel amounts in original-image space) ---
    crop_top = NumericProperty(0)
    crop_bottom = NumericProperty(0)
    crop_left = NumericProperty(0)
    crop_right = NumericProperty(0)
    filter = StringProperty('')
    filter_amount = NumericProperty(0)
    autocontrast = BooleanProperty(False)
    equalize = NumericProperty(0)
    histogram = ListProperty()
    edit_image = ObjectProperty()          # PIL preview image being edited
    cropping = BooleanProperty(False)
    touch_point = ObjectProperty()
    active_cropping = BooleanProperty(False)
    crop_start = ListProperty()
    # --- Filter/effect amounts ---
    sharpen = NumericProperty(0)
    bilateral = NumericProperty(0.5)
    bilateral_amount = NumericProperty(0)
    median_blur = NumericProperty(0)
    vignette_amount = NumericProperty(0)
    vignette_size = NumericProperty(.5)
    edge_blur_amount = NumericProperty(0)
    edge_blur_size = NumericProperty(.5)
    edge_blur_intensity = NumericProperty(.5)
    cropper = ObjectProperty()  #Holder for the cropper overlay
    crop_controls = ObjectProperty()  #Holder for the cropper edit panel object
    adaptive_clip = NumericProperty(0)
    # --- Border overlay ---
    border_opacity = NumericProperty(1)
    border_image = ListProperty()
    border_tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    border_x_scale = NumericProperty(.5)
    border_y_scale = NumericProperty(.5)
    crop_min = NumericProperty(100)        # smallest allowed cropped dimension (px)
    size_multiple = NumericProperty(1)

    #Denoising variables
    denoise = BooleanProperty(False)
    luminance_denoise = NumericProperty(10)
    color_denoise = NumericProperty(10)
    search_window = NumericProperty(15)
    block_size = NumericProperty(5)

    # --- Video conversion state (plain class attributes, set per-conversion) ---
    frame_number = 0
    max_frames = 0
    start_seconds = 0
    first_frame = None

    def start_video_convert(self):
        """Prepare the player for frame-by-frame conversion of the video.

        Reopens the source paused and muted ('an' disables audio decoding),
        computes how many frames to convert when trim points are set, and
        pre-seeks to the trim start. Stores the first decoded frame in
        self.first_frame for get_converted_frame() to consume.
        """
        self.close_video()
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 0.0,
                                      'an': True
                                  })
        self.player.set_volume(0)
        self.frame_number = 0
        if self.start_point > 0 or self.end_point < 1:
            # Frame budget = total frames scaled by the trimmed fraction.
            all_frames = self.length * (self.framerate[0] / self.framerate[1])
            self.max_frames = all_frames * (self.end_point - self.start_point)
        else:
            self.max_frames = 0  # 0 means "no limit" in get_converted_frame()

        #need to wait for load so the seek routine doesnt crash python
        self.first_frame = self.wait_frame()

        if self.start_point > 0:
            self.start_seconds = self.length * self.start_point
            self.first_frame = self.seek_player(self.start_seconds)

    def wait_frame(self):
        """Block until the player yields a frame; returns the (img, pts) pair."""
        while True:
            candidate, _ = self.player.get_frame(force_refresh=True)
            if candidate:
                return candidate

    def start_seek(self, seek):
        """Request a jump to *seek* seconds (unpause, seek, re-pause)."""
        player = self.player
        player.set_pause(False)
        player.seek(pts=seek, relative=False, accurate=True)
        player.set_pause(True)

    def seek_player(self, seek):
        """Seek to *seek* seconds and poll until the player actually lands
        within a couple of frames of the target; returns the landed frame.

        Re-issues the seek every 5 polls if it appears stuck, and gives up
        after 30 polls total so the UI never freezes.
        """
        self.start_seek(seek)

        framerate = self.framerate[0] / self.framerate[1]
        target_seek_frame = seek * framerate

        loops = 0        # polls since the last seek request
        total_loops = 0  # polls overall (hard cap)
        while True:
            loops += 1
            total_loops += 1
            if loops > 5:
                #seek has been stuck for a while, try to seek again
                self.start_seek(seek)
                loops = 0
            #check if seek has gotten within a couple frames yet
            frame = self.wait_frame()
            current_seek = frame[1]  # pts of the decoded frame, in seconds
            current_seek_frame = current_seek * framerate
            frame_distance = abs(target_seek_frame - current_seek_frame)
            if frame_distance < 2 or total_loops >= 30:
                #seek has finished, or give up after a lot of tries to not freeze the program...
                break
        return frame

    def get_converted_frame(self):
        """Decode the next video frame, convert it to a PIL RGB image, apply
        the current edits, and return [image, pts]; None at end of stream or
        when the trim-point frame budget is exhausted.
        """
        frame = self.first_frame
        if frame:
            # Consume the frame cached by start_video_convert()/seek.
            self.first_frame = None
        else:
            self.player.set_pause(False)
            frame = None
            while not frame:
                frame, result = self.player.get_frame(force_refresh=False)
                if result == 'eof':
                    return None
            self.player.set_pause(True)
        self.frame_number += 1
        if self.max_frames and self.frame_number > self.max_frames:
            return None
        raw_image = frame[0]
        width, height = raw_image.get_size()
        scaler = SWScale(width,
                         height,
                         raw_image.get_pixel_format(),
                         ofmt='rgb24')
        rgb_frame = scaler.scale(raw_image)
        pil_image = Image.frombuffer(mode='RGB',
                                     size=(width, height),
                                     data=bytes(rgb_frame.to_bytearray()[0]),
                                     decoder_name='raw')
        # Video frames arrive upside-down; flip before editing.
        pil_image = pil_image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if pil_image.mode != 'RGB':
            pil_image = pil_image.convert('RGB')
        pil_image = self.adjust_image(pil_image, preview=False)
        return [pil_image, frame[1]]

    def close_video(self):
        """Shut down and drop the media player, if one is open."""
        if not self.player:
            return
        self.player.close_player()
        self.player = None

    def open_video(self):
        """Open the source video paused/muted and read its metadata.

        Seeks one second in ('ss') and blocks until a frame has decoded so
        duration, framerate and pixel format are populated before reading.
        """
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 1.0,
                                      'an': True
                                  })
        # Reuse wait_frame() instead of duplicating its polling loop; the
        # frame itself is discarded -- we only need the player to be loaded.
        self.wait_frame()
        data = self.player.get_metadata()
        self.length = data['duration']
        self.framerate = data['frame_rate']
        self.pixel_format = data['src_pix_fmt']

    def set_aspect(self, aspect_x, aspect_y):
        """Adjusts the cropping of the image to be a given aspect ratio.
        Attempts to keep the image as large as possible
        Arguments:
            aspect_x: Horizontal aspect ratio element, numerical value.
            aspect_y: Vertical aspect ratio element, numerical value.
        """

        visible_width = self.original_width - self.crop_left - self.crop_right
        visible_height = self.original_height - self.crop_top - self.crop_bottom
        extra_top = extra_bottom = extra_left = extra_right = 0
        if aspect_x != visible_width or aspect_y != visible_height:
            current_ratio = visible_width / visible_height
            target_ratio = aspect_x / aspect_y
            if target_ratio > current_ratio:
                # Too tall for the target ratio: trim top and bottom equally.
                trimmed = visible_height - (visible_width / target_ratio)
                extra_top = extra_bottom = trimmed / 2
            else:
                # Too wide for the target ratio: trim the sides equally.
                trimmed = visible_width - (visible_height * target_ratio)
                extra_left = extra_right = trimmed / 2
        self.crop_top = self.crop_top + extra_top
        self.crop_right = self.crop_right + extra_right
        self.crop_bottom = self.crop_bottom + extra_bottom
        self.crop_left = self.crop_left + extra_left
        self.reset_cropper()

    def crop_percent(self, side, percent):
        """Set the crop on *side* ('top'/'right'/'bottom'/anything else =
        left) to *percent* of the image dimension, clamped so the remaining
        area never shrinks below crop_min pixels.
        """
        vertical = side in ('top', 'bottom')
        extent = self.original_height if vertical else self.original_width
        # The crop on the opposite side limits how far this one may go.
        opposite_attr = {'top': 'crop_bottom', 'bottom': 'crop_top',
                         'right': 'crop_left'}.get(side, 'crop_right')
        target_attr = {'top': 'crop_top', 'bottom': 'crop_bottom',
                       'right': 'crop_right'}.get(side, 'crop_left')
        crop_amount = extent * percent
        opposite = getattr(self, opposite_attr)
        if (extent - crop_amount - opposite) < self.crop_min:
            crop_amount = extent - opposite - self.crop_min
        setattr(self, target_attr, crop_amount)
        self.reset_cropper()
        if self.crop_controls:
            self.crop_controls.update_crop()

    def get_crop_percent(self):
        """Return the crop amounts as fractions: [top, right, bottom, left]."""
        full_width = self.original_width
        full_height = self.original_height
        return [self.crop_top / full_height,
                self.crop_right / full_width,
                self.crop_bottom / full_height,
                self.crop_left / full_width]

    def get_crop_size(self):
        """Describe the cropped size and aspect ratio as a display string."""
        cropped_width = self.original_width - self.crop_left - self.crop_right
        cropped_height = self.original_height - self.crop_top - self.crop_bottom
        cropped_aspect = cropped_width / cropped_height
        source_aspect = self.original_width / self.original_height
        return "Size: {}x{}, Aspect: {} (Original: {})".format(
            int(cropped_width), int(cropped_height),
            round(cropped_aspect, 2), round(source_aspect, 2))

    def reset_crop(self):
        """Sets the crop values back to 0 for all sides"""
        self.crop_top = self.crop_bottom = 0
        self.crop_left = self.crop_right = 0
        self.reset_cropper(setup=True)

    def reset_cropper(self, setup=False):
        """Updates the position and size of the cropper overlay object.

        Converts the crop amounts (stored in original-image pixels) into
        widget coordinates and repositions/resizes the cropper accordingly.
        When *setup* is True, also records the current size as the cropper's
        maximum resizable bounds.
        """

        if self.cropper:
            # Texture edges in widget coordinates: [top, right, bottom, left].
            texture_size = self.get_texture_size()
            texture_top_edge = texture_size[0]
            texture_right_edge = texture_size[1]
            texture_bottom_edge = texture_size[2]
            texture_left_edge = texture_size[3]

            texture_width = (texture_right_edge - texture_left_edge)
            #texture_height = (texture_top_edge - texture_bottom_edge)

            # divisor converts original-image pixels to on-screen pixels.
            divisor = self.original_width / texture_width
            top_edge = texture_top_edge - (self.crop_top / divisor)
            bottom_edge = texture_bottom_edge + (self.crop_bottom / divisor)
            left_edge = texture_left_edge + (self.crop_left / divisor)
            right_edge = texture_right_edge - (self.crop_right / divisor)
            width = right_edge - left_edge
            height = top_edge - bottom_edge

            self.cropper.pos = [left_edge, bottom_edge]
            self.cropper.size = [width, height]
            if setup:
                self.cropper.max_resizable_width = width
                self.cropper.max_resizable_height = height

    def get_texture_size(self):
        """Returns a list of the texture size coordinates.
        Returns:
            List of numbers: [Top edge, Right edge, Bottom edge, Left edge]
        """

        # The displayed image is centered within the widget.
        left_edge = (self.size[0] / 2) - (self.norm_image_size[0] / 2)
        bottom_edge = (self.size[1] / 2) - (self.norm_image_size[1] / 2)
        right_edge = left_edge + self.norm_image_size[0]
        top_edge = bottom_edge + self.norm_image_size[1]
        return [top_edge, right_edge, bottom_edge, left_edge]

    def point_over_texture(self, pos):
        """Checks if the given pos (x,y) value is over the image texture.
        Returns False if not over texture, returns point transformed to texture coordinates if over texture.
        """

        top_edge, right_edge, bottom_edge, left_edge = self.get_texture_size()
        if not (left_edge < pos[0] < right_edge):
            return False
        if not (bottom_edge < pos[1] < top_edge):
            return False
        return [pos[0] - left_edge, pos[1] - bottom_edge]

    def detect_crop_edges(self, first, second):
        """Given two points, this will detect the proper crop area for the image.
        Arguments:
            first: First crop corner.
            second: Second crop corner.
        Returns a list of cropping values:
            [crop_top, crop_bottom, crop_left, crop_right]
        """

        # Order the corners regardless of drag direction.
        left, right = sorted((first[0], second[0]))
        bottom, top = sorted((first[1], second[1]))
        # Convert from displayed-image coordinates to original-image pixels.
        scale = self.original_width / self.norm_image_size[0]
        return [(self.norm_image_size[1] - top) * scale,
                bottom * scale,
                left * scale,
                (self.norm_image_size[0] - right) * scale]

    def set_crop(self, posx, posy, width, height):
        """Sets the crop values based on the cropper widget."""

        top_edge, right_edge, bottom_edge, left_edge = self.get_texture_size()

        # Distances from the cropper rectangle to each texture edge, clamped
        # at zero so the cropper cannot produce negative crops.
        left_crop = posx - left_edge
        bottom_crop = posy - bottom_edge
        right_crop = right_edge - width - posx
        top_crop = top_edge - height - posy

        # divisor converts on-screen pixels to original-image pixels.
        divisor = self.original_width / (right_edge - left_edge)
        self.crop_left = max(left_crop, 0) * divisor
        self.crop_right = max(right_crop, 0) * divisor
        self.crop_top = max(top_crop, 0) * divisor
        self.crop_bottom = max(bottom_crop, 0) * divisor
        if self.crop_controls:
            self.crop_controls.update_crop()

    # --- Kivy property observers: any change to an edit parameter triggers a
    # preview regeneration via update_preview(). on_size is a no-op. ---
    def on_sharpen(self, *_):
        self.update_preview()

    def on_bilateral(self, *_):
        self.update_preview()

    def on_bilateral_amount(self, *_):
        self.update_preview()

    def on_median_blur(self, *_):
        self.update_preview()

    def on_border_opacity(self, *_):
        self.update_preview()

    def on_border_image(self, *_):
        self.update_preview()

    def on_border_x_scale(self, *_):
        self.update_preview()

    def on_border_y_scale(self, *_):
        self.update_preview()

    def on_vignette_amount(self, *_):
        self.update_preview()

    def on_vignette_size(self, *_):
        self.update_preview()

    def on_edge_blur_amount(self, *_):
        self.update_preview()

    def on_edge_blur_size(self, *_):
        self.update_preview()

    def on_edge_blur_intensity(self, *_):
        self.update_preview()

    def on_rotate_angle(self, *_):
        self.update_preview()

    def on_fine_angle(self, *_):
        self.update_preview()

    def on_flip_horizontal(self, *_):
        self.update_preview()

    def on_flip_vertical(self, *_):
        self.update_preview()

    def on_autocontrast(self, *_):
        self.update_preview()

    def on_adaptive_clip(self, *_):
        self.update_preview()

    def on_equalize(self, *_):
        self.update_preview()

    def on_brightness(self, *_):
        self.update_preview()

    def on_shadow(self, *_):
        self.update_preview()

    def on_gamma(self, *_):
        self.update_preview()

    def on_contrast(self, *_):
        self.update_preview()

    def on_saturation(self, *_):
        self.update_preview()

    def on_temperature(self, *_):
        self.update_preview()

    def on_curve(self, *_):
        self.update_preview()

    def on_tint(self, *_):
        self.update_preview()

    def on_border_tint(self, *_):
        self.update_preview()

    def on_size(self, *_):
        pass

    def on_source(self, *_):
        """The source file has been changed, reload image and regenerate preview."""

        # Treat the source as a video when its extension appears in the
        # module-level `movietypes` list.
        self.video = os.path.splitext(self.source)[1].lower() in movietypes
        if self.video:
            self.open_video()
        self.reload_edit_image()
        self.update_texture(self.edit_image)
        #self.update_preview()

    def on_position(self, *_):
        """Playback position changed; intentionally a no-op here."""
        pass

    def reload_edit_image(self):
        """Regenerate the edit preview image.

        For videos, grabs the frame at the current playback position and
        converts it to an RGB PIL image; for stills, loads the file from disk
        and applies the stored rotation.  The result is downscaled to 75% of
        the window width and stored in `self.edit_image`; the full-resolution
        copy is kept in `self.original_image`.
        """
        if self.video:
            if not self.player:
                return
            # Position is a fraction of the total video length.
            location = self.length * self.position
            frame = self.seek_player(location)
            frame = frame[0]
            frame_size = frame.get_size()
            pixel_format = frame.get_pixel_format()
            # Convert the decoded frame to rgb24 so PIL can ingest the bytes.
            frame_converter = SWScale(frame_size[0],
                                      frame_size[1],
                                      pixel_format,
                                      ofmt='rgb24')
            new_frame = frame_converter.scale(frame)
            image_data = bytes(new_frame.to_bytearray()[0])

            original_image = Image.frombuffer(mode='RGB',
                                              size=(frame_size[0],
                                                    frame_size[1]),
                                              data=image_data,
                                              decoder_name='raw')
            #for some reason, video frames are read upside-down? fix it here...
            original_image = original_image.transpose(
                PIL.Image.FLIP_TOP_BOTTOM)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            self.original_image = original_image
            image = original_image.copy()

        else:
            original_image = Image.open(self.source)
            try:
                self.exif = original_image.info.get('exif', b'')
            except Exception:
                # Fall back to empty *bytes* (was '') so the attribute type
                # stays consistent with the b'' default above.  Narrowed from
                # a bare `except:` so KeyboardInterrupt/SystemExit propagate.
                self.exif = b''
            if self.angle != 0:
                if self.angle == 90:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_90)
                if self.angle == 180:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_180)
                if self.angle == 270:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_270)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            image = original_image.copy()
            self.original_image = original_image.copy()
            original_image.close()
        # Scale the preview to 75% of the window width, preserving aspect
        # ratio, with a 10px floor on each dimension.
        image_width = Window.width * .75
        width = int(image_width)
        height = int(image_width * (image.size[1] / image.size[0]))
        if width < 10:
            width = 10
        if height < 10:
            height = 10
        image = image.resize((width, height))
        if image.mode != 'RGB':
            image = image.convert('RGB')
        self.size_multiple = self.original_width / image.size[0]
        self.edit_image = image
        Clock.schedule_once(
            self.update_histogram
        )  #Need to delay this because kivy will mess up the drawing of it on first load.
        #self.histogram = image.histogram()

    def update_histogram(self, *_):
        """Recompute the displayed histogram from the current edit image."""
        self.histogram = self.edit_image.histogram()

    def on_texture(self, instance, value):
        # Keep the reported texture size in sync with the new texture.
        if value is not None:
            self.texture_size = list(value.size)
        # NOTE(review): the mirror flip runs even when `value` is None —
        # presumably `self.texture` is always valid by this point; confirm.
        if self.mirror:
            self.texture.flip_horizontal()

    def denoise_preview(self, width, height, pos_x, pos_y):
        """Generate a denoised JPEG preview of a region of the original image.

        Arguments:
            width: Width in pixels of the region to preview.
            height: Height in pixels of the region to preview.
            pos_x: Left edge of the region within the original image.
            pos_y: Top edge of the region within the original image.
        Returns: A BytesIO object containing the JPEG-encoded preview.
        """
        left = pos_x
        right = pos_x + width
        upper = pos_y
        # Fix: the lower edge previously used `width` (`pos_y + width`),
        # ignoring the `height` parameter and always producing a square crop.
        lower = pos_y + height
        original_image = self.original_image
        preview = original_image.crop(box=(left, upper, right, lower))
        if preview.mode != 'RGB':
            preview = preview.convert('RGB')
        # Round-trip through OpenCV (BGR) for the non-local-means denoiser.
        preview_cv = cv2.cvtColor(numpy.array(preview), cv2.COLOR_RGB2BGR)
        preview_cv = cv2.fastNlMeansDenoisingColored(preview_cv, None,
                                                     self.luminance_denoise,
                                                     self.color_denoise,
                                                     self.search_window,
                                                     self.block_size)
        preview_cv = cv2.cvtColor(preview_cv, cv2.COLOR_BGR2RGB)
        preview = Image.fromarray(preview_cv)
        preview_bytes = BytesIO()
        preview.save(preview_bytes, 'jpeg')
        preview_bytes.seek(0)
        return preview_bytes

    def update_preview(self, denoise=False, recrop=True):
        """Regenerate and display the preview image.

        Arguments:
            denoise: Also run the denoising filter (requires OpenCV).
            recrop: Reset the crop overlay after updating.
        """

        preview = self.adjust_image(self.edit_image)
        if denoise and opencv:
            # Round-trip through OpenCV's BGR colorspace for the denoiser.
            cv_image = cv2.cvtColor(numpy.array(preview), cv2.COLOR_RGB2BGR)
            cv_image = cv2.fastNlMeansDenoisingColored(
                cv_image, None, self.luminance_denoise, self.color_denoise,
                self.search_window, self.block_size)
            preview = Image.fromarray(
                cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))

        self.update_texture(preview)
        self.histogram = preview.histogram()
        if recrop:
            self.reset_cropper(setup=True)

    def adjust_image(self, image, preview=True):
        """Applies all current editing operations to an image.
        Arguments:
            image: A PIL image.
            preview: Generate edit image in preview mode (faster)
        Returns: A PIL image.
        """

        if not preview:
            # Full-quality mode: undo the EXIF orientation so edits apply to
            # an upright image.  Assumes photoinfo[13] holds the EXIF
            # orientation tag — TODO confirm against the database schema.
            orientation = self.photoinfo[13]
            if orientation == 3 or orientation == 4:
                image = image.transpose(PIL.Image.ROTATE_180)
            elif orientation == 5 or orientation == 6:
                image = image.transpose(PIL.Image.ROTATE_90)
            elif orientation == 7 or orientation == 8:
                image = image.transpose(PIL.Image.ROTATE_270)
            if orientation in [2, 4, 5, 7]:
                image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
            # Scale pixel-sized filter parameters up to full resolution.
            size_multiple = self.size_multiple
        else:
            size_multiple = 1

        # Sharpen (enhance(1) is a no-op, so sharpen=0 leaves the image as-is).
        if self.sharpen != 0:
            enhancer = ImageEnhance.Sharpness(image)
            image = enhancer.enhance(self.sharpen + 1)
        # Median blur via OpenCV; kernel size must be odd.
        if self.median_blur != 0 and opencv:
            max_median = 10 * size_multiple
            median = int(self.median_blur * max_median)
            if median % 2 == 0:
                median = median + 1
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.medianBlur(open_cv_image, median)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        # Edge-preserving bilateral filter via OpenCV.
        if self.bilateral != 0 and self.bilateral_amount != 0 and opencv:
            diameter = int(self.bilateral * 10 * size_multiple)
            if diameter < 1:
                diameter = 1
            sigma_color = self.bilateral_amount * 100 * size_multiple
            if sigma_color < 1:
                sigma_color = 1
            sigma_space = sigma_color
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.bilateralFilter(open_cv_image, diameter,
                                                sigma_color, sigma_space)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        # Vignette: composite the image over black through a blurred
        # elliptical greyscale mask.
        if self.vignette_amount > 0 and self.vignette_size > 0:
            vignette = Image.new(mode='RGB', size=image.size, color=(0, 0, 0))
            filter_color = int((1 - self.vignette_amount) * 255)
            vignette_mixer = Image.new(mode='L',
                                       size=image.size,
                                       color=filter_color)
            draw = ImageDraw.Draw(vignette_mixer)
            shrink_x = int((self.vignette_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.vignette_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            vignette_mixer = vignette_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.vignette_amount * 60) +
                                         60))
            image = Image.composite(image, vignette, vignette_mixer)
        # Edge blur: same elliptical-mask technique, compositing over a
        # Gaussian-blurred copy instead of black.
        if self.edge_blur_amount > 0 and self.edge_blur_intensity > 0 and self.edge_blur_size > 0:
            blur_image = image.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            filter_color = int((1 - self.edge_blur_intensity) * 255)
            blur_mixer = Image.new(mode='L',
                                   size=image.size,
                                   color=filter_color)
            draw = ImageDraw.Draw(blur_mixer)
            shrink_x = int((self.edge_blur_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.edge_blur_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            blur_mixer = blur_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            image = Image.composite(image, blur_image, blur_mixer)
        # Crop: in preview mode, darken the cropped-away margins instead of
        # cutting them; in full-quality mode, actually crop (clamping each
        # edge to the image bounds).
        if self.crop_top != 0 or self.crop_bottom != 0 or self.crop_left != 0 or self.crop_right != 0:
            if preview:
                overlay = Image.new(mode='RGB',
                                    size=image.size,
                                    color=(0, 0, 0))
                divisor = self.original_width / image.size[0]
                draw = ImageDraw.Draw(overlay)
                draw.rectangle(
                    [0, 0, (self.crop_left / divisor), image.size[1]],
                    fill=(255, 255, 255))
                draw.rectangle(
                    [0, 0, image.size[0], (self.crop_top / divisor)],
                    fill=(255, 255, 255))
                draw.rectangle([(image.size[0] -
                                 (self.crop_right / divisor)), 0,
                                (image.size[0]), image.size[1]],
                               fill=(255, 255, 255))
                draw.rectangle([
                    0, (image.size[1] - (self.crop_bottom / divisor)),
                    image.size[0], image.size[1]
                ],
                               fill=(255, 255, 255))
                bright = ImageEnhance.Brightness(overlay)
                overlay = bright.enhance(.333)
                image = ImageChops.subtract(image, overlay)
            else:
                if self.crop_left >= image.size[0]:
                    crop_left = 0
                else:
                    crop_left = int(self.crop_left)
                if self.crop_top >= image.size[1]:
                    crop_top = 0
                else:
                    crop_top = int(self.crop_top)
                if self.crop_right >= image.size[0]:
                    crop_right = image.size[0]
                else:
                    crop_right = int(image.size[0] - self.crop_right)
                if self.crop_bottom >= image.size[1]:
                    crop_bottom = image.size[1]
                else:
                    crop_bottom = int(image.size[1] - self.crop_bottom)
                if self.video:
                    #ensure that image size is divisible by 2
                    new_width = crop_right - crop_left
                    new_height = crop_bottom - crop_top
                    if new_width % 2 == 1:
                        if crop_right < image.size[0]:
                            crop_right = crop_right + 1
                        else:
                            crop_right = crop_right - 1
                    if new_height % 2 == 1:
                        if crop_bottom < image.size[1]:
                            crop_bottom = crop_bottom + 1
                        else:
                            crop_bottom = crop_bottom - 1
                image = image.crop(
                    (crop_left, crop_top, crop_right, crop_bottom))
        if self.flip_horizontal:
            image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        if self.flip_vertical:
            image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        # 90-degree rotations: PIL's ROTATE_* is counter-clockwise, so the
        # user's clockwise angle maps to the complementary constant.
        if self.rotate_angle != 0:
            if self.rotate_angle == 90:
                image = image.transpose(PIL.Image.ROTATE_270)
            if self.rotate_angle == 180:
                image = image.transpose(PIL.Image.ROTATE_180)
            if self.rotate_angle == 270:
                image = image.transpose(PIL.Image.ROTATE_90)
        # Fine rotation: rotate, then crop to the largest axis-aligned
        # rectangle that fits inside the rotated image.
        if self.fine_angle != 0:
            total_angle = -self.fine_angle * 10
            angle_radians = math.radians(abs(total_angle))
            width, height = rotated_rect_with_max_area(image.size[0],
                                                       image.size[1],
                                                       angle_radians)
            x = int((image.size[0] - width) / 2)
            y = int((image.size[1] - height) / 2)
            if preview:
                image = image.rotate(total_angle, expand=False)
            else:
                image = image.rotate(total_angle,
                                     resample=PIL.Image.BICUBIC,
                                     expand=False)
            image = image.crop((x, y, image.size[0] - x, image.size[1] - y))
        if self.autocontrast:
            image = ImageOps.autocontrast(image)
        # Partial equalization: blend between the original and a fully
        # equalized copy.
        if self.equalize != 0:
            equalize_image = ImageOps.equalize(image)
            image = Image.blend(image, equalize_image, self.equalize)
        # Color temperature: scale RGB channels by a value from the module
        # kelvin lookup tables (index 0-99).
        temperature = int(round(abs(self.temperature) * 100))
        if temperature != 0:
            temperature = temperature - 1
            if self.temperature > 0:
                kelvin = negative_kelvin[99 - temperature]
            else:
                kelvin = positive_kelvin[temperature]
            matrix = ((kelvin[0] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[1] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[2] / 255.0), 0.0)
            image = image.convert('RGB', matrix)
        if self.brightness != 0:
            enhancer = ImageEnhance.Brightness(image)
            image = enhancer.enhance(1 + self.brightness)
        # Shadows: remap dark tones through a 256-entry lookup table —
        # negative crushes shadows to black, positive lifts them.
        if self.shadow != 0:
            if self.shadow < 0:
                floor = int(abs(self.shadow) * 128)
                table = [0] * floor
                remaining_length = 256 - floor
                for index in range(0, remaining_length):
                    value = int(round((index / remaining_length) * 256))
                    table.append(value)
                lut = table * 3
            else:
                floor = int(abs(self.shadow) * 128)
                table = []
                for index in range(0, 256):
                    percent = 1 - (index / 255)
                    value = int(round(index + (floor * percent)))
                    table.append(value)
                lut = table * 3
            image = image.point(lut)

        # Gamma: power-curve lookup table; -1 maps to a huge exponent
        # (effectively black), other values use 1/(g+1) style curves.
        if self.gamma != 0:
            if self.gamma == -1:
                gamma = 99999999999999999
            elif self.gamma < 0:
                gamma = 1 / (self.gamma + 1)
            elif self.gamma > 0:
                gamma = 1 / ((self.gamma + 1) * (self.gamma + 1))
            else:
                gamma = 1
            lut = [pow(x / 255, gamma) * 255 for x in range(256)]
            lut = lut * 3
            image = image.point(lut)
        if self.contrast != 0:
            enhancer = ImageEnhance.Contrast(image)
            image = enhancer.enhance(1 + self.contrast)
        if self.saturation != 0:
            enhancer = ImageEnhance.Color(image)
            image = enhancer.enhance(1 + self.saturation)
        # Tint: per-channel multiply via a color conversion matrix.
        if self.tint != [1.0, 1.0, 1.0, 1.0]:
            matrix = (self.tint[0], 0.0, 0.0, 0.0, 0.0, self.tint[1], 0.0, 0.0,
                      0.0, 0.0, self.tint[2], 0.0)
            image = image.convert('RGB', matrix)
        # User-defined tone curve applied as a lookup table.
        if self.curve:
            lut = self.curve * 3
            image = image.point(lut)

        # Denoising is preview-only elsewhere; here it runs only for the
        # final full-quality render.
        if self.denoise and not preview and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.fastNlMeansDenoisingColored(
                open_cv_image, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)

        # Adaptive contrast: CLAHE on the L channel in Lab colorspace.
        if self.adaptive_clip > 0 and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2Lab)
            channels = cv2.split(open_cv_image)
            clahe = cv2.createCLAHE(clipLimit=(self.adaptive_clip * 4),
                                    tileGridSize=(8, 8))
            clahe_image = clahe.apply(channels[0])
            channels[0] = clahe_image
            open_cv_image = cv2.merge(channels)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_Lab2RGB)
            image = Image.fromarray(open_cv_image)

        # Decorative border: pick the border variant whose aspect ratio best
        # matches the image, scale/crop it, and composite through its alpha.
        if self.border_image:
            image_aspect = image.size[0] / image.size[1]
            closest_aspect = min(self.border_image[1],
                                 key=lambda x: abs(x - image_aspect))
            index = self.border_image[1].index(closest_aspect)
            image_file = os.path.join('borders', self.border_image[2][index])
            if preview:
                resample = PIL.Image.NEAREST
            else:
                resample = PIL.Image.BICUBIC
            border_image = Image.open(image_file)
            border_crop_x = int(border_image.size[0] *
                                ((self.border_x_scale + 1) / 15))
            border_crop_y = int(border_image.size[1] *
                                ((self.border_y_scale + 1) / 15))
            border_image = border_image.crop(
                (border_crop_x, border_crop_y,
                 border_image.size[0] - border_crop_x,
                 border_image.size[1] - border_crop_y))
            border_image = border_image.resize(image.size, resample)

            # JPEG borders carry their alpha in a sidecar '-mask.jpg' file;
            # other formats use their own alpha channel.
            if os.path.splitext(image_file)[1].lower() == '.jpg':
                alpha_file = os.path.splitext(image_file)[0] + '-mask.jpg'
                if not os.path.exists(alpha_file):
                    alpha_file = image_file
                alpha = Image.open(alpha_file)
                alpha = alpha.convert('L')
                alpha = alpha.crop((border_crop_x, border_crop_y,
                                    alpha.size[0] - border_crop_x,
                                    alpha.size[1] - border_crop_y))
                alpha = alpha.resize(image.size, resample)
            else:
                alpha = border_image.split()[-1]
                border_image = border_image.convert('RGB')
            if self.border_tint != [1.0, 1.0, 1.0, 1.0]:
                matrix = (self.border_tint[0], 0.0, 0.0, 1.0, 0.0,
                          self.border_tint[1], 0.0, 1.0, 0.0, 0.0,
                          self.border_tint[2], 1.0)
                border_image = border_image.convert('RGB', matrix)

            enhancer = ImageEnhance.Brightness(alpha)
            alpha = enhancer.enhance(self.border_opacity)
            image = Image.composite(border_image, image, alpha)

        return image

    def update_texture(self, image):
        """Saves a PIL image to the visible texture.
        Argument:
            image: A PIL image
        """

        # Encode to an in-memory JPEG, then hand the buffer to Kivy.
        buffer = BytesIO()
        image.save(buffer, 'jpeg')
        buffer.seek(0)
        self._coreimage = CoreImage(buffer, ext='jpg')
        self._on_tex_change()

    def get_full_quality(self):
        """Generate a full sized and full quality version of the source image.
        Returns: A PIL image.
        """

        image = self.original_image.copy()
        if not self.video and self.angle != 0:
            # Map the stored rotation angle to the matching PIL transpose op.
            rotations = {
                90: PIL.Image.ROTATE_90,
                180: PIL.Image.ROTATE_180,
                270: PIL.Image.ROTATE_270,
            }
            if self.angle in rotations:
                image = image.transpose(rotations[self.angle])
        if image.mode != 'RGB':
            image = image.convert('RGB')
        return self.adjust_image(image, preview=False)

    def close_image(self):
        """Release the PIL file handle held by the loaded original image."""
        self.original_image.close()
예제 #9
0
class FFPyPlayer(BaseMoviePlayer):
    """Interface class for the FFPyPlayer library for use with `MovieStim`.

    This class also serves as the reference implementation for classes which
    interface with movie codec libraries for use with `MovieStim`. Creating new
    player classes which closely replicate the behaviour of this one should
    allow them to smoothly plug into `MovieStim`.

    """
    _movieLib = 'ffpyplayer'

    def __init__(self, parent):
        """Initialize the player interface.

        Parameters
        ----------
        parent : MovieStim
            Stimulus object that owns this player; `loop` and (optionally)
            `_noAudio` are read from it.

        """
        self._filename = u""

        self.parent = parent

        # handle to `ffpyplayer`
        self._handle = None

        # thread for reading frames asynchronously
        self._tStream = None

        # data from stream thread
        self._lastFrame = NULL_MOVIE_FRAME_INFO
        self._frameIndex = -1
        self._loopCount = 0
        self._metadata = None  # metadata from the stream

        self._lastPlayerOpts = DEFAULT_FF_OPTS.copy()

        # options from the parent
        if self.parent.loop:  # infinite loop
            self._lastPlayerOpts['loop'] = 0
        else:
            self._lastPlayerOpts['loop'] = 1  # play once

        if hasattr(self.parent, '_noAudio'):
            self._lastPlayerOpts['an'] = self.parent._noAudio

        # status flags
        self._status = NOT_STARTED

    def start(self, log=True):
        """Initialize and start the decoder. This method will return when a
        valid frame is made available.

        Parameters
        ----------
        log : bool
            Log this event (currently unused).

        """
        # Clear queued data from previous streams.  Use the null-frame
        # sentinel (consistent with `__init__`) rather than `None`, so
        # accessors like `pts` that read `_lastFrame.absTime` don't raise
        # `AttributeError` before the first frame arrives.
        self._lastFrame = NULL_MOVIE_FRAME_INFO
        self._frameIndex = -1

        # open the media player, paused until `play()` is called
        self._handle = MediaPlayer(self._filename,
                                   ff_opts=self._lastPlayerOpts)
        self._handle.set_pause(True)

        # Pull the first frame to get metadata. NB - `_enqueueFrame` should be
        # able to do this but the logic in there depends on having access to
        # metadata first. That may be rewritten at some point to reduce all of
        # this to just a single `_enqeueFrame` call.
        #
        self._status = NOT_STARTED

        # hand off the player interface to the thread
        self._tStream = MovieStreamThreadFFPyPlayer(self._handle)
        self._tStream.begin()

        # make sure we have metadata
        self.update()

    def load(self, pathToMovie):
        """Load a movie file from disk.

        Parameters
        ----------
        pathToMovie : str
            Path to movie file, stream (URI) or camera. Must be a format that
            FFMPEG supports.

        """
        # set the file path
        self._filename = pathToString(pathToMovie)

        # Check if the player is already started. Close it and load a new
        # instance if so.
        if self._handle is not None:  # player already started
            # make sure it's the correct type
            if not isinstance(self._handle, MediaPlayer):
                raise TypeError(
                    'Incorrect type for `FFMovieStim._player`, expected '
                    '`ffpyplayer.player.MediaPlayer`. Got type `{}` '
                    'instead.'.format(type(self._handle).__name__))

            # close the player and reset
            self.unload()

            # self._selectWindow(self.win)  # free buffers here !!!

        self.start()

        self._status = NOT_STARTED

    def unload(self):
        """Unload the video stream and reset.
        """
        # NOTE(review): raises AttributeError if called with no player loaded
        # (`_handle` is None) — confirm callers always check `isLoaded` first.
        self._handle.close_player()
        self._filename = u""
        self._frameIndex = -1
        self._handle = None  # reset

    @property
    def handle(self):
        """Handle to the `MediaPlayer` object exposed by FFPyPlayer. If `None`,
        no media player object has yet been initialized.
        """
        return self._handle

    @property
    def isLoaded(self):
        """`True` if a media player handle currently exists (`bool`)."""
        return self._handle is not None

    @property
    def metadata(self):
        """Most recent metadata (`MovieMetadata`).
        """
        return self.getMetadata()

    def getMetadata(self):
        """Get metadata from the movie stream.

        Returns
        -------
        MovieMetadata
            Movie metadata object. At a minimum, fields `duration`, `size`,
            and `frameRate` are populated if a valid movie has been previously
            loaded.

        Raises
        ------
        RuntimeError
            If no movie has been loaded (raised by `_assertMediaPlayer`).

        """
        self._assertMediaPlayer()

        # NOTE(review): `_metadata` must have been populated by the stream
        # thread by now; subscripting fails if it is still None — confirm
        # `update()` guarantees this.
        metadata = self._metadata

        # write metadata to the fields of a `MovieMetadata` object
        toReturn = MovieMetadata(mediaPath=self._filename,
                                 title=metadata['title'],
                                 duration=metadata['duration'],
                                 frameRate=metadata['frame_rate'],
                                 size=metadata['src_vid_size'],
                                 pixelFormat=metadata['src_pix_fmt'],
                                 movieLib=self._movieLib,
                                 userData=None)

        return toReturn

    def _assertMediaPlayer(self):
        """Ensure the media player instance is available. Raises a
        `RuntimeError` if no movie is loaded.
        """
        # Guard-clause form: only complain when the handle is missing or of
        # the wrong type.
        if not isinstance(self._handle, MediaPlayer):
            raise RuntimeError(
                "Calling this class method requires a successful call to "
                "`load` first.")

    @property
    def status(self):
        """Player status flag (`int`).
        """
        return self._status

    @property
    def isPlaying(self):
        """`True` if the video is presently playing (`bool`)."""
        # Status flags as properties are pretty useful for users since they are
        # self documenting and prevent the user from touching the status flag
        # attribute directly.
        #
        return self.status == PLAYING

    @property
    def isNotStarted(self):
        """`True` if the video has not been started yet (`bool`). This status
        is given after a video is loaded and play has yet to be called.
        """
        return self.status == NOT_STARTED

    @property
    def isStopped(self):
        """`True` if the movie has been stopped.
        """
        return self.status == STOPPED

    @property
    def isPaused(self):
        """`True` if the movie has been paused.
        """
        # Unlike the other flags, this one queries the player directly rather
        # than the `_status` attribute, so it requires a loaded player.
        self._assertMediaPlayer()

        return self._handle.get_pause()

    @property
    def isFinished(self):
        """`True` if the video is finished (`bool`).
        """
        # NOTE(review): FINISHED and STOPPED are distinct status constants;
        # the original author questioned whether they should differ — confirm.
        return self.status == FINISHED

    def play(self, log=False):
        """Start or continue a paused movie from current position.

        Parameters
        ----------
        log : bool
            Log the play event.

        Returns
        -------
        None
            This implementation always returns `None`. (The reference
            interface describes returning the frame index playback started
            at; that is not implemented here.)

        """
        self._assertMediaPlayer()

        # resume/start decoding on the stream thread
        self._tStream.play()

        self._status = PLAYING

    def stop(self, log=False):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again.

        Use `pause()` instead if you may need to restart the movie.

        Parameters
        ----------
        log : bool
            Log the stop event.

        Raises
        ------
        RuntimeError
            If no stream thread has been started yet.

        """
        if self._tStream is None:
            raise RuntimeError("Cannot close stream, not opened yet.")

        # close the thread
        if not self._tStream.isDone():
            self._tStream.shutdown()
        self._tStream.join()  # wait until thread exits
        self._tStream = None

        # tear down the player after the thread has released it
        if self._handle is not None:
            self._handle.close_player()
            self._handle = None  # reset

    def pause(self, log=False):
        """Pause the current point in the movie. The image of the last frame
        will persist on-screen until `play()` or `stop()` are called.

        Parameters
        ----------
        log : bool
            Log this event.

        """
        self._assertMediaPlayer()

        self._tStream.pause()

        # NOTE(review): always returns False; no caller-visible meaning is
        # evident from this file — confirm before relying on it.
        return False

    def seek(self, timestamp, log=False):
        """Seek to a particular timestamp in the movie.

        Parameters
        ----------
        timestamp : float
            Time in seconds.
        log : bool
            Log the seek event.

        Raises
        ------
        NotImplementedError
            Always; seeking is not supported by this backend.

        """
        raise NotImplementedError(
            "This feature is not available for the current backend.")

    def rewind(self, seconds=5, log=False):
        """Rewind the video.

        Parameters
        ----------
        seconds : float
            Time in seconds to rewind from the current position. Default is 5
            seconds.
        log : bool
            Log this event.

        Raises
        ------
        NotImplementedError
            Always; rewinding is not supported by this backend.

        """
        raise NotImplementedError(
            "This feature is not available for the current backend.")

    def fastForward(self, seconds=5, log=False):
        """Fast-forward the video.

        Parameters
        ----------
        seconds : float
            Time in seconds to fast forward from the current position. Default
            is 5 seconds.
        log : bool
            Log this event.

        Raises
        ------
        NotImplementedError
            Always; fast-forwarding is not supported by this backend.

        """
        raise NotImplementedError(
            "This feature is not available for the current backend.")

    def replay(self, autoStart=True, log=False):
        """Replay the movie from the beginning.

        Parameters
        ----------
        autoStart : bool
            Start playback immediately. If `False`, you must call `play()`
            afterwards to initiate playback. (NOTE(review): this argument is
            currently unused — `load` always restarts the stream the same
            way; confirm intended behavior.)
        log : bool
            Log this event.

        Notes
        -----
        * This tears down the current media player instance and creates a new
          one. Similar to calling `stop()` and `loadMovie()`. Use `seek(0.0)` if
          you would like to restart the movie without reloading.

        """
        # remember the path before `stop()` clears player state
        lastMovieFile = self._filename
        self.stop()  # stop the movie
        # self._autoStart = autoStart
        self.load(lastMovieFile)  # will play if auto start

    # --------------------------------------------------------------------------
    # Audio stream control methods
    #

    @property
    def muted(self):
        """`True` if the stream audio is muted (`bool`).

        The getter reads the player handle directly; the setter routes
        through the stream thread.
        """
        return self._handle.get_mute()  # thread-safe?

    @muted.setter
    def muted(self, value):
        self._tStream.setMute(value)

    def volumeUp(self, amount):
        """Increase the volume by a fixed amount.

        Parameters
        ----------
        amount : float or int
            Amount to increase the volume relative to the current volume.

        Returns
        -------
        float
            The volume after the increase (as reported by the player).

        """
        self._assertMediaPlayer()

        # Delegate clamping/validation to the `volume` property setter.
        raisedVolume = self.volume + amount
        self.volume = raisedVolume

        return self.volume

    def volumeDown(self, amount):
        """Decrease the volume by a fixed amount.

        Parameters
        ----------
        amount : float or int
            Amount to decrease the volume relative to the current volume.

        Returns
        -------
        float
            The volume after the decrease (as reported by the player).

        """
        self._assertMediaPlayer()

        # Delegate clamping/validation to the `volume` property setter.
        loweredVolume = self.volume - amount
        self.volume = loweredVolume

        return self.volume

    @property
    def volume(self):
        """Volume for the audio track for this movie (`int` or `float`).

        Queried from the underlying media player handle.
        """
        self._assertMediaPlayer()

        return self._handle.get_volume()  # thread-safe? TODO confirm

    @volume.setter
    def volume(self, value):
        self._assertMediaPlayer()
        # Clamp the requested value to [0.0, 1.0] before handing it to the
        # stream reader thread.
        self._tStream.setVolume(max(min(value, 1.0), 0.0))

    @property
    def loopCount(self):
        """Number of loops completed since playback started (`int`). This value
        is reset when either `stop` or `loadMovie` is called.

        Updated by `_enqueueFrame` from the stream status.
        """
        return self._loopCount

    # --------------------------------------------------------------------------
    # Timing related methods
    #
    # The methods here are used to handle timing, such as converting between
    # movie and experiment timestamps.
    #

    @property
    def pts(self):
        """Presentation timestamp for the current movie frame in seconds
        (`float`).

        The value for this either comes from the decoder or some other time
        source. This should be synchronized to the start of the audio track. A
        value of `-1.0` is invalid.

        """
        # No player handle means no valid frame yet — report the invalid
        # sentinel value.
        if self._handle is None:
            return -1.0

        return self._lastFrame.absTime

    def getStartAbsTime(self):
        """Get the absolute experiment time in seconds the movie starts at
        (`float`).

        This value reflects the time which the movie would have started if
        played continuously from the start. Seeking and pausing the movie
        causes this value to change.

        Returns
        -------
        float
            Start time of the movie in absolute experiment time.

        """
        self._assertMediaPlayer()

        # The nominal start time is "now" minus how far into the movie the
        # most recent frame's timestamp places us.
        nowTime = getTime()
        return nowTime - self._lastFrame.absTime

    def movieToAbsTime(self, movieTime):
        """Convert a movie timestamp to absolute experiment timestamp.

        Parameters
        ----------
        movieTime : float or int
            Movie timestamp to convert to absolute experiment time.

        Returns
        -------
        float
            Timestamp in experiment time which is coincident with the provided
            `movieTime` timestamp. The returned value should usually be precise
            down to about five decimal places.

        Raises
        ------
        TypeError
            If `movieTime` is not a `float` or `int`.

        """
        self._assertMediaPlayer()

        # Accept both `float` and `int`, as the error message promises. The
        # previous check (`isinstance(movieTime, float)`) rejected integer
        # timestamps despite the message saying they were allowed.
        if not isinstance(movieTime, (int, float)):
            raise TypeError(
                "Value for parameter `movieTime` must have type `float` or "
                "`int`.")

        return self.getStartAbsTime() + movieTime

    def absToMovieTime(self, absTime):
        """Convert absolute experiment timestamp to a movie timestamp.

        Parameters
        ----------
        absTime : float or int
            Absolute experiment time to convert to movie time.

        Returns
        -------
        float
            Movie time referenced to absolute experiment time. If the value is
            negative then provided `absTime` happens before the beginning of the
            movie from the current time stamp. The returned value should usually
            be precise down to about five decimal places.

        Raises
        ------
        TypeError
            If `absTime` is not a `float` or `int`.

        """
        self._assertMediaPlayer()

        # Accept both `float` and `int`, as the error message promises. The
        # previous check (`isinstance(absTime, float)`) rejected integer
        # timestamps despite the message saying they were allowed.
        if not isinstance(absTime, (int, float)):
            raise TypeError(
                "Value for parameter `absTime` must have type `float` or "
                "`int`.")

        return absTime - self.getStartAbsTime()

    def movieTimeFromFrameIndex(self, frameIdx):
        """Get the movie time at which a frame with the given index is
        scheduled to be presented.

        This is used to handle logic for seeking through a video feed (if
        permitted by the player).

        Parameters
        ----------
        frameIdx : int
            Frame index. Negative values are accepted but they will return
            negative timestamps.

        Returns
        -------
        float
            Scheduled presentation time of the frame in movie time.

        """
        self._assertMediaPlayer()

        # Each frame occupies one frame interval, so the schedule is linear.
        interval = self._metadata.frameInterval
        return frameIdx * interval

    def frameIndexFromMovieTime(self, movieTime):
        """Get the frame index of a given movie time.

        Parameters
        ----------
        movieTime : float
            Timestamp in movie time to convert to a frame index.

        Returns
        -------
        int
            Frame index that should be presented at the specified movie time.

        """
        self._assertMediaPlayer()

        # Floor so a timestamp anywhere inside a frame's interval maps to
        # that frame's index.
        interval = self._metadata.frameInterval
        return math.floor(movieTime / interval)

    @property
    def isSeekable(self):
        """Is seeking allowed for the video stream (`bool`)? If `False` then
        `frameIndex` will increase monotonically.
        """
        return False  # fixed for now; backend does not support seeking yet

    @property
    def frameInterval(self):
        """Duration a single frame is to be presented in seconds (`float`).
        This is derived from the framerate information in the metadata. If no
        movie is loaded, the returned value will be invalid.
        """
        return self.metadata.frameInterval

    @property
    def frameIndex(self):
        """Current frame index (`int`).

        Index of the current frame in the stream. If playing from a file or any
        other seekable source, this value may not increase monotonically with
        time. A value of `-1` is invalid, meaning either the video is not
        started or there is some issue with the stream.

        """
        return self._lastFrame.frameIndex

    def getPercentageComplete(self):
        """Provides a value between 0.0 and 100.0, indicating the amount of the
        movie that has been already played (`float`).

        Returns
        -------
        float
            Percentage of the movie played so far. Returns `0.0` when the
            duration is unknown (zero or `None`), e.g. for some streams.

        """
        duration = self.metadata.duration

        # Guard against metadata reporting no duration; the unguarded
        # division would raise ZeroDivisionError (or TypeError for None).
        if not duration:
            return 0.0

        return (self.pts / duration) * 100.0

    # --------------------------------------------------------------------------
    # Methods for getting video frames from the encoder
    #

    def _enqueueFrame(self):
        """Grab the latest frame from the stream.

        Pulls the most recent frame produced by the stream reader thread,
        unpacks it, and updates this object's cached state (`_metadata`,
        `_status`, `_frameIndex`, `_loopCount`, `_streamTime`, `_lastFrame`).
        Also mirrors the stream status onto `self.parent.status`.

        Returns
        -------
        bool
            `True` if a frame has been enqueued. Returns `False` if the camera
            is not ready or if the stream was closed.

        """
        self._assertMediaPlayer()

        # If the queue is empty, the decoder thread has not yielded a new frame
        # since the last call.
        enqueuedFrame = self._tStream.getRecentFrame()
        if enqueuedFrame is None:
            return False

        # Unpack the data we got back ...
        # Note - Bit messy here, we should just hold onto the `enqueuedFrame`
        # instance and reference its fields from properties. Keeping like this
        # for now.
        frameImage = enqueuedFrame.frameImage
        streamStatus = enqueuedFrame.streamStatus
        self._metadata = enqueuedFrame.metadata
        self.parent.status = self._status = streamStatus.status
        self._frameIndex = streamStatus.frameIndex
        self._loopCount = streamStatus.loopCount

        # status information
        self._streamTime = streamStatus.streamTime  # stream time for the camera

        # if we have a new frame, update the frame information
        # NOTE(review): assumes plane 0 of the frame holds the full pixel
        # buffer — confirm against the stream's output pixel format.
        videoBuffer = frameImage.to_bytearray()[0]
        videoFrameArray = np.frombuffer(videoBuffer, dtype=np.uint8)

        # provide the last frame
        self._lastFrame = MovieFrame(
            frameIndex=self._frameIndex,
            absTime=self._streamTime,
            displayTime=self.metadata.frameInterval,
            size=frameImage.get_size(),
            colorData=videoFrameArray,
            audioChannels=0,  # not populated yet ...
            audioSamples=None,
            metadata=self.metadata,
            movieLib=u'ffpyplayer',
            userData=None)

        return True

    def update(self):
        """Update this player.

        Pulls the latest data from the video stream and refreshes the cached
        player state. Call this at a higher frequency than the movie's frame
        rate to avoid frame skips.

        """
        self._assertMediaPlayer()

        # A finished reader thread means the movie has ended; otherwise grab
        # the newest frame from the stream.
        if self._tStream.isDone():
            self.parent.status = self._status = FINISHED
        else:
            self._enqueueFrame()

    def getMovieFrame(self):
        """Get the movie frame scheduled to be displayed at the current time.

        Refreshes the player state first (via `update`), then returns the
        most recently enqueued frame.

        Returns
        -------
        `~psychopy.visual.movies.frame.MovieFrame`
            Current movie frame.

        """
        self.update()

        return self._lastFrame

    def __del__(self):
        """Cleanup when unloading.

        Shuts down and joins the stream reader thread (if running) and closes
        the media player handle. `getattr` guards handle partially-initialized
        instances where these attributes were never created.
        """
        stream = getattr(self, '_tStream', None)
        if stream is not None:
            if not stream.isDone():
                stream.shutdown()
            stream.join()

        handle = getattr(self, '_handle', None)
        if handle is not None:
            handle.close_player()