Example #1
from ffpyplayer.pic import SWScale
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import get_supported_pixfmts
from ffpyplayer.writer import MediaWriter


def save_video_thumbnail(source, output):
    """Saves a thumbnail of the given video under the given name."""
    # seek 1 second in so we skip any leading black frames
    player = MediaPlayer(source, ff_opts={'ss': 1.0})
    frame, val = None, None
    while not frame:
        frame, val = player.get_frame(force_refresh=True)
        if val == 'eof':
            # no decodable video frame (e.g. audio-only file); stop polling
            break
    player.close_player()
    if val == 'eof':
        return None
    elif frame is None:
        return None
    else:
        img = frame[0]
        pixel_format = img.get_pixel_format()
        img_size = img.get_size()
        thumb_size = 256, int(img_size[1] * 256 / img_size[0])
        codec = 'tiff'
        output_format = get_supported_pixfmts(codec, pixel_format)[0]
        # resize and convert to the best supported pixel format
        sws = SWScale(img_size[0], img_size[1], pixel_format, thumb_size[0],
                      thumb_size[1], output_format)
        thumbnail = sws.scale(img)
        streams = [{
            'pix_fmt_in': output_format,
            'width_in': thumb_size[0],
            'height_in': thumb_size[1],
            'codec': codec,
            'frame_rate': (30, 1)
        }]
        writer = MediaWriter(output,
                             streams,
                             lib_opts={'compression_algo': 'lzw'})
        writer.write_frame(img=thumbnail, pts=0, stream=0)
        writer.close()
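
A minimal way to exercise the function above (both file names are illustrative; TIFF pairs with the LZW compression set in lib_opts):

save_video_thumbnail('input.mp4', 'input_thumb.tiff')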
Example #2
def test_play():
    from .common import get_media
    from ffpyplayer.player import MediaPlayer
    import time

    error = [
        None,
    ]

    def callback(selector, value):
        if selector.endswith('error'):
            error[0] = selector, value

    # only video
    ff_opts = {'an': True, 'sync': 'video'}
    player = MediaPlayer(get_media('dw11222.mp4'),
                         callback=callback,
                         ff_opts=ff_opts)

    i = 0
    while not error[0]:
        frame, val = player.get_frame()
        if val == 'eof':
            break
        elif frame is None:
            time.sleep(0.001)
        else:
            img, t = frame
            i += 1

    player.close_player()
    if error[0]:
        raise Exception('{}: {}'.format(*error[0]))

    assert i == 6077
Example #3
import time

from ffpyplayer.player import MediaPlayer


def ffplay(path):
    global player
    player = MediaPlayer(path)
    time.sleep(0.5)  # give the player a moment to open the file
    duration = player.get_metadata()['duration']
    while True:
        pts = player.get_pts()
        # stop once playback has reached the end of the file
        if pts is not None and duration is not None and pts >= duration:
            time.sleep(0.5)
            player.toggle_pause()
            player.close_player()
            break
        time.sleep(0.1)  # avoid busy-waiting at 100% CPU
    time.sleep(1)
Example #4
def verify_frames(filename, timestamps, frame_vals=None):
    import math
    import time
    from ffpyplayer.player import MediaPlayer
    error = [
        None,
    ]

    def callback(selector, value):
        if selector.endswith('error'):
            error[0] = selector, value

    player = MediaPlayer(filename, callback=callback)

    read_timestamps = set()
    try:
        i = -1
        while not error[0]:
            frame, val = player.get_frame()
            if val == 'eof':
                break
            if val == 'paused':
                raise ValueError('Got paused')
            elif frame is None:
                time.sleep(0.01)
            else:
                img, t = frame
                print(i, t)
                if i < 0:
                    i += 1
                    continue

                print(i, t, timestamps[i])
                read_timestamps.add(t)
                assert math.isclose(t, timestamps[i], rel_tol=.1)

                if frame_vals:
                    assert frame_vals[i] == img.to_bytearray()[0][0]

                i += 1
    finally:
        player.close_player()

    if error[0] is not None:
        raise Exception('{}: {}'.format(*error[0]))

    assert len(timestamps) - 1 == i
    assert len(read_timestamps) == i
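
A hypothetical invocation of verify_frames, assuming a short clip whose frames land on 30 fps timestamps (both the file name and the values are purely illustrative; note the function skips the first decoded frame):

expected = [i / 30 for i in range(6)]
verify_frames('sample.mp4', expected)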
Example #5
import time

# PyQt5 is assumed here; the PySide bindings expose the same names
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow

from ffpyplayer.player import MediaPlayer


class MainWindow(QMainWindow):

    def __init__(self):
        super().__init__()
        self.player = None
        self.setWindowTitle("FFPyPlayer Test")

    def showEvent(self, e):
        self.timer_id = self.startTimer(1)
        self.lbl = QLabel(self)
        self.lbl.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
        self.setCentralWidget(self.lbl)

    def timerEvent(self, event) -> None:
        self.killTimer(self.timer_id)
        ff_opts = {'paused': False, 'autoexit': True}
        self.player = MediaPlayer('../example_data/sample.mp4', ff_opts=ff_opts, lib_opts={})
        # self.player = MediaPlayer('http://localhost:1441/sample_stream.mp4', ff_opts=ff_opts, lib_opts={})
        self.running = True
        while self.running:
            time.sleep(0.01)
            frame, val = self.player.get_frame()
            if val == 'eof':
                break
            if frame is None:
                time.sleep(0.01)
            else:
                img, t = frame
                data = img.to_bytearray()[0]
                width, height = img.get_size()
                # the technical name for the 'rgb24' default pixel format is RGB888,
                # which is QImage.Format_RGB888 in the QImage format enum
                qimage = QImage(data, width, height, QImage.Format_RGB888)
                pixmap = QPixmap.fromImage(qimage)
                pixmap = pixmap.scaled(self.lbl.width(), self.lbl.height(),
                                       Qt.KeepAspectRatio, Qt.SmoothTransformation)
                self.lbl.setPixmap(pixmap)
                time.sleep(val)
            QApplication.processEvents()

    def closeEvent(self, event) -> None:
        self.running = False
        if self.player is not None:
            self.player.set_pause(True)
            self.player.close_player()
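
A minimal entry point for the window above (sketch; the relative video path in timerEvent means it must be run from the matching directory):

if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    win = MainWindow()
    win.resize(640, 480)
    win.show()
    sys.exit(app.exec_())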
Example #6
    def __update(self):
        """
        Read and add frame into the queue
        """
        player = MediaPlayer(self.video_path) if self.play_audio else None
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                break

            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()

                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stopped = True

                # if there are transforms to be done, might as well
                # do them on producer thread before handing back to
                # consumer thread. ie. Usually the producer is so far
                # ahead of consumer that we have time to spare.
                #
                # Python is not parallel but the transform operations
                # are usually OpenCV native so release the GIL.
                #
                # Really just trying to avoid spinning up additional
                # native threads and overheads of additional
                # producer/consumer queues since this one was generally
                # idle grabbing frames.
                if self.transform:
                    frame = self.transform(frame)

                # add the frame to the queue
                self.Q.put(frame)
            else:
                time.sleep(0.1)  # rest for 100 ms; the queue is full
        if player is not None:
            player.close_player()
        self.stream.release()
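
For completeness, the consumer side of self.Q might look like this sketch (the read method is hypothetical; only Q comes from the snippet above):

import queue

def read(self, timeout=1.0):
    """Pop the next transformed frame, or return None on timeout."""
    try:
        return self.Q.get(timeout=timeout)
    except queue.Empty:
        return None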
Example #7
def play(screen, asciiToNum, videoPath):
    import time

    import numpy as np
    from ffpyplayer.pic import SWScale
    from ffpyplayer.player import MediaPlayer

    player = MediaPlayer(videoPath)
    screen.nodelay(True)
    while True:
        frame, val = player.get_frame()

        if val == 'eof':
            break
        elif frame is None:
            time.sleep(0.01)
        else:
            time_bf = time.time()
            c = screen.getch()
            if c == ord('q'):
                break
            img, t = frame
            w, h = img.get_size()
            sws = SWScale(w,
                          h,
                          img.get_pixel_format(),
                          ofmt='yuv420p',
                          ow=w // 8,
                          oh=h // 8)
            img_scaled = sws.scale(img)
            # view the luma plane as a (h//8, w//8) grayscale array
            frame_scaled = np.frombuffer(
                img_scaled.to_bytearray()[0],
                dtype=np.uint8).reshape(h // 8, w // 8)
            transformedAscii = transform(frame_scaled, asciiToNum)
            s = arrayToString(transformedAscii)
            time_af = time.time()
            screen.erase()
            screen.addstr(s)
            screen.addstr(str(t))
            screen.refresh()
            # sleep for the frame delay minus the time spent rendering
            time.sleep(max(0, val - (time_af - time_bf)))

    player.close_player()
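
transform and arrayToString are project helpers that are not shown; an entirely illustrative stand-in that maps 8-bit luma onto an ASCII ramp could be:

import numpy as np

ASCII_RAMP = ' .:-=+*#%@'

def transform(gray_frame, asciiToNum=None):
    # quantize 0..255 luma into one bucket per ramp character
    return (gray_frame.astype(np.uint16) * len(ASCII_RAMP)) // 256

def arrayToString(indices):
    # one line of text per row of pixels
    return '\n'.join(''.join(ASCII_RAMP[i] for i in row) for row in indices)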
Example #8
from os.path import join
from time import sleep

import pygame
from ffpyplayer.player import MediaPlayer

# `state` and `convert_path` are project-local and defined elsewhere


def play_video(filename, check_for_events):
    filename = join(state.cdrom_path, convert_path(filename))
    print("playing", filename)
    player = MediaPlayer(filename)
    val = None
    while val != 'eof':
        if check_for_events():
            player.close_player()
            break

        frame, val = player.get_frame()
        if val != 'eof' and frame is not None:
            img, t = frame
            data = bytes(img.to_bytearray()[0])
            w, h = img.get_size()
            surf = pygame.image.fromstring(data, (w, h), "RGB")
            surf = pygame.transform.scale(surf, (state.height-2*state.gorigin[0], state.width-2*state.gorigin[1]))
 
            sleep(val)
            state.screen.blit(surf, [state.gorigin[0], state.gorigin[1]])

        # Flip the display
        pygame.display.flip()
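
check_for_events is supplied by the caller; a plausible implementation that aborts playback on quit or any key press (illustrative only) is:

def check_for_events():
    for event in pygame.event.get():
        if event.type in (pygame.QUIT, pygame.KEYDOWN):
            return True
    return False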
Example #9
    def audio_thread(self):
        player = MediaPlayer(self.video_path, ff_opts={'vn': True})  # audio only
        old_signal = self.positive_signal
        signal_timestamp = time.time()
        try:
            player.set_volume(1.0)
            self.audio_start_time_sec = time.time()
            while self.player_is_playing:
                signal = self.positive_signal
                if old_signal != signal:
                    if signal:
                        player.set_volume(1.0)
                        signal_timestamp = time.time()
                    elif time.time() - signal_timestamp > 2:
                        player.set_volume(0.4)
                        signal_timestamp = time.time()
                    old_signal = signal
                time.sleep(0.1)
        except Exception as e:
            print(e)
        finally:
            self.player_is_playing = False
            player.close_player()
Example #10
import os
import time

from PIL import Image
from ffpyplayer.player import MediaPlayer


class VideoStream:
    def __init__(self, video_source=None):
        ff_opts = {'paused': True, 'autoexit': False}  # player options
        self.video_source = video_source
        # Open the video source
        self.player = MediaPlayer(video_source, ff_opts=ff_opts)
        # TODO: add a short pause so the MediaPlayer finishes loading before
        # reading its metadata; get the frame rate for the sync delay
        while self.player.get_metadata()['src_vid_size'] == (0, 0):
            time.sleep(0.01)
        data = self.player.get_metadata()
        print('data -->', data)
        self.f_rate = data['frame_rate']
        print('delay -> ', self.f_rate)
        self.w, self.h = data['src_vid_size']
        print('WxH -> ', self.w, self.h)
        self.pts = self.player.get_pts()  # returns the elapsed play time (float)
        print('pts ->', self.pts)
        self.duration = data['duration']
        print('duration', self.duration)
        self.pause = self.player.get_pause()  # returns whether the player is paused
        print('pause ->', self.pause)
        self.volume = self.player.get_volume()  # float: a value between 0.0 and 1.0
        print('volume ->', self.volume)
        self.player.toggle_pause()  # toggles the player's pause state
        # self.player.set_pause(False)  # pauses or un-pauses the file. state: bool
        cond = True
        while cond:
            self.l_frame, self.val = self.player.get_frame()
            if self.val == 'eof':
                print('cannot open source: ', video_source)
                break
            elif self.l_frame is None:
                time.sleep(0.01)
            else:
                self._imagen, self.pts = self.l_frame
                print('pts ->', self.pts)
                # arr = self._imagen.to_memoryview()[0] # array image
                # self.imagen = Image.frombytes("RGB", self.original_size, arr.memview)
                # self.imagen.show()
                cond = False

    # properties.
    @property
    def f_rate(self):
        return self.__f_rate

    @f_rate.setter
    def f_rate(self, val):
        # frame_rate arrives as a (numerator, denominator) tuple
        vn, vd = val
        if vd > 1:
            self.__f_rate = int(round(vn / vd))
        elif vd == 1:
            self.__f_rate = vn
        else:
            self.__f_rate = 30  # unknown rate; fall back to a common default

    # end properties.

    def get_frame(self):
        '''
        Return values:
            val : 'eof' or 'paused'
            pts : presentation time of the frame
            imagen : frame image
        Returns (val, pts, imagen)
        '''
        self.l_frame, self.val = self.player.get_frame()
        if self.val == 'eof':
            # end-of-file condition: leave both the if and the caller's loop
            # self.player.toggle_pause()  # put the player on pause
            return self.val, None, None
        elif self.l_frame is None:
            time.sleep(0.01)
            return self.val, None, None
        else:
            # import math
            self._imagen, self.pts = self.l_frame
            return self.val, self.pts, self._imagen
            # w, h = self._imagen.get_size()
            # linesize = [int(math.ceil(w * 3 / 32.) * 32)]
            # self._imagen = pic.Image(plane_buffers=[bytes(b' ') * (h * linesize[0])],
            #             pix_fmt=self._imagen.get_pixel_format(), size=(w, h), linesize=linesize)
            # self._imagen.get_linesizes(keep_align=True)

            # if self.new_size is not None:
            #     sws = None
            #     n_w , n_h = self.new_size
            #     if n_w > n_h:
            #         sws = pic.SWScale(w, h, self._imagen.get_pixel_format(), oh=n_h)
            #     else:
            #         sws = pic.SWScale(w, h, self._imagen.get_pixel_format(), ow=n_w)
            #     self._imagen = sws.scale(self._imagen)

            # size = self._imagen.get_size()
            # arr = self._imagen.to_memoryview()[0] # array image
            # self.imagen = Image.frombytes("RGB", size, arr.memview)
            # print('>>> videostream::get_frame()::self.pts ->', self.pts)

    def toggle_pause(self):
        '''
            Function: toggle_pause
        '''
        try:  # toggling can fail if the player was already closed
            self.player.toggle_pause()
            # self.player = None
        except Exception:
            pass

    def seek(self, pts=None, relative=False, accurate=False):
        if pts is None:
            return
        self.player.seek(pts, relative=relative, accurate=accurate)

    def snapshot(self, road=None):
        '''
        get current frame
        '''
        img = self.l_frame[0]
        if img is not None:
            size = img.get_size()
            arr = img.to_memoryview()[0]  # array image
            img = Image.frombytes("RGB", size, arr.memview)
            # save the frame to disk
            time_str = time.strftime("%d-%m-%Y-%H-%M-%S")
            frame_name = f"frame-{time_str}.jpg"
            if not road:
                ruta = os.path.dirname(self.video_source)
                name_out = os.path.join(ruta, frame_name)
            else:
                name_out = os.path.join(road, frame_name)
            img.save(name_out)

    # Release the video source when the object is destroyed
    def __del__(self):
        self.player.close_player()
        print('__del__')
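
Illustrative usage of the class above (the path is hypothetical):

stream = VideoStream('sample.mp4')
val, pts, imagen = stream.get_frame()
if imagen is not None:
    stream.snapshot()  # writes frame-<timestamp>.jpg next to the source video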
Example #11
import numpy as np
import pandas as pd

from ffpyplayer.player import MediaPlayer

# get_frame() and update_range() are project helpers defined elsewhere;
# a stand-in sketch for get_frame() follows the example


def calculate_frame_diffs_wcall(video_file,
                                masks,
                                cut_ranges,
                                pixel_diff_threshold=10,
                                callback=None,
                                sec_callback=5):
    """
    Calculates frame differences for a video file.
    """
    distances = []
    column_names = (["Range", "Time", "Overall"] +
                    ["ROI{0}".format(j) for j, _ in enumerate(masks)])
    masks = [m.flatten() for m in masks]
    distances.append([-1, -1, 1] + [np.mean(m) for m in masks])

    player = MediaPlayer(video_file,
                         thread_lib="SDL",
                         ff_opts={
                             "out_fmt": "gray8",
                             "an": True,
                             "sn": True
                         })

    frame = get_frame(player)
    if frame is None:
        return pd.DataFrame(distances, columns=column_names)
    img, t = frame

    metadata = player.get_metadata()
    duration, vid_size = metadata["duration"], metadata["src_vid_size"]
    vid_size = (vid_size[1], vid_size[0])

    oframe = np.asarray(img.to_memoryview(keep_align=False)[0], dtype=np.uint8)

    range_end = [r * duration for r in cut_ranges[0]]
    range_selected = [True] + cut_ranges[1]

    t0 = 0
    last_callback = 0
    crange = 0

    while True:
        # Get next frame
        frame = get_frame(player, t0)
        if frame is None:
            break
        img, t = frame

        # Update current range
        if t >= range_end[crange]:
            nrange = update_range(crange, range_selected)
            if nrange == len(range_selected):
                break
            if nrange > crange:
                if t < range_end[nrange - 1]:
                    player.seek(range_end[nrange - 1], relative=False)
                    oframe = None
                    t0 = range_end[nrange - 1]
                    continue
                crange = nrange

        # Calculate frame difference
        cframe = np.asarray(img.to_memoryview(keep_align=False)[0])
        if oframe is not None:
        # uint8 arithmetic wraps around, so this conjunction approximates
        # abs(cframe - oframe) > pixel_diff_threshold
        frame_diff = ((cframe - oframe > pixel_diff_threshold) &
                      (oframe - cframe > pixel_diff_threshold))
            distances.append([crange, t, np.mean(frame_diff)] +
                             [np.mean(frame_diff & mask) for mask in masks])
            # Callback
            if callback is not None and (t - last_callback) >= sec_callback:
                last_callback = t
                callback(t / duration, frame_diff.reshape(vid_size))
        oframe = cframe
        t0 = t

    player.close_player()

    return pd.DataFrame(distances, columns=column_names)
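
The loop above relies on a get_frame(player, t0=...) helper that is not shown; a minimal stand-in consistent with how it is called (None at EOF, polls until a frame newer than t0 arrives) might be:

import time

def get_frame(player, t0=-1.0):
    while True:
        frame, val = player.get_frame()
        if val == 'eof':
            return None
        if frame is None:
            time.sleep(0.01)
            continue
        if frame[1] > t0:
            return frame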
Example #12
import ctypes
import threading
import time

import cv2
import numpy as np
from OpenGL import GL as pygl  # assumption: PyOpenGL supplies the GL_* constants
from PySide6 import QtGui, QtOpenGL, QtOpenGLWidgets  # assumption: PySide6 binding
from shiboken6 import VoidPtr

from ffpyplayer.player import MediaPlayer

GL = QtGui.QOpenGLFunctions  # alias; the class statement below uses the same name


class DowGlImage(QtOpenGLWidgets.QOpenGLWidget, QtGui.QOpenGLFunctions):
    __vertex_shader = """
    #version 440 core
    layout(location = 0) in vec3 inPosition;
    layout(location = 1) in vec2 texCoord;
    layout(location = 2) uniform vec2 biasTexCoord;

    layout(location = 0) out vec3 outColor;
    layout(location = 1) out vec2 outCoord;

    void main()
    {
      outColor = vec3(1.0f, 0.5f, 1.0f);
      outCoord = texCoord;
      float pos_x = inPosition.x * biasTexCoord.x;
      float pos_y = inPosition.y * biasTexCoord.y;

      gl_Position = vec4(pos_x, pos_y, 0.0, 1.0);
    }"""

    __frag_shader = """
    #version 440 core
    layout(location = 0) in vec3 inColor;
    layout(location = 1) in vec2 texCoord;
    layout(location = 0) out vec4 outColor;
    uniform sampler2D inTexture;

    void main()
    {
      outColor = texture(inTexture, texCoord);
    }
    """

    def __init__(self, parent, tag=None):
        QtOpenGLWidgets.QOpenGLWidget.__init__(self, parent)
        GL.__init__(self)
        self.__data = np.array([
            -1.0, -1.0, 0.0, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0,
            1.0, 1.0, 1.0, -1.0, 0.0, 1.0, 0.0
        ],
                               dtype=ctypes.c_float)

        self.tag = tag
        self.__mutex = threading.Lock()
        self._is_video = False
        self._is_video_playing = False

        self.__texture_generator = None
        self.__player = None
        self.__uniform_tex_bias = -1

    def __del__(self):
        # note: Thread._stop() is a private CPython detail; a stop flag
        # checked inside __video_play() would be more robust
        self.__video_thread._stop()
        self.__video_thread.join()

    def initializeGL(self):
        self.initializeOpenGLFunctions()
        self.glClearColor(0, 0, 0, 1)

        self.__program = QtOpenGL.QOpenGLShaderProgram()
        self.__program.addShaderFromSourceCode(QtOpenGL.QOpenGLShader.Vertex,
                                               self.__vertex_shader)
        self.__program.addShaderFromSourceCode(QtOpenGL.QOpenGLShader.Fragment,
                                               self.__frag_shader)
        self.__program.link()

        self.__uniform_tex_bias = self.__program.uniformLocation(
            "biasTexCoord")

        self.__vao = QtOpenGL.QOpenGLVertexArrayObject()
        self.__vao.create()
        self.__vao.bind()

        self.__buffer = QtOpenGL.QOpenGLBuffer(
            QtOpenGL.QOpenGLBuffer.Type.VertexBuffer)
        self.__buffer.create()
        self.__buffer.bind()

        float_size = ctypes.sizeof(ctypes.c_float)
        null = VoidPtr(0)
        pointer = VoidPtr(3 * float_size)

        self.__buffer.allocate(self.__data.tobytes(),
                               self.__data.size * float_size)
        self.glVertexAttribPointer(0, 3, int(pygl.GL_FLOAT),
                                   int(pygl.GL_FALSE), 5 * float_size, null)
        self.glVertexAttribPointer(1, 2, int(pygl.GL_FLOAT),
                                   int(pygl.GL_FALSE), 5 * float_size, pointer)
        self.glEnableVertexAttribArray(0)
        self.glEnableVertexAttribArray(1)
        self.__vao.release()
        self.__buffer.release()

        self.__video_thread = threading.Thread(target=self.__video_play,
                                               args=(),
                                               daemon=True)
        self.__video_thread.start()

    def resizeGL(self, w, h):
        self.glViewport(0, 0, w, h)

    def paintGL(self):
        self.glClear(pygl.GL_COLOR_BUFFER_BIT)

        self.__mutex.acquire()

        if self.__texture_generator is not None:
            texture = None
            try:
                texture = next(self.__texture_generator)
            except StopIteration:
                pass

            if texture is not None:
                rate = min(self.size().width() / texture.width(),
                           self.size().height() / texture.height())
                rate_x = (texture.width() / self.size().width()) * rate
                rate_y = (texture.height() / self.size().height()) * rate
                self.__program.bind()
                if self.__uniform_tex_bias > -1:
                    self.__program.setUniformValue(self.__uniform_tex_bias,
                                                   rate_x, rate_y)

                self.__vao.bind()
                self.glActiveTexture(pygl.GL_TEXTURE0)
                texture.bind()
                # GL_POLYGON is absent from core profiles; a triangle fan
                # draws the same convex quad in both profiles
                self.glDrawArrays(int(pygl.GL_TRIANGLE_FAN), 0, 4)
                texture.release()
                self.__vao.release()
                self.__program.release()
                if self._is_video:
                    texture.destroy()
            else:
                self.__texture_generator = None
                self._is_video = False

        self.__mutex.release()

    def __create_texture(self, image):
        texture = QtOpenGL.QOpenGLTexture(QtOpenGL.QOpenGLTexture.Target2D)
        texture.setMinMagFilters(QtOpenGL.QOpenGLTexture.Filter.Nearest,
                                 QtOpenGL.QOpenGLTexture.Filter.Linear)
        texture.setBorderColor(0, 0, 0, 1)
        texture.setWrapMode(QtOpenGL.QOpenGLTexture.ClampToBorder)
        texture.setAutoMipMapGenerationEnabled(False)
        texture.setData(
            QtGui.QImage(image, image.shape[1], image.shape[0],
                         QtGui.QImage.Format_RGBA8888).mirrored())
        return texture

    def __video_stream(self, filename):
        video = cv2.VideoCapture(str(filename))
        if self.__player is not None:
            self.__player.close_player()
            self.__player = None

        self.__player = MediaPlayer(str(filename))
        self.__player.set_volume(1.0)
        self._is_video_playing = True
        while video.isOpened():
            ret, frame = video.read()
            self.__player.get_frame(show=False)
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
                tex = self.__create_texture(frame)
                yield tex
            else:
                video.set(cv2.CAP_PROP_POS_FRAMES, 0)
                self.__player.seek(0, relative=False)

        self._is_video_playing = False
        return None

    def __image_stream(self, filename):
        image = cv2.imread(str(filename), cv2.IMREAD_UNCHANGED)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
        tex = self.__create_texture(image)
        if self.__player is not None:
            self.__player.close_player()
            self.__player = None

        while True:
            yield tex

    def SetImage(self, filename):
        self.__mutex.acquire()
        self._is_video = False
        if self.__texture_generator is not None:
            tex = next(self.__texture_generator)
            tex.destroy()
        self.__texture_generator = self.__image_stream(filename)
        self.__mutex.release()

    def SetVideo(self, filename):
        self.__mutex.acquire()
        self._is_video = True
        self.__texture_generator = self.__video_stream(filename)
        self.__mutex.release()

    def Clear(self):
        self.__mutex.acquire()
        self.__texture_generator = None
        self.__mutex.release()

    def __video_play(self):
        # schedule repaints at roughly 24 fps; update() triggers paintGL()
        while True:
            try:
                self.update()
            except Exception:
                break
            time.sleep(0.0416)
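
A minimal harness for the widget (sketch; assumes the PySide6 imports above and a hypothetical test file video.mp4):

if __name__ == '__main__':
    import sys
    from PySide6 import QtWidgets

    app = QtWidgets.QApplication(sys.argv)
    w = DowGlImage(None)
    w.resize(800, 450)
    w.show()
    w.SetVideo('video.mp4')
    sys.exit(app.exec())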
Example #13
import base64
import io
import os
import threading
import time
import tkinter as tk
from tkinter import LabelFrame, Scale, Scrollbar, Text, filedialog, messagebox

import numpy as np
from PIL import Image, ImageTk
from pytube import Playlist, YouTube  # assumption: pytube supplies YouTube/Playlist

from ffpyplayer.player import MediaPlayer

# `icon` and `blank` are base64-encoded image strings defined elsewhere


class Application(tk.Frame):
    def __init__(self, master=None):
        super().__init__(master)
        self.master = master
        self.master.geometry("640x420")
        self.winfo_toplevel().title("YouTube downloader")
        #Create icon from base64 string
        icon_file = io.BytesIO(base64.b64decode(icon))
        img = Image.open(icon_file, mode='r')
        self.master.iconphoto(True, ImageTk.PhotoImage(image=img))

        self.video_link = tk.StringVar()  #Link to the youtube video
        self.download_path = tk.StringVar()  #Folder to download videos to
        self.video_folder = tk.StringVar(
        )  #Represents the folder to play videos from
        self.selected_video = 0  #Current playing video, idx
        self.playlist = []  #Array of video's to play
        self.downloadLeft = [0, 0]
        self.download_count = None  #Download count text widget
        self.video_list = None
        self.video_embed = None
        #Styles
        self.mainBgColor = "#121212"
        self.labelBgColor = "#1E1E1E"
        self.fontColor = "white"
        self.btnHighlight = "#FF00FF"

        #Video player
        self.video_player = None
        #Create blank image for video player
        img_str = io.BytesIO(base64.b64decode(blank))
        img = Image.open(img_str, mode='r')
        self.blank_img = ImageTk.PhotoImage(image=img)

        #Play/Stop video buttons
        self.playButton = None
        self.stopButton = None
        self.pauseButton = None
        self.playlistChanged = False  #Flag for seeing whether playlist has changed
        self.songChanged = False  #Flag for seeing if user pressed next button
        self.playback_buttons_frame = None  #Bind the playback buttons frame for swapping buttons inside of it
        self.isPlaying = False  #Flag used seeing if video is being streamed (Used in next/prev song, not in thread termination!)
        self.curVolume = 50
        self.curBassBoost = 0  #Maybe used one day
        self.now_playing = ""

        self.master.resizable(False, False)
        self.create_widgets()
        self.fps = 30

    def create_widgets(self):
        self.master.config(bg=self.mainBgColor)

        ##DOWNLOADING
        download_frame = LabelFrame(self.master, bg=self.mainBgColor, width=20)
        download_frame.grid(row=0, column=0, pady=10)

        #Input/Link frame
        input_frame = LabelFrame(download_frame, bg=self.mainBgColor, bd=0)
        input_frame.grid(row=0, column=0, padx=5)

        link_label = tk.Label(input_frame,
                              text="YouTube link: ",
                              width=10,
                              bg=self.mainBgColor,
                              fg=self.fontColor)
        link_label.grid(row=1, column=0, pady=5, padx=20)

        self.master.linkText = tk.Entry(input_frame,
                                        width=54,
                                        textvariable=self.video_link,
                                        bg=self.labelBgColor,
                                        fg=self.fontColor)
        self.master.linkText.grid(row=1, column=1, padx=2)

        #destination frame (for better button positioning)
        destination_frame = LabelFrame(download_frame,
                                       bg=self.mainBgColor,
                                       bd=0)
        destination_frame.grid(row=2, column=0)

        destination_label = tk.Label(destination_frame,
                                     text="Destination: ",
                                     width=10,
                                     bg=self.mainBgColor,
                                     fg=self.fontColor)
        destination_label.grid(row=0, column=0, padx=20)

        self.master.destinationText = tk.Entry(destination_frame,
                                               width=40,
                                               textvariable=self.download_path,
                                               bg=self.labelBgColor,
                                               fg=self.fontColor)
        self.master.destinationText.grid(row=0, column=1, padx=(4, 0))

        browse_B = tk.Button(destination_frame,
                             text="Browse",
                             command=self.BrowseDestination,
                             width=10,
                             bg=self.labelBgColor,
                             fg=self.fontColor,
                             activebackground=self.btnHighlight,
                             activeforeground="black")
        browse_B.grid(row=0, column=2, padx=4)

        #Download frame
        download_btn_frame = LabelFrame(download_frame,
                                        bg=self.mainBgColor,
                                        bd=0)
        download_btn_frame.grid(row=3, column=0)

        download_b = tk.Button(download_btn_frame,
                               text="Download",
                               command=self.Download,
                               width=20,
                               bg=self.labelBgColor,
                               fg="white",
                               activebackground=self.btnHighlight,
                               activeforeground="black")
        download_b.grid(row=0, column=0)

        self.download_count = tk.Text(download_btn_frame,
                                      width=22,
                                      height=1,
                                      bg=self.mainBgColor,
                                      fg="white",
                                      bd=0)
        self.download_count.grid(row=0, column=1, padx=5)
        self.download_count.insert(tk.END, "Download status: ")
        self.download_count.insert(tk.END, self.downloadLeft[0])
        self.download_count.insert(tk.END, " / ")
        self.download_count.insert(tk.END, self.downloadLeft[1])

        ##VIDEO PLAYER

        #Playback buttons and volume sliders container
        playback_container_frame = LabelFrame(self.master,
                                              bg=self.mainBgColor,
                                              bd=0)
        playback_container_frame.grid(row=1, column=0, pady=5)

        #Container that has bottom and top frame for playback buttons
        buttons_container = LabelFrame(playback_container_frame,
                                       bg=self.mainBgColor,
                                       bd=0)
        buttons_container.grid(row=0, column=0)

        #Top frame (Empty)
        top_frame = LabelFrame(buttons_container,
                               bg=self.mainBgColor,
                               height=22,
                               bd=0)
        top_frame.grid(row=0, column=0)

        self.playback_buttons_frame = LabelFrame(buttons_container,
                                                 bg=self.mainBgColor,
                                                 bd=0)
        self.playback_buttons_frame.grid(row=1,
                                         column=0,
                                         padx=(50, 0),
                                         pady=(8, 0))

        prevVid = tk.Button(self.playback_buttons_frame,
                            text="Prev",
                            command=self.PreviousVideo,
                            width=10,
                            bg=self.labelBgColor,
                            fg="white",
                            activebackground=self.btnHighlight,
                            activeforeground="black")

        prevVid.grid(row=0, column=0)

        self.playButton = tk.Button(self.playback_buttons_frame,
                                    text="Play",
                                    command=self.PlayVideo,
                                    width=10,
                                    bg=self.labelBgColor,
                                    fg="white",
                                    activebackground=self.btnHighlight,
                                    activeforeground="black")
        self.playButton.grid(row=0, column=1)

        nextVid = tk.Button(self.playback_buttons_frame,
                            text="Next",
                            command=self.NextVideo,
                            width=10,
                            bg=self.labelBgColor,
                            fg="white",
                            activebackground=self.btnHighlight,
                            activeforeground="black")
        nextVid.grid(row=0, column=2)

        self.pauseButton = tk.Button(self.playback_buttons_frame,
                                     text="Pause",
                                     command=self.PauseVideo,
                                     width=10,
                                     bg=self.labelBgColor,
                                     fg="white",
                                     activebackground=self.btnHighlight,
                                     activeforeground="black")

        self.pauseButton.grid(row=0, column=3)

        #Volume slider and bass EQ
        eq_frame = LabelFrame(playback_container_frame,
                              bg=self.mainBgColor,
                              bd=0)
        eq_frame.grid(row=0, column=1, padx=50)

        volumeText = tk.Text(eq_frame,
                             width=10,
                             height=1,
                             bg=self.mainBgColor,
                             fg="white",
                             bd=0)
        volumeText.tag_configure("center", justify="center")
        volumeText.insert("1.0", "Volume")
        volumeText.tag_add("center", "1.0", "end")
        volumeText.grid(row=0, column=0, padx=(30, 0))
        volume_slider = Scale(eq_frame,
                              from_=0,
                              to=100,
                              orient=tk.HORIZONTAL,
                              bg=self.mainBgColor,
                              bd=0,
                              fg="white",
                              troughcolor=self.labelBgColor,
                              highlightbackground=self.mainBgColor,
                              activebackground="#FF00FF",
                              command=self.VolumeSlider,
                              length=150)

        volume_slider.grid(row=1, column=0, padx=(30, 0))
        volume_slider.set(self.curVolume)

        #EQ/Bass boost slider for future use
        #TODO:: Re-compile ffpyplayer module with custom sdl_audio_callback function call, which will take use DSP for changing the pitch of the audio
        #Bind this slider to callback the sdl_audio_callback custom function
        # bassText = tk.Text(eq_frame, width=10, height=1, bg=self.mainBgColor, fg="white", bd=0)
        # bassText.insert(tk.END, "Bass boost")
        # bassText.grid(row=0, column=1)
        # bass_slider = Scale(eq_frame, from_=0, to=100, orient=tk.HORIZONTAL,
        #                     bg=self.mainBgColor, bd=0, fg="white",
        #                     troughcolor=self.labelBgColor, highlightbackground=self.mainBgColor,
        #                     activebackground="#FF00FF")
        # bass_slider.grid(row=1, column=1)
        # bass_slider.set(self.curBassBoost)

        #Video stream frame
        video_player_frame = LabelFrame(self.master,
                                        bg=self.labelBgColor,
                                        bd=1)
        video_player_frame.grid(row=2, column=0, padx=17)

        self.video_embed = tk.Label(video_player_frame,
                                    text="Video",
                                    image=self.blank_img,
                                    bg=self.labelBgColor)
        self.video_embed.grid(row=0, column=0)

        self.now_playing = Text(video_player_frame,
                                width=50,
                                height=1,
                                bg=self.mainBgColor,
                                fg="white")
        self.now_playing.insert(tk.END, "Now playing: ")
        self.now_playing.grid(row=1, column=0)

        #Video queue frame
        queue_frame = LabelFrame(video_player_frame,
                                 bg=self.labelBgColor,
                                 bd=0)
        queue_frame.grid_rowconfigure(0, weight=0)
        queue_frame.grid_columnconfigure(0, weight=1)
        queue_frame.grid(row=0, column=1)

        queue_buttons = LabelFrame(queue_frame, bg=self.labelBgColor)
        queue_buttons.grid(row=0, column=0)

        browse_In = tk.Button(queue_buttons,
                              text="Browse",
                              command=self.BrowseInputFolder,
                              width=10,
                              bg=self.labelBgColor,
                              fg=self.fontColor,
                              bd=1,
                              activebackground=self.btnHighlight,
                              activeforeground="black")
        browse_In.grid(row=0, column=1)

        playAll = tk.Button(queue_buttons,
                            text="Select all",
                            command=self.SelectAll,
                            width=10,
                            bg=self.labelBgColor,
                            fg=self.fontColor,
                            bd=1,
                            activebackground=self.btnHighlight,
                            activeforeground="black")
        playAll.grid(row=0, column=2)

        self.video_list = tk.Listbox(queue_frame,
                                     font=("Helvetica", 12),
                                     selectmode=tk.EXTENDED,
                                     exportselection=0,
                                     height=9,
                                     bg=self.labelBgColor,
                                     fg=self.fontColor,
                                     bd=0,
                                     selectbackground=self.btnHighlight)
        self.video_list.grid(row=1, column=0)
        self.video_list.bind("<<ListboxSelect>>", self.listbox_sel_callback)
        #self.video_list.bind('<Double-Button>', self.PlayVideo)            #Double clicking video causes thread exceptions for some reason

        scrollbar = Scrollbar(queue_frame,
                              orient="vertical",
                              command=self.video_list.yview,
                              bg=self.labelBgColor,
                              highlightcolor=self.btnHighlight,
                              bd=0)
        self.video_list.config(yscrollcommand=scrollbar.set)
        scrollbar.grid(row=1, column=1, sticky='ns')

        self.curVolume = 50
        #Search bar for videos
        # search_bar = tk.Entry(queue_frame, bd=0, )
        # search_bar.grid(row=2,column=0)

    def listbox_sel_callback(self, event):
        self.playlist = []
        indices = self.video_list.curselection()
        for i in indices:
            self.playlist.append(self.video_list.get(i))
        self.playlistChanged = True

    def BrowseInputFolder(self):
        video_dir = filedialog.askdirectory(initialdir="C:\\YoutubeVideos")
        self.video_folder.set(video_dir)
        self.video_list.delete(0, tk.END)
        for root, dirs, files in os.walk(self.video_folder.get()):
            for filename in files:
                self.video_list.insert(tk.END, filename)

    def PlayVideo(self):
        global stop_thread
        stop_thread = True
        time.sleep(0.05)  #Dangerous way of waiting for thread lol
        stop_thread = False
        self.isPlaying = True
        self.playlistChanged = False

        # if self.selected_video >= 0 and self.selected_video < len(self.playlist):
        self.start_videostream()
        #self.video_player.set_volume(float(self.curVolume)/100)
        thread = threading.Thread(target=self.Video_data_stream)
        thread.daemon = True
        thread.start()
        self.playButton.grid_forget()
        self.stopButton = tk.Button(self.playback_buttons_frame,
                                    text="Stop",
                                    command=self.StopVideo,
                                    width=10,
                                    bg="#FF00FF",
                                    fg="black")
        self.stopButton.grid(row=0, column=1)

        #Change the "now playing"
        self.changeNowPlaying()

    def changeNowPlaying(self):
        self.now_playing.delete("1.0", tk.END)
        self.now_playing.insert(tk.END, "Now playing: ")
        if self.isPlaying:
            self.now_playing.insert(tk.END, self.playlist[self.selected_video])

    def StopVideo(self):
        global stop_thread
        global pause_thread
        self.isPlaying = False
        stop_thread = True
        pause_thread = True
        self.PauseVideo()

        self.stopButton.grid_forget()
        self.playButton = tk.Button(self.playback_buttons_frame,
                                    text="Play",
                                    command=self.PlayVideo,
                                    width=10,
                                    bg=self.labelBgColor,
                                    fg=self.fontColor)
        self.playButton.grid(row=0, column=1)
        self.changeNowPlaying()

    def PauseVideo(self):
        global pause_thread
        if pause_thread:
            #Why isn't this done in play/stop as well lol
            self.pauseButton.config(text="Pause",
                                    bg=self.labelBgColor,
                                    fg="white")
            pause_thread = False
            self.video_player.set_pause(False)
        else:
            self.pauseButton.config(text="Unpause",
                                    bg=self.btnHighlight,
                                    fg="black")
            pause_thread = True
            self.video_player.set_pause(True)

    def start_videostream(self):
        #Start new instance of player
        if self.video_player:
            self.video_player.close_player()
        cVol = float(self.curVolume) / 100
        print(cVol)
        self.video_player = MediaPlayer(self.video_folder.get() + "\\" +
                                        self.playlist[self.selected_video],
                                        ff_opts={
                                            'paused': True,
                                            'volume': 0.03
                                        })
        self.video_player.set_size(400, 200)
        #while not self.video_player:
        #    continue
        time.sleep(0.1)
        if self.video_player:
            self.video_player.set_volume(cVol)
        self.video_player.set_pause(False)

    def NextVideo(self):
        if self.isPlaying == False:
            return
        #Destroy current player if there's one
        self.video_player.close_player()

        #Inform the video stream that video was changed
        self.songChanged = True

        #If playlist was changed, reset the index to 0
        if self.playlistChanged:
            self.selected_video = 0
            self.playlistChanged = False
        #Otherwise just increment idx or start from 0 idx
        elif self.selected_video < len(self.playlist) - 1:
            self.selected_video += 1
        else:
            self.selected_video = 0

        self.start_videostream()
        #self.video_player.set_volume(float(self.curVolume)/100)
        self.changeNowPlaying()

    def PreviousVideo(self):
        if self.isPlaying == False:
            return

        #Destroy current player if there's one
        self.video_player.close_player()

        self.songChanged = True

        if self.playlistChanged:
            self.selected_video = 0
            self.playlistChanged = False
        elif self.selected_video > 0:
            self.selected_video -= 1
        else:
            self.selected_video = len(self.playlist) - 1

        self.start_videostream()
        self.changeNowPlaying()

    def SelectAll(self):
        #Select every line in listbox / Every video from list
        for i in range(0, self.video_list.size()):
            self.video_list.selection_set(i)
        #Since manual selection doesn't call callback functions, just add them to playlist manually
        self.playlist = []
        indices = self.video_list.curselection()
        for i in indices:
            self.playlist.append(self.video_list.get(i))
        self.playlistChanged = True

    def VolumeSlider(self, value):
        if self.video_player:
            self.video_player.set_volume(float(value) / 100)
        self.curVolume = value

    def Video_data_stream(self):
        global stop_thread
        global pause_thread
        stop_thread = False
        pause_thread = False

        #Start video/audio stream
        #TODO: len(self.playlist) can change while streaming
        while True:
            try:

                frame, val = self.video_player.get_frame()
                if val == 'eof':
                    self.video_player.close_player()
                    self.NextVideo()  #Increment the video index
                    self.video_player.set_volume(float(self.curVolume) / 100)
                    #If we still have videos left in playlist, play another one
                    # if self.selected_video < len(self.playlist):
                    #     self.video_player = MediaPlayer(self.video_folder.get() + "\\" + self.playlist[self.selected_video])
                    #     self.video_player.set_size(400, 200)
                elif frame is None:
                    time.sleep(0.01)
                else:
                    image, t = frame
                    w, h = image.get_size()
                    img = np.asarray(image.to_bytearray()[0]).reshape(h, w, 3)
                    the_frame = ImageTk.PhotoImage(Image.fromarray(img))
                    self.video_embed.config(image=the_frame)
                    self.video_embed.image = the_frame
                    if stop_thread:
                        self.video_player.close_player()
                        #Reset the embed image
                        self.video_embed.config(image=self.blank_img)
                        return
                    while pause_thread:
                        #Sleep instead of spinning at 100% CPU
                        if stop_thread:
                            pause_thread = False
                            return
                        time.sleep(0.05)
                    if val <= 1:
                        time.sleep(val)
            except Exception:
                #Exception (e.g. outside-thread changes to the player can raise)
                continue

    def BrowseDestination(self):
        download_directory = filedialog.askdirectory(
            initialdir="C:\\YoutubeVideos")
        self.download_path.set(download_directory)

    def Download(self):
        self.Update_Download_Status()

        link = self.video_link.get()
        download_folder = self.download_path.get()
        if "list" in link:
            playlist = Playlist(link)
            thread = threading.Thread(target=self.Download_Playlist,
                                      args=(
                                          playlist,
                                          download_folder,
                                      ))
            thread.daemon = True
            thread.start()
        else:
            thread = threading.Thread(target=self.Download_Single,
                                      args=(
                                          link,
                                          download_folder,
                                      ))
            thread.daemon = True
            thread.start()

    def Download_Playlist(self, playlist, folder):
        self.downloadLeft = [0, len(playlist.video_urls)]
        for url in playlist.video_urls:
            self.Update_Download_Status()
            try:
                getVideo = YouTube(url)
                video_stream_buffer = getVideo.streams.first()
                video_stream_buffer.download(folder)
                self.downloadLeft[0] += 1
            except:
                if self.downloadLeft[1] > 0:
                    self.downloadLeft[1] -= 1
                continue
        self.Update_Download_Status()
        messagebox.showinfo("Download complete!",
                            "Downloaded videos from playlist to:\n" + folder)

    def Download_Single(self, link, folder):
        self.downloadLeft = [0, 1]
        try:
            self.Update_Download_Status()
            getVideo = YouTube(link)

            video_stream_buffer = getVideo.streams.first()
            video_stream_buffer.download(folder)
            self.downloadLeft = [1, 1]
            messagebox.showinfo("Download complete!",
                                "Downloaded video to:\n" + folder)
        except:
            messagebox.showinfo("Download failed!",
                                "Video not available:\n" + folder)
        self.Update_Download_Status()

    def Update_Download_Status(self):
        self.download_count.delete('1.0', tk.END)
        self.download_count.insert(tk.END, "Download status: ")
        self.download_count.insert(tk.END, self.downloadLeft[0])
        self.download_count.insert(tk.END, " / ")
        self.download_count.insert(tk.END, self.downloadLeft[1])
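
A typical entry point for the application above (sketch; the icon and blank base64 strings must already be defined):

if __name__ == '__main__':
    root = tk.Tk()
    app = Application(master=root)
    app.mainloop()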
Example #14
import os

# PyQt5 assumed; the PySide bindings expose the same names
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QIcon, QImage, QPixmap
from PyQt5.QtWidgets import QFileDialog, QMainWindow, QMessageBox

from ffpyplayer.player import MediaPlayer

# Ui_MainWindow is the Qt Designer-generated form class for this project


class Window(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0
        self.loop = 1
        self.flag = True

    def Listadd(self):
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for filelist in f:
                    filelist = filelist.strip()
                    self.list.addItem(filelist)

    def Add(self):
        filelists, _ = QFileDialog.getOpenFileNames(self, 'Add to playlist', '.',
                                                    'Media files (*)')
        self.list.addItems(filelists)
        self.Listchanged()

    def Remove(self):
        self.list.takeItem(self.list.currentRow())
        self.Listchanged()

    def Clear(self):
        self.list.clear()
        os.remove('CPlayerlist.txt')

    def Listchanged(self):
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write(self.list.item(i).text() + '\n')

    def Loop(self):
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('Loop playback')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('Disable loop')

    def Play(self):
        try:
            if self.flag:
                self.playitem = self.list.currentItem().text()
                self.player = MediaPlayer("%s" % self.playitem)
                self.timer = QTimer()
                self.timer.start(50)
                self.timer.timeout.connect(self.Show)
                self.steptimer = QTimer()
                self.steptimer.start(1000)
                self.steptimer.timeout.connect(self.Step)
                self.flag = False
                self.bplay.setIcon(QIcon(r'img\pause.png'))
                self.bplay.setToolTip('Pause')
            else:
                if self.list.currentItem().text() == self.playitem:
                    self.player.toggle_pause()
                    if self.player.get_pause():
                        self.timer.stop()
                        self.steptimer.stop()
                        self.bplay.setIcon(QIcon(r'img\play.png'))
                        self.bplay.setToolTip('Play')
                    else:
                        self.timer.start()
                        self.steptimer.start()
                        self.bplay.setIcon(QIcon(r'img\pause.png'))
                        self.bplay.setToolTip('Pause')
                else:
                    self.step = 0
                    self.stime.setValue(0)
                    self.playitem = self.list.currentItem().text()
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
        except:
            QMessageBox.warning(self, 'Error', 'Cannot find the file to play!')

    def Show(self):
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        self.stime.setMaximum(int(self.mediatime))
        mediamin, mediasec = divmod(self.mediatime, 60)
        mediahour, mediamin = divmod(mediamin, 60)
        playmin, playsec = divmod(self.step, 60)
        playhour, playmin = divmod(playmin, 60)
        self.ltime.setText(
            '%02d:%02d:%02d/%02d:%02d:%02d' %
            (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        if self.flag == False:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('Play')
            self.lmedia.setPixmap(QPixmap(''))

    def Curvol(self):
        self.curvol = self.svolume.value()

    def Mute(self):
        if self.flag == False:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('Unmute')
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('Mute')

    def Volume(self):
        if self.flag == False:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('Unmute')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('Mute')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                self.step = 0
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    self.timer.stop()
                    self.steptimer.stop()
                    self.step = 0
                    self.loop = 1
                    self.flag = True
                    self.stime.setValue(0)
                    self.player.close_player()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('播放')
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        self.step = self.stime.value()

    def Slidemoved(self):
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')

    def Fastforward(self):
        self.step += 10
        if self.step >= int(self.mediatime):
            # clamp the seek target as well as the slider so the new
            # player does not seek past the end of the file
            self.step = int(self.mediatime)
            self.stime.setValue(int(self.mediatime))
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')

    def Fastback(self):
        self.step -= 10
        if self.step <= 0:
            self.step = 0
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')
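
The three seek methods above (Slidemoved, Fastforward, Fastback) all seek by discarding the current MediaPlayer and constructing a new one with ff_opts={'ss': self.step}, which re-opens the file on every jump. MediaPlayer also exposes a seek() method (used in Example #17 below) that repositions the existing player. A minimal sketch of an in-place fast-forward, assuming the same self.step, self.mediatime, and widget attributes as above:

    def Fastforward(self):
        # clamp the target position to the media duration
        self.step = min(self.step + 10, int(self.mediatime))
        self.stime.setValue(self.step)
        # seek the existing player; relative=False makes pts an
        # absolute position in seconds
        self.player.seek(pts=self.step, relative=False, accurate=True)
        self.timer.start()
        self.steptimer.start()
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('暂停')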
Example #15
class AMonitorService(AService):
    def __init__(self, cb=None, url=None):
        super().__init__(cb)
        self.url = url
        self.player = None
        self.tryTimer = None

    def install(self):
        ret = ecall('adb install -r -g app/MonitorService.apk')
        if ret == 0:
            super().install()

    def _start(self):
        cmds = 'adb shell am start com.rock_chips.monitorservice/.MainActivity'
        self.popen = Popen(cmds.split(), stdout=PIPE, stderr=STDOUT)
        Timer(0.1, self._processStartResult).start()

    def start(self):
        self.needStop = False
        # try connect first for fast boot
        self.connect()
        self._start()

    def _processStartResult(self):
        fd = self.popen.stdout
        line1 = fd.readline().decode()
        line2 = fd.readline().decode()
        if line1.startswith('Starting') and not line2.startswith('Error'):
            self.callCb('started')
            return
        time.sleep(1)
        if self.needStop:
            return
        # try install and start again
        self.popen = None
        self.install()
        self._start()

    def stop(self):
        self.popen = None
        self.needStop = True
        self.disconnect()
        self.callCb('stopped')

    def connect(self):
        if self.needStop:
            return
        if self.url is None:
            print('need url for connect')
            return

        ecall('adb forward tcp:50000 tcp:50000')
        lib_opts = {'analyzeduration': '32', 'flags': 'low_delay'}
        if self.player:
            self.player.close_player()
            self.player = None
        self.player = MediaPlayer(self.url,
                                  callback=self._mediaPlayerCallback,
                                  lib_opts=lib_opts)
        self.connectedTimer = Timer(0.1, self._processConnectResult)
        self.connectedTimer.start()

    def _mediaPlayerCallback(self, selector, value):
        if self.connectedTimer:
            self.connectedTimer.cancel()
            self.connectedTimer = None

        if selector in ('read:error', 'eof'):
            self.callCb('disconnected')
            # schedule a reconnect attempt
            self.tryTimer = Timer(1, self.connect)
            self.tryTimer.start()

    def _processConnectResult(self):
        super().connect()

    def disconnect(self):
        if self.tryTimer:
            self.tryTimer.cancel()
            self.tryTimer = None
        if self.player:
            self.player.close_player()
        super().disconnect()
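
AMonitorService keeps startup latency low by passing FFmpeg options straight through lib_opts and treats a 'read:error' or 'eof' callback as a lost connection. A standalone sketch of the same pattern, assuming a reachable stream URL (tcp://127.0.0.1:50000 is illustrative, matching the adb forward above):

import time
from ffpyplayer.player import MediaPlayer

def watch_stream(url='tcp://127.0.0.1:50000'):  # illustrative URL
    state = {'disconnected': False}

    def callback(selector, value):
        # same convention as AMonitorService: a read error or
        # end-of-stream means the connection is gone
        if selector in ('read:error', 'eof'):
            state['disconnected'] = True

    lib_opts = {'analyzeduration': '32', 'flags': 'low_delay'}
    player = MediaPlayer(url, callback=callback, lib_opts=lib_opts)
    while not state['disconnected']:
        frame, val = player.get_frame()
        if frame is None:
            time.sleep(0.005)
            continue
        img, t = frame  # hand the image off to a display here
    player.close_player()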
Example #16
class FFPyPlayer(BaseMoviePlayer):
    """Interface class for the FFPyPlayer library for use with `MovieStim`.

    This class also serves as the reference implementation for classes which
    interface with movie codec libraries for use with `MovieStim`. Creating new
    player classes which closely replicate the behaviour of this one should
    allow them to smoothly plug into `MovieStim`.

    """
    _movieLib = 'ffpyplayer'

    def __init__(self, parent):
        self._filename = u""

        self.parent = parent

        # handle to `ffpyplayer`
        self._handle = None

        # thread for reading frames asynchronously
        self._tStream = None

        # data from stream thread
        self._lastFrame = NULL_MOVIE_FRAME_INFO
        self._frameIndex = -1
        self._loopCount = 0
        self._metadata = None  # metadata from the stream

        self._lastPlayerOpts = DEFAULT_FF_OPTS.copy()

        # options from the parent
        if self.parent.loop:  # infinite loop
            self._lastPlayerOpts['loop'] = 0
        else:
            self._lastPlayerOpts['loop'] = 1  # play once

        if hasattr(self.parent, '_noAudio'):
            self._lastPlayerOpts['an'] = self.parent._noAudio

        # status flags
        self._status = NOT_STARTED

    def start(self, log=True):
        """Initialize and start the decoder. This method will return when a
        valid frame is made available.

        """
        # clear queued data from previous streams
        self._lastFrame = None
        self._frameIndex = -1

        # open the media player
        self._handle = MediaPlayer(self._filename,
                                   ff_opts=self._lastPlayerOpts)
        self._handle.set_pause(True)

        # Pull the first frame to get metadata. NB - `_enqueueFrame` should be
        # able to do this but the logic in there depends on having access to
        # metadata first. That may be rewritten at some point to reduce all of
        # this to just a single `_enqueueFrame` call.
        #
        self._status = NOT_STARTED

        # hand off the player interface to the thread
        self._tStream = MovieStreamThreadFFPyPlayer(self._handle)
        self._tStream.begin()

        # make sure we have metadata
        self.update()

    def load(self, pathToMovie):
        """Load a movie file from disk.

        Parameters
        ----------
        pathToMovie : str
            Path to movie file, stream (URI) or camera. Must be a format that
            FFMPEG supports.

        """
        # set the file path
        self._filename = pathToString(pathToMovie)

        # Check if the player is already started. Close it and load a new
        # instance if so.
        if self._handle is not None:  # player already started
            # make sure it's the correct type
            if not isinstance(self._handle, MediaPlayer):
                raise TypeError(
                    'Incorrect type for `FFMovieStim._player`, expected '
                    '`ffpyplayer.player.MediaPlayer`. Got type `{}` '
                    'instead.'.format(type(self._handle).__name__))

            # close the player and reset
            self.unload()

            # self._selectWindow(self.win)  # free buffers here !!!

        self.start()

        self._status = NOT_STARTED

    def unload(self):
        """Unload the video stream and reset.
        """
        self._handle.close_player()
        self._filename = u""
        self._frameIndex = -1
        self._handle = None  # reset

    @property
    def handle(self):
        """Handle to the `MediaPlayer` object exposed by FFPyPlayer. If `None`,
        no media player object has yet been initialized.
        """
        return self._handle

    @property
    def isLoaded(self):
        return self._handle is not None

    @property
    def metadata(self):
        """Most recent metadata (`MovieMetadata`).
        """
        return self.getMetadata()

    def getMetadata(self):
        """Get metadata from the movie stream.

        Returns
        -------
        MovieMetadata
            Movie metadata object. If no movie is loaded, `NULL_MOVIE_METADATA`
            is returned. At a minimum, fields `duration`, `size`, and
            `frameRate` are populated if a valid movie has been previously
            loaded.

        """
        self._assertMediaPlayer()

        metadata = self._metadata

        # write metadata to the fields of a `MovieMetadata` object
        toReturn = MovieMetadata(mediaPath=self._filename,
                                 title=metadata['title'],
                                 duration=metadata['duration'],
                                 frameRate=metadata['frame_rate'],
                                 size=metadata['src_vid_size'],
                                 pixelFormat=metadata['src_pix_fmt'],
                                 movieLib=self._movieLib,
                                 userData=None)

        return toReturn

    def _assertMediaPlayer(self):
        """Ensure the media player instance is available. Raises a
        `RuntimeError` if no movie is loaded.
        """
        if isinstance(self._handle, MediaPlayer):
            return  # nop if we're good

        raise RuntimeError(
            "Calling this class method requires a successful call to "
            "`load` first.")

    @property
    def status(self):
        """Player status flag (`int`).
        """
        return self._status

    @property
    def isPlaying(self):
        """`True` if the video is presently playing (`bool`)."""
        # Status flags as properties are pretty useful for users since they are
        # self documenting and prevent the user from touching the status flag
        # attribute directly.
        #
        return self.status == PLAYING

    @property
    def isNotStarted(self):
        """`True` if the video has not be started yet (`bool`). This status is
        given after a video is loaded and play has yet to be called.
        """
        return self.status == NOT_STARTED

    @property
    def isStopped(self):
        """`True` if the movie has been stopped.
        """
        return self.status == STOPPED

    @property
    def isPaused(self):
        """`True` if the movie has been paused.
        """
        self._assertMediaPlayer()

        return self._handle.get_pause()

    @property
    def isFinished(self):
        """`True` if the video is finished (`bool`).
        """
        # why is this the same as STOPPED?
        return self.status == FINISHED

    def play(self, log=False):
        """Start or continue a paused movie from current position.

        Parameters
        ----------
        log : bool
            Log the play event.

        Returns
        -------
        int or None
            Frame index playback started at. Should always be `0` if starting at
            the beginning of the video. Returns `None` if the player has not
            been initialized.

        """
        self._assertMediaPlayer()

        self._tStream.play()

        self._status = PLAYING

    def stop(self, log=False):
        """Stop the current point in the movie (sound will stop, current frame
        will not advance). Once stopped the movie cannot be restarted - it must
        be loaded again.

        Use `pause()` instead if you may need to restart the movie.

        Parameters
        ----------
        log : bool
            Log the stop event.

        """
        if self._tStream is None:
            raise RuntimeError("Cannot close stream, not opened yet.")

        # close the thread
        if not self._tStream.isDone():
            self._tStream.shutdown()
        self._tStream.join()  # wait until thread exits
        self._tStream = None

        if self._handle is not None:
            self._handle.close_player()
            self._handle = None  # reset

    def pause(self, log=False):
        """Pause the current point in the movie. The image of the last frame
        will persist on-screen until `play()` or `stop()` are called.

        Parameters
        ----------
        log : bool
            Log this event.

        """
        self._assertMediaPlayer()

        self._tStream.pause()

        return False

    def seek(self, timestamp, log=False):
        """Seek to a particular timestamp in the movie.

        Parameters
        ----------
        timestamp : float
            Time in seconds.
        log : bool
            Log the seek event.

        """
        raise NotImplementedError(
            "This feature is not available for the current backend.")

    def rewind(self, seconds=5, log=False):
        """Rewind the video.

        Parameters
        ----------
        seconds : float
            Time in seconds to rewind from the current position. Default is 5
            seconds.
        log : bool
            Log this event.

        Returns
        -------
        float
            Timestamp after rewinding the video.

        """
        raise NotImplementedError(
            "This feature is not available for the current backend.")

    def fastForward(self, seconds=5, log=False):
        """Fast-forward the video.

        Parameters
        ----------
        seconds : float
            Time in seconds to fast forward from the current position. Default
            is 5 seconds.
        log : bool
            Log this event.

        Returns
        -------
        float
            Timestamp at new position after fast forwarding the video.

        """
        raise NotImplementedError(
            "This feature is not available for the current backend.")

    def replay(self, autoStart=True, log=False):
        """Replay the movie from the beginning.

        Parameters
        ----------
        autoStart : bool
            Start playback immediately. If `False`, you must call `play()`
            afterwards to initiate playback.
        log : bool
            Log this event.

        Notes
        -----
        * This tears down the current media player instance and creates a new
          one. Similar to calling `stop()` and `loadMovie()`. Use `seek(0.0)` if
          you would like to restart the movie without reloading.

        """
        lastMovieFile = self._filename
        self.stop()  # stop the movie
        # self._autoStart = autoStart
        self.load(lastMovieFile)  # will play if auto start

    # --------------------------------------------------------------------------
    # Audio stream control methods
    #

    @property
    def muted(self):
        """`True` if the stream audio is muted (`bool`).
        """
        return self._handle.get_mute()  # thread-safe?

    @muted.setter
    def muted(self, value):
        self._tStream.setMute(value)

    def volumeUp(self, amount):
        """Increase the volume by a fixed amount.

        Parameters
        ----------
        amount : float or int
            Amount to increase the volume relative to the current volume.

        """
        self._assertMediaPlayer()

        # get the current volume from the player
        self.volume = self.volume + amount

        return self.volume

    def volumeDown(self, amount):
        """Decrease the volume by a fixed amount.

        Parameters
        ----------
        amount : float or int
            Amount to decrease the volume relative to the current volume.

        """
        self._assertMediaPlayer()

        # get the current volume from the player
        self.volume = self.volume - amount

        return self.volume

    @property
    def volume(self):
        """Volume for the audio track for this movie (`int` or `float`).
        """
        self._assertMediaPlayer()

        return self._handle.get_volume()  # thread-safe?

    @volume.setter
    def volume(self, value):
        self._assertMediaPlayer()
        self._tStream.setVolume(max(min(value, 1.0), 0.0))

    @property
    def loopCount(self):
        """Number of loops completed since playback started (`int`). This value
        is reset when either `stop` or `loadMovie` is called.
        """
        return self._loopCount

    # --------------------------------------------------------------------------
    # Timing related methods
    #
    # The methods here are used to handle timing, such as converting between
    # movie and experiment timestamps.
    #

    @property
    def pts(self):
        """Presentation timestamp for the current movie frame in seconds
        (`float`).

        The value for this either comes from the decoder or some other time
        source. This should be synchronized to the start of the audio track. A
        value of `-1.0` is invalid.

        """
        if self._handle is None:
            return -1.0

        return self._lastFrame.absTime

    def getStartAbsTime(self):
        """Get the absolute experiment time in seconds the movie starts at
        (`float`).

        This value reflects the time at which the movie would have started if
        played continuously from the start. Seeking and pausing the movie cause
        this value to change.

        Returns
        -------
        float
            Start time of the movie in absolute experiment time.

        """
        self._assertMediaPlayer()

        return getTime() - self._lastFrame.absTime
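
        # Worked example (illustrative numbers): if getTime() reads 100.0 s
        # and the current frame's presentation timestamp
        # (self._lastFrame.absTime) is 2.5 s, the movie behaves as if it
        # started at 100.0 - 2.5 == 97.5 s of experiment time.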

    def movieToAbsTime(self, movieTime):
        """Convert a movie timestamp to absolute experiment timestamp.

        Parameters
        ----------
        movieTime : float
            Movie timestamp to convert to absolute experiment time.

        Returns
        -------
        float
            Timestamp in experiment time which is coincident with the provided
            `movieTime` timestamp. The returned value should usually be precise
            down to about five decimal places.

        """
        self._assertMediaPlayer()

        # type checks on parameters
        if not isinstance(movieTime, (float, int)):
            raise TypeError(
                "Value for parameter `movieTime` must have type `float` or "
                "`int`.")

        return self.getStartAbsTime() + movieTime

    def absToMovieTime(self, absTime):
        """Convert absolute experiment timestamp to a movie timestamp.

        Parameters
        ----------
        absTime : float
            Absolute experiment time to convert to movie time.

        Returns
        -------
        float
            Movie time referenced to absolute experiment time. If the value is
            negative, the provided `absTime` falls before the beginning of the
            movie given the current timestamp. The returned value should
            usually be precise down to about five decimal places.

        """
        self._assertMediaPlayer()

        # type checks on parameters
        if not isinstance(absTime, (float, int)):
            raise TypeError(
                "Value for parameter `absTime` must have type `float` or "
                "`int`.")

        return absTime - self.getStartAbsTime()

    def movieTimeFromFrameIndex(self, frameIdx):
        """Get the movie time a specific a frame with a given index is
        scheduled to be presented.

        This is used to handle logic for seeking through a video feed (if
        permitted by the player).

        Parameters
        ----------
        frameIdx : int
            Frame index. Negative values are accepted but they will return
            negative timestamps.

        """
        self._assertMediaPlayer()

        return frameIdx * self._metadata.frameInterval

    def frameIndexFromMovieTime(self, movieTime):
        """Get the frame index of a given movie time.

        Parameters
        ----------
        movieTime : float
            Timestamp in movie time to convert to a frame index.

        Returns
        -------
        int
            Frame index that should be presented at the specified movie time.

        """
        self._assertMediaPlayer()

        return math.floor(movieTime / self._metadata.frameInterval)
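
        # Worked example (illustrative): at 30 fps the metadata frame
        # interval is 1/30 s, so movieTimeFromFrameIndex(60) returns
        # 60 * (1/30) == 2.0 s and frameIndexFromMovieTime(2.0) returns
        # floor(2.0 / (1/30)) == 60; the conversions round-trip.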

    @property
    def isSeekable(self):
        """Is seeking allowed for the video stream (`bool`)? If `False` then
        `frameIndex` will increase monotonically.
        """
        return False  # fixed for now

    @property
    def frameInterval(self):
        """Duration a single frame is to be presented in seconds (`float`). This
        is derived from the framerate information in the metadata. If not movie
        is loaded, the returned value will be invalid.
        """
        return self.metadata.frameInterval

    @property
    def frameIndex(self):
        """Current frame index (`int`).

        Index of the current frame in the stream. If playing from a file or any
        other seekable source, this value may not increase monotonically with
        time. A value of `-1` is invalid, meaning either the video is not
        started or there is some issue with the stream.

        """
        return self._lastFrame.frameIndex

    def getPercentageComplete(self):
        """Provides a value between 0.0 and 100.0, indicating the amount of the
        movie that has been already played (`float`).
        """
        duration = self.metadata.duration

        return (self.pts / duration) * 100.0

    # --------------------------------------------------------------------------
    # Methods for getting video frames from the encoder
    #

    def _enqueueFrame(self):
        """Grab the latest frame from the stream.

        Returns
        -------
        bool
            `True` if a frame has been enqueued. Returns `False` if the stream
            is not ready or was closed.

        """
        self._assertMediaPlayer()

        # If the queue is empty, the decoder thread has not yielded a new frame
        # since the last call.
        enqueuedFrame = self._tStream.getRecentFrame()
        if enqueuedFrame is None:
            return False

        # Unpack the data we got back ...
        # Note - Bit messy here, we should just hold onto the `enqueuedFrame`
        # instance and reference its fields from properties. Keeping like this
        # for now.
        frameImage = enqueuedFrame.frameImage
        streamStatus = enqueuedFrame.streamStatus
        self._metadata = enqueuedFrame.metadata
        self.parent.status = self._status = streamStatus.status
        self._frameIndex = streamStatus.frameIndex
        self._loopCount = streamStatus.loopCount

        # status information
        self._streamTime = streamStatus.streamTime  # stream time for the movie

        # if we have a new frame, update the frame information
        videoBuffer = frameImage.to_bytearray()[0]
        videoFrameArray = np.frombuffer(videoBuffer, dtype=np.uint8)

        # provide the last frame
        self._lastFrame = MovieFrame(
            frameIndex=self._frameIndex,
            absTime=self._streamTime,
            displayTime=self.metadata.frameInterval,
            size=frameImage.get_size(),
            colorData=videoFrameArray,
            audioChannels=0,  # not populated yet ...
            audioSamples=None,
            metadata=self.metadata,
            movieLib=u'ffpyplayer',
            userData=None)

        return True

    def update(self):
        """Update this player.

        This gets the latest data from the video stream and updates the player
        accordingly. This should be called at a higher frequency than the frame
        rate of the movie to avoid frame skips.

        """
        self._assertMediaPlayer()

        # check if the stream reader thread is present and alive, if not the
        # movie is finished
        if not self._tStream.isDone():
            self._enqueueFrame()
        else:
            self.parent.status = self._status = FINISHED

    def getMovieFrame(self):
        """Get the movie frame scheduled to be displayed at the current time.

        Returns
        -------
        `~psychopy.visual.movies.frame.MovieFrame`
            Current movie frame.

        """
        self.update()

        return self._lastFrame

    def __del__(self):
        """Cleanup when unloading.
        """
        if hasattr(self, '_tStream'):
            if self._tStream is not None:
                if not self._tStream.isDone():
                    self._tStream.shutdown()
                self._tStream.join()

        if hasattr(self, '_handle'):
            if self._handle is not None:
                self._handle.close_player()
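
Taken together, the class is driven by a short load/play/poll loop in the owning stimulus. A minimal sketch of that lifecycle, assuming a hypothetical stub parent supplying the loop, _noAudio, and status attributes the constructor reads (the stub and the file name are illustrative):

class _StubParent:
    # hypothetical stand-in for a MovieStim-like parent
    loop = False
    _noAudio = True
    status = None

player = FFPyPlayer(_StubParent())
player.load('movie.mp4')            # illustrative path; opens the decoder
player.play()
while not player.isFinished:        # poll faster than the movie frame rate
    frame = player.getMovieFrame()  # calls update() and returns _lastFrame
player.stop()                       # shuts down the thread and the player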
Example #17
class CustomImage(KivyImage):
    """Custom image display widget.
    Enables editing operations, displaying them in real time using a low-resolution preview of the original image file.
    All editing variables are watched by the widget, and it will automatically update the preview when they are changed.
    """

    exif = ''
    pixel_format = ''
    length = NumericProperty(0)
    framerate = ListProperty()
    video = BooleanProperty(False)
    player = ObjectProperty(None, allownone=True)
    position = NumericProperty(0.0)
    start_point = NumericProperty(0.0)
    end_point = NumericProperty(1.0)
    original_image = ObjectProperty()
    photoinfo = ListProperty()
    original_width = NumericProperty(0)
    original_height = NumericProperty(0)
    flip_horizontal = BooleanProperty(False)
    flip_vertical = BooleanProperty(False)
    mirror = BooleanProperty(False)
    angle = NumericProperty(0)
    rotate_angle = NumericProperty(0)
    fine_angle = NumericProperty(0)
    brightness = NumericProperty(0)
    shadow = NumericProperty(0)
    contrast = NumericProperty(0)
    gamma = NumericProperty(0)
    saturation = NumericProperty(0)
    temperature = NumericProperty(0)
    tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    curve = ListProperty()
    crop_top = NumericProperty(0)
    crop_bottom = NumericProperty(0)
    crop_left = NumericProperty(0)
    crop_right = NumericProperty(0)
    filter = StringProperty('')
    filter_amount = NumericProperty(0)
    autocontrast = BooleanProperty(False)
    equalize = NumericProperty(0)
    histogram = ListProperty()
    edit_image = ObjectProperty()
    cropping = BooleanProperty(False)
    touch_point = ObjectProperty()
    active_cropping = BooleanProperty(False)
    crop_start = ListProperty()
    sharpen = NumericProperty(0)
    bilateral = NumericProperty(0.5)
    bilateral_amount = NumericProperty(0)
    median_blur = NumericProperty(0)
    vignette_amount = NumericProperty(0)
    vignette_size = NumericProperty(.5)
    edge_blur_amount = NumericProperty(0)
    edge_blur_size = NumericProperty(.5)
    edge_blur_intensity = NumericProperty(.5)
    cropper = ObjectProperty()  #Holder for the cropper overlay
    crop_controls = ObjectProperty()  #Holder for the cropper edit panel object
    adaptive_clip = NumericProperty(0)
    border_opacity = NumericProperty(1)
    border_image = ListProperty()
    border_tint = ListProperty([1.0, 1.0, 1.0, 1.0])
    border_x_scale = NumericProperty(.5)
    border_y_scale = NumericProperty(.5)
    crop_min = NumericProperty(100)
    size_multiple = NumericProperty(1)

    #Denoising variables
    denoise = BooleanProperty(False)
    luminance_denoise = NumericProperty(10)
    color_denoise = NumericProperty(10)
    search_window = NumericProperty(15)
    block_size = NumericProperty(5)

    frame_number = 0
    max_frames = 0
    start_seconds = 0
    first_frame = None

    def start_video_convert(self):
        self.close_video()
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 0.0,
                                      'an': True
                                  })
        self.player.set_volume(0)
        self.frame_number = 0
        if self.start_point > 0 or self.end_point < 1:
            all_frames = self.length * (self.framerate[0] / self.framerate[1])
            self.max_frames = all_frames * (self.end_point - self.start_point)
        else:
            self.max_frames = 0

        #need to wait for load so the seek routine doesn't crash Python
        self.first_frame = self.wait_frame()

        if self.start_point > 0:
            self.start_seconds = self.length * self.start_point
            self.first_frame = self.seek_player(self.start_seconds)

    def wait_frame(self):
        #Ensure that a frame is actually returned before continuing
        frame = None
        while not frame:
            frame, value = self.player.get_frame(force_refresh=True)
        return frame

    def start_seek(self, seek):
        #tell the player to seek to a position
        self.player.set_pause(False)
        self.player.seek(pts=seek, relative=False, accurate=True)
        self.player.set_pause(True)

    def seek_player(self, seek):
        self.start_seek(seek)

        framerate = self.framerate[0] / self.framerate[1]
        target_seek_frame = seek * framerate

        loops = 0
        total_loops = 0
        while True:
            loops += 1
            total_loops += 1
            if loops > 5:
                #seek has been stuck for a while, try to seek again
                self.start_seek(seek)
                loops = 0
            #check if seek has gotten within a couple frames yet
            frame = self.wait_frame()
            current_seek = frame[1]
            current_seek_frame = current_seek * framerate
            frame_distance = abs(target_seek_frame - current_seek_frame)
            if frame_distance < 2 or total_loops >= 30:
                #seek has finished, or give up after too many tries so the program does not freeze
                break
        return frame
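
        # Example (illustrative): seeking to 10.0 s in a 30/1 fps clip
        # targets frame 300; the loop polls wait_frame() until the
        # returned pts is within 2 frames of that, re-issues the seek
        # every 5 stuck polls, and gives up after 30 total attempts.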

    def get_converted_frame(self):
        if self.first_frame:
            frame = self.first_frame
            self.first_frame = None
        else:
            self.player.set_pause(False)
            frame = None
            while not frame:
                frame, value = self.player.get_frame(force_refresh=False)
                if value == 'eof':
                    return None
            self.player.set_pause(True)
        self.frame_number = self.frame_number + 1
        if self.max_frames:
            if self.frame_number > self.max_frames:
                return None
        frame_image = frame[0]
        frame_size = frame_image.get_size()
        frame_converter = SWScale(frame_size[0],
                                  frame_size[1],
                                  frame_image.get_pixel_format(),
                                  ofmt='rgb24')
        new_frame = frame_converter.scale(frame_image)
        image_data = bytes(new_frame.to_bytearray()[0])
        image = Image.frombuffer(mode='RGB',
                                 size=(frame_size[0], frame_size[1]),
                                 data=image_data,
                                 decoder_name='raw')
        #for some reason, video frames are read upside-down? fix it here...
        image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        image = self.adjust_image(image, preview=False)
        return [image, frame[1]]

    def close_video(self):
        if self.player:
            self.player.close_player()
            self.player = None

    def open_video(self):
        self.player = MediaPlayer(self.source,
                                  ff_opts={
                                      'paused': True,
                                      'ss': 1.0,
                                      'an': True
                                  })
        frame = None
        while not frame:
            frame, value = self.player.get_frame(force_refresh=True)
        data = self.player.get_metadata()
        self.length = data['duration']
        self.framerate = data['frame_rate']
        self.pixel_format = data['src_pix_fmt']

    def set_aspect(self, aspect_x, aspect_y):
        """Adjusts the cropping of the image to be a given aspect ratio.
        Attempts to keep the image as large as possible.
        Arguments:
            aspect_x: Horizontal aspect ratio element, numerical value.
            aspect_y: Vertical aspect ratio element, numerical value.
        """

        width = self.original_width - self.crop_left - self.crop_right
        height = self.original_height - self.crop_top - self.crop_bottom
        if aspect_x != width or aspect_y != height:
            current_ratio = width / height
            target_ratio = aspect_x / aspect_y
            if target_ratio > current_ratio:
                #crop top/bottom, width is the same
                new_height = width / target_ratio
                height_difference = height - new_height
                crop_right = 0
                crop_left = 0
                crop_top = height_difference / 2
                crop_bottom = crop_top
            else:
                #crop sides, height is the same
                new_width = height * target_ratio
                width_difference = width - new_width
                crop_top = 0
                crop_bottom = 0
                crop_left = width_difference / 2
                crop_right = crop_left
        else:
            crop_top = 0
            crop_right = 0
            crop_bottom = 0
            crop_left = 0
        self.crop_top = self.crop_top + crop_top
        self.crop_right = self.crop_right + crop_right
        self.crop_bottom = self.crop_bottom + crop_bottom
        self.crop_left = self.crop_left + crop_left
        self.reset_cropper()
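
        # Worked example (illustrative): for an uncropped 1920x1080 image,
        # set_aspect(1, 1) gives target_ratio 1.0 < current_ratio ~1.78,
        # so height is kept and new_width = 1080 * 1.0 = 1080; the
        # remaining 840 px are split evenly into crop_left = crop_right
        # = 420, while crop_top and crop_bottom stay 0.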

    def crop_percent(self, side, percent):
        texture_width = self.original_width
        texture_height = self.original_height
        crop_min = self.crop_min

        if side == 'top':
            crop_amount = texture_height * percent
            if (texture_height - crop_amount - self.crop_bottom) < crop_min:
                crop_amount = texture_height - self.crop_bottom - crop_min
            self.crop_top = crop_amount
        elif side == 'right':
            crop_amount = texture_width * percent
            if (texture_width - crop_amount - self.crop_left) < crop_min:
                crop_amount = texture_width - self.crop_left - crop_min
            self.crop_right = crop_amount
        elif side == 'bottom':
            crop_amount = texture_height * percent
            if (texture_height - crop_amount - self.crop_top) < crop_min:
                crop_amount = texture_height - self.crop_top - crop_min
            self.crop_bottom = crop_amount
        else:
            crop_amount = texture_width * percent
            if (texture_width - crop_amount - self.crop_right) < crop_min:
                crop_amount = texture_width - self.crop_right - crop_min
            self.crop_left = crop_amount
        self.reset_cropper()
        if self.crop_controls:
            self.crop_controls.update_crop()

    def get_crop_percent(self):
        width = self.original_width
        height = self.original_height
        top_percent = self.crop_top / height
        right_percent = self.crop_right / width
        bottom_percent = self.crop_bottom / height
        left_percent = self.crop_left / width
        return [top_percent, right_percent, bottom_percent, left_percent]

    def get_crop_size(self):
        new_width = self.original_width - self.crop_left - self.crop_right
        new_height = self.original_height - self.crop_top - self.crop_bottom
        new_aspect = new_width / new_height
        old_aspect = self.original_width / self.original_height
        return "Size: " + str(
            int(new_width)) + "x" + str(int(new_height)) + ", Aspect: " + str(
                round(new_aspect, 2)) + " (Original: " + str(
                    round(old_aspect, 2)) + ")"

    def reset_crop(self):
        """Sets the crop values back to 0 for all sides"""

        self.crop_top = 0
        self.crop_bottom = 0
        self.crop_left = 0
        self.crop_right = 0
        self.reset_cropper(setup=True)

    def reset_cropper(self, setup=False):
        """Updates the position and size of the cropper overlay object."""

        if self.cropper:
            texture_size = self.get_texture_size()
            texture_top_edge = texture_size[0]
            texture_right_edge = texture_size[1]
            texture_bottom_edge = texture_size[2]
            texture_left_edge = texture_size[3]

            texture_width = (texture_right_edge - texture_left_edge)
            #texture_height = (texture_top_edge - texture_bottom_edge)

            divisor = self.original_width / texture_width
            top_edge = texture_top_edge - (self.crop_top / divisor)
            bottom_edge = texture_bottom_edge + (self.crop_bottom / divisor)
            left_edge = texture_left_edge + (self.crop_left / divisor)
            right_edge = texture_right_edge - (self.crop_right / divisor)
            width = right_edge - left_edge
            height = top_edge - bottom_edge

            self.cropper.pos = [left_edge, bottom_edge]
            self.cropper.size = [width, height]
            if setup:
                self.cropper.max_resizable_width = width
                self.cropper.max_resizable_height = height

    def get_texture_size(self):
        """Returns a list of the texture size coordinates.
        Returns:
            List of numbers: [Top edge, Right edge, Bottom edge, Left edge]
        """

        left_edge = (self.size[0] / 2) - (self.norm_image_size[0] / 2)
        right_edge = left_edge + self.norm_image_size[0]
        bottom_edge = (self.size[1] / 2) - (self.norm_image_size[1] / 2)
        top_edge = bottom_edge + self.norm_image_size[1]
        return [top_edge, right_edge, bottom_edge, left_edge]
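
        # Example (illustrative): with self.size == (800, 600) and
        # norm_image_size == (400, 300) the image is centered, so this
        # returns [450, 600, 150, 200] (top, right, bottom, left).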

    def point_over_texture(self, pos):
        """Checks if the given pos (x,y) value is over the image texture.
        Returns False if not over texture, returns point transformed to texture coordinates if over texture.
        """

        texture_size = self.get_texture_size()
        top_edge = texture_size[0]
        right_edge = texture_size[1]
        bottom_edge = texture_size[2]
        left_edge = texture_size[3]
        if pos[0] > left_edge and pos[0] < right_edge:
            if pos[1] > bottom_edge and pos[1] < top_edge:
                texture_x = pos[0] - left_edge
                texture_y = pos[1] - bottom_edge
                return [texture_x, texture_y]
        return False

    def detect_crop_edges(self, first, second):
        """Given two points, this will detect the proper crop area for the image.
        Arguments:
            first: First crop corner.
            second: Second crop corner.
        Returns a list of cropping values:
            [crop_top, crop_bottom, crop_left, crop_right]
        """

        if first[0] < second[0]:
            left = first[0]
            right = second[0]
        else:
            left = second[0]
            right = first[0]
        if first[1] < second[1]:
            top = second[1]
            bottom = first[1]
        else:
            top = first[1]
            bottom = second[1]
        scale = self.original_width / self.norm_image_size[0]
        crop_top = (self.norm_image_size[1] - top) * scale
        crop_bottom = bottom * scale
        crop_left = left * scale
        crop_right = (self.norm_image_size[0] - right) * scale
        return [crop_top, crop_bottom, crop_left, crop_right]
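
        # Worked example (illustrative): with a 2000 px wide original
        # shown at norm_image_size (1000, 500), scale is 2. Corners
        # (100, 50) and (400, 450) give top=450, bottom=50, left=100,
        # right=400, so this returns [(500 - 450) * 2, 50 * 2, 100 * 2,
        # (1000 - 400) * 2] == [100, 100, 200, 1200].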

    def set_crop(self, posx, posy, width, height):
        """Sets the crop values based on the cropper widget."""

        texture_size = self.get_texture_size()
        texture_top_edge = texture_size[0]
        texture_right_edge = texture_size[1]
        texture_bottom_edge = texture_size[2]
        texture_left_edge = texture_size[3]

        left_crop = posx - texture_left_edge
        bottom_crop = posy - texture_bottom_edge
        right_crop = texture_right_edge - width - posx
        top_crop = texture_top_edge - height - posy

        texture_width = (texture_right_edge - texture_left_edge)
        divisor = self.original_width / texture_width
        if left_crop < 0:
            self.crop_left = 0
        else:
            self.crop_left = left_crop * divisor
        if right_crop < 0:
            self.crop_right = 0
        else:
            self.crop_right = right_crop * divisor
        if top_crop < 0:
            self.crop_top = 0
        else:
            self.crop_top = top_crop * divisor
        if bottom_crop < 0:
            self.crop_bottom = 0
        else:
            self.crop_bottom = bottom_crop * divisor
        #self.update_preview(recrop=False)
        if self.crop_controls:
            self.crop_controls.update_crop()

    def on_sharpen(self, *_):
        self.update_preview()

    def on_bilateral(self, *_):
        self.update_preview()

    def on_bilateral_amount(self, *_):
        self.update_preview()

    def on_median_blur(self, *_):
        self.update_preview()

    def on_border_opacity(self, *_):
        self.update_preview()

    def on_border_image(self, *_):
        self.update_preview()

    def on_border_x_scale(self, *_):
        self.update_preview()

    def on_border_y_scale(self, *_):
        self.update_preview()

    def on_vignette_amount(self, *_):
        self.update_preview()

    def on_vignette_size(self, *_):
        self.update_preview()

    def on_edge_blur_amount(self, *_):
        self.update_preview()

    def on_edge_blur_size(self, *_):
        self.update_preview()

    def on_edge_blur_intensity(self, *_):
        self.update_preview()

    def on_rotate_angle(self, *_):
        self.update_preview()

    def on_fine_angle(self, *_):
        self.update_preview()

    def on_flip_horizontal(self, *_):
        self.update_preview()

    def on_flip_vertical(self, *_):
        self.update_preview()

    def on_autocontrast(self, *_):
        self.update_preview()

    def on_adaptive_clip(self, *_):
        self.update_preview()

    def on_equalize(self, *_):
        self.update_preview()

    def on_brightness(self, *_):
        self.update_preview()

    def on_shadow(self, *_):
        self.update_preview()

    def on_gamma(self, *_):
        self.update_preview()

    def on_contrast(self, *_):
        self.update_preview()

    def on_saturation(self, *_):
        self.update_preview()

    def on_temperature(self, *_):
        self.update_preview()

    def on_curve(self, *_):
        self.update_preview()

    def on_tint(self, *_):
        self.update_preview()

    def on_border_tint(self, *_):
        self.update_preview()

    def on_size(self, *_):
        pass

    def on_source(self, *_):
        """The source file has been changed, reload image and regenerate preview."""

        self.video = os.path.splitext(self.source)[1].lower() in movietypes
        if self.video:
            self.open_video()
        self.reload_edit_image()
        self.update_texture(self.edit_image)
        #self.update_preview()

    def on_position(self, *_):
        pass

    def reload_edit_image(self):
        """Regenerate the edit preview image."""
        if self.video:
            if not self.player:
                return
            location = self.length * self.position
            frame = self.seek_player(location)
            frame = frame[0]
            frame_size = frame.get_size()
            pixel_format = frame.get_pixel_format()
            frame_converter = SWScale(frame_size[0],
                                      frame_size[1],
                                      pixel_format,
                                      ofmt='rgb24')
            new_frame = frame_converter.scale(frame)
            image_data = bytes(new_frame.to_bytearray()[0])

            original_image = Image.frombuffer(mode='RGB',
                                              size=(frame_size[0],
                                                    frame_size[1]),
                                              data=image_data,
                                              decoder_name='raw')
            #for some reason, video frames are read upside-down? fix it here...
            original_image = original_image.transpose(
                PIL.Image.FLIP_TOP_BOTTOM)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            self.original_image = original_image
            image = original_image.copy()

        else:
            original_image = Image.open(self.source)
            try:
                self.exif = original_image.info.get('exif', b'')
            except Exception:
                self.exif = ''
            if self.angle != 0:
                if self.angle == 90:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_90)
                if self.angle == 180:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_180)
                if self.angle == 270:
                    original_image = original_image.transpose(
                        PIL.Image.ROTATE_270)
            self.original_width = original_image.size[0]
            self.original_height = original_image.size[1]
            image = original_image.copy()
            self.original_image = original_image.copy()
            original_image.close()
        image_width = Window.width * .75
        width = int(image_width)
        height = int(image_width * (image.size[1] / image.size[0]))
        if width < 10:
            width = 10
        if height < 10:
            height = 10
        image = image.resize((width, height))
        if image.mode != 'RGB':
            image = image.convert('RGB')
        self.size_multiple = self.original_width / image.size[0]
        self.edit_image = image
        Clock.schedule_once(
            self.update_histogram
        )  #Need to delay this because kivy will mess up the drawing of it on first load.
        #self.histogram = image.histogram()

    def update_histogram(self, *_):
        self.histogram = self.edit_image.histogram()

    def on_texture(self, instance, value):
        if value is not None:
            self.texture_size = list(value.size)
        if self.mirror:
            self.texture.flip_horizontal()

    def denoise_preview(self, width, height, pos_x, pos_y):
        left = pos_x
        right = pos_x + width
        # use the region height (not width) for the lower edge
        lower = pos_y + height
        upper = pos_y
        original_image = self.original_image
        preview = original_image.crop(box=(left, upper, right, lower))
        if preview.mode != 'RGB':
            preview = preview.convert('RGB')
        preview_cv = cv2.cvtColor(numpy.array(preview), cv2.COLOR_RGB2BGR)
        preview_cv = cv2.fastNlMeansDenoisingColored(preview_cv, None,
                                                     self.luminance_denoise,
                                                     self.color_denoise,
                                                     self.search_window,
                                                     self.block_size)
        preview_cv = cv2.cvtColor(preview_cv, cv2.COLOR_BGR2RGB)
        preview = Image.fromarray(preview_cv)
        preview_bytes = BytesIO()
        preview.save(preview_bytes, 'jpeg')
        preview_bytes.seek(0)
        return preview_bytes

    def update_preview(self, denoise=False, recrop=True):
        """Update the preview image."""

        image = self.adjust_image(self.edit_image)
        if denoise and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.fastNlMeansDenoisingColored(
                open_cv_image, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)

        self.update_texture(image)
        self.histogram = image.histogram()
        if recrop:
            self.reset_cropper(setup=True)

    def adjust_image(self, image, preview=True):
        """Applies all current editing opterations to an image.
        Arguments:
            image: A PIL image.
            preview: Generate edit image in preview mode (faster)
        Returns: A PIL image.
        """

        if not preview:
            orientation = self.photoinfo[13]
            if orientation == 3 or orientation == 4:
                image = image.transpose(PIL.Image.ROTATE_180)
            elif orientation == 5 or orientation == 6:
                image = image.transpose(PIL.Image.ROTATE_90)
            elif orientation == 7 or orientation == 8:
                image = image.transpose(PIL.Image.ROTATE_270)
            if orientation in [2, 4, 5, 7]:
                image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
            size_multiple = self.size_multiple
        else:
            size_multiple = 1

        if self.sharpen != 0:
            enhancer = ImageEnhance.Sharpness(image)
            image = enhancer.enhance(self.sharpen + 1)
        if self.median_blur != 0 and opencv:
            max_median = 10 * size_multiple
            median = int(self.median_blur * max_median)
            if median % 2 == 0:
                median = median + 1
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.medianBlur(open_cv_image, median)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        if self.bilateral != 0 and self.bilateral_amount != 0 and opencv:
            diameter = int(self.bilateral * 10 * size_multiple)
            if diameter < 1:
                diameter = 1
            sigma_color = self.bilateral_amount * 100 * size_multiple
            if sigma_color < 1:
                sigma_color = 1
            sigma_space = sigma_color
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.bilateralFilter(open_cv_image, diameter,
                                                sigma_color, sigma_space)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)
        if self.vignette_amount > 0 and self.vignette_size > 0:
            vignette = Image.new(mode='RGB', size=image.size, color=(0, 0, 0))
            filter_color = int((1 - self.vignette_amount) * 255)
            vignette_mixer = Image.new(mode='L',
                                       size=image.size,
                                       color=filter_color)
            draw = ImageDraw.Draw(vignette_mixer)
            shrink_x = int((self.vignette_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.vignette_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            vignette_mixer = vignette_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.vignette_amount * 60) +
                                         60))
            image = Image.composite(image, vignette, vignette_mixer)
        if self.edge_blur_amount > 0 and self.edge_blur_intensity > 0 and self.edge_blur_size > 0:
            blur_image = image.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            filter_color = int((1 - self.edge_blur_intensity) * 255)
            blur_mixer = Image.new(mode='L',
                                   size=image.size,
                                   color=filter_color)
            draw = ImageDraw.Draw(blur_mixer)
            shrink_x = int((self.edge_blur_size * (image.size[0] / 2)) -
                           (image.size[0] / 4))
            shrink_y = int((self.edge_blur_size * (image.size[1] / 2)) -
                           (image.size[1] / 4))
            draw.ellipse([
                0 + shrink_x, 0 + shrink_y, image.size[0] - shrink_x,
                image.size[1] - shrink_y
            ],
                         fill=255)
            blur_mixer = blur_mixer.filter(
                ImageFilter.GaussianBlur(radius=(self.edge_blur_amount * 30)))
            image = Image.composite(image, blur_image, blur_mixer)
        if self.crop_top != 0 or self.crop_bottom != 0 or self.crop_left != 0 or self.crop_right != 0:
            if preview:
                overlay = Image.new(mode='RGB',
                                    size=image.size,
                                    color=(0, 0, 0))
                divisor = self.original_width / image.size[0]
                draw = ImageDraw.Draw(overlay)
                draw.rectangle(
                    [0, 0, (self.crop_left / divisor), image.size[1]],
                    fill=(255, 255, 255))
                draw.rectangle(
                    [0, 0, image.size[0], (self.crop_top / divisor)],
                    fill=(255, 255, 255))
                draw.rectangle([(image.size[0] -
                                 (self.crop_right / divisor)), 0,
                                (image.size[0]), image.size[1]],
                               fill=(255, 255, 255))
                draw.rectangle([
                    0, (image.size[1] - (self.crop_bottom / divisor)),
                    image.size[0], image.size[1]
                ],
                               fill=(255, 255, 255))
                bright = ImageEnhance.Brightness(overlay)
                overlay = bright.enhance(.333)
                image = ImageChops.subtract(image, overlay)
            else:
                if self.crop_left >= image.size[0]:
                    crop_left = 0
                else:
                    crop_left = int(self.crop_left)
                if self.crop_top >= image.size[1]:
                    crop_top = 0
                else:
                    crop_top = int(self.crop_top)
                if self.crop_right >= image.size[0]:
                    crop_right = image.size[0]
                else:
                    crop_right = int(image.size[0] - self.crop_right)
                if self.crop_bottom >= image.size[1]:
                    crop_bottom = image.size[1]
                else:
                    crop_bottom = int(image.size[1] - self.crop_bottom)
                if self.video:
                    #ensure that image size is divisible by 2
                    new_width = crop_right - crop_left
                    new_height = crop_bottom - crop_top
                    if new_width % 2 == 1:
                        if crop_right < image.size[0]:
                            crop_right = crop_right + 1
                        else:
                            crop_right = crop_right - 1
                    if new_height % 2 == 1:
                        if crop_bottom < image.size[1]:
                            crop_bottom = crop_bottom + 1
                        else:
                            crop_bottom = crop_bottom - 1
                image = image.crop(
                    (crop_left, crop_top, crop_right, crop_bottom))
        if self.flip_horizontal:
            image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        if self.flip_vertical:
            image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
        if self.rotate_angle != 0:
            if self.rotate_angle == 90:
                image = image.transpose(PIL.Image.ROTATE_270)
            if self.rotate_angle == 180:
                image = image.transpose(PIL.Image.ROTATE_180)
            if self.rotate_angle == 270:
                image = image.transpose(PIL.Image.ROTATE_90)
        if self.fine_angle != 0:
            total_angle = -self.fine_angle * 10
            angle_radians = math.radians(abs(total_angle))
            width, height = rotated_rect_with_max_area(image.size[0],
                                                       image.size[1],
                                                       angle_radians)
            x = int((image.size[0] - width) / 2)
            y = int((image.size[1] - height) / 2)
            if preview:
                image = image.rotate(total_angle, expand=False)
            else:
                image = image.rotate(total_angle,
                                     resample=PIL.Image.BICUBIC,
                                     expand=False)
            image = image.crop((x, y, image.size[0] - x, image.size[1] - y))
        if self.autocontrast:
            image = ImageOps.autocontrast(image)
        if self.equalize != 0:
            equalize_image = ImageOps.equalize(image)
            image = Image.blend(image, equalize_image, self.equalize)
        temperature = int(round(abs(self.temperature) * 100))
        if temperature != 0:
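            # map |temperature| to an index into the kelvin lookup tables
            # (assumed to hold 100 RGB entries each)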
            temperature = temperature - 1
            if self.temperature > 0:
                kelvin = negative_kelvin[99 - temperature]
            else:
                kelvin = positive_kelvin[temperature]
            matrix = ((kelvin[0] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[1] / 255.0), 0.0, 0.0, 0.0, 0.0,
                      (kelvin[2] / 255.0), 0.0)
            image = image.convert('RGB', matrix)
        if self.brightness != 0:
            enhancer = ImageEnhance.Brightness(image)
            image = enhancer.enhance(1 + self.brightness)
        if self.shadow != 0:
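            # shadow < 0 crushes the darkest tones toward black; shadow > 0 lifts them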
            if self.shadow < 0:
                floor = int(abs(self.shadow) * 128)
                table = [0] * floor
                remaining_length = 256 - floor
                for index in range(0, remaining_length):
                    value = int(round((index / remaining_length) * 256))
                    table.append(value)
                lut = table * 3
            else:
                floor = int(abs(self.shadow) * 128)
                table = []
                for index in range(0, 256):
                    percent = 1 - (index / 255)
                    value = int(round(index + (floor * percent)))
                    table.append(value)
                lut = table * 3
            image = image.point(lut)

        if self.gamma != 0:
            # map self.gamma (assumed in [-1, 1]) to an exponent: negative values
            # give an exponent > 1 (darker image), positive values an exponent < 1
            # (brighter image)
            if self.gamma == -1:
                gamma = float('inf')  # avoid dividing by zero below
            elif self.gamma < 0:
                gamma = 1 / (self.gamma + 1)
            else:
                gamma = 1 / ((self.gamma + 1) * (self.gamma + 1))
            lut = [pow(x / 255, gamma) * 255 for x in range(256)]
            image = image.point(lut * 3)
        if self.contrast != 0:
            enhancer = ImageEnhance.Contrast(image)
            image = enhancer.enhance(1 + self.contrast)
        if self.saturation != 0:
            enhancer = ImageEnhance.Color(image)
            image = enhancer.enhance(1 + self.saturation)
        if self.tint != [1.0, 1.0, 1.0, 1.0]:
            matrix = (self.tint[0], 0.0, 0.0, 0.0, 0.0, self.tint[1], 0.0, 0.0,
                      0.0, 0.0, self.tint[2], 0.0)
            image = image.convert('RGB', matrix)
        if self.curve:
            lut = self.curve * 3
            image = image.point(lut)

        if self.denoise and not preview and opencv:
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2BGR)
            open_cv_image = cv2.fastNlMeansDenoisingColored(
                open_cv_image, None, self.luminance_denoise,
                self.color_denoise, self.search_window, self.block_size)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(open_cv_image)

        if self.adaptive_clip > 0 and opencv:
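            # run CLAHE on the L channel in Lab space: this boosts local contrast
            # without shifting hue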
            open_cv_image = cv2.cvtColor(numpy.array(image), cv2.COLOR_RGB2Lab)
            channels = cv2.split(open_cv_image)
            clahe = cv2.createCLAHE(clipLimit=(self.adaptive_clip * 4),
                                    tileGridSize=(8, 8))
            clahe_image = clahe.apply(channels[0])
            channels[0] = clahe_image
            open_cv_image = cv2.merge(channels)
            open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_Lab2RGB)
            image = Image.fromarray(open_cv_image)

        if self.border_image:
            image_aspect = image.size[0] / image.size[1]
            closest_aspect = min(self.border_image[1],
                                 key=lambda x: abs(x - image_aspect))
            index = self.border_image[1].index(closest_aspect)
            image_file = os.path.join('borders', self.border_image[2][index])
            if preview:
                resample = PIL.Image.NEAREST
            else:
                resample = PIL.Image.BICUBIC
            border_image = Image.open(image_file)
            border_crop_x = int(border_image.size[0] *
                                ((self.border_x_scale + 1) / 15))
            border_crop_y = int(border_image.size[1] *
                                ((self.border_y_scale + 1) / 15))
            border_image = border_image.crop(
                (border_crop_x, border_crop_y,
                 border_image.size[0] - border_crop_x,
                 border_image.size[1] - border_crop_y))
            border_image = border_image.resize(image.size, resample)

            if os.path.splitext(image_file)[1].lower() == '.jpg':
                alpha_file = os.path.splitext(image_file)[0] + '-mask.jpg'
                if not os.path.exists(alpha_file):
                    alpha_file = image_file
                alpha = Image.open(alpha_file)
                alpha = alpha.convert('L')
                alpha = alpha.crop((border_crop_x, border_crop_y,
                                    alpha.size[0] - border_crop_x,
                                    alpha.size[1] - border_crop_y))
                alpha = alpha.resize(image.size, resample)
            else:
                alpha = border_image.split()[-1]
                border_image = border_image.convert('RGB')
            if self.border_tint != [1.0, 1.0, 1.0, 1.0]:
                matrix = (self.border_tint[0], 0.0, 0.0, 1.0, 0.0,
                          self.border_tint[1], 0.0, 1.0, 0.0, 0.0,
                          self.border_tint[2], 1.0)
                border_image = border_image.convert('RGB', matrix)

            enhancer = ImageEnhance.Brightness(alpha)
            alpha = enhancer.enhance(self.border_opacity)
            image = Image.composite(border_image, image, alpha)

        return image

    def update_texture(self, image):
        """Saves a PIL image to the visible texture.
        Argument:
            image: A PIL image
        """

        image_bytes = BytesIO()
        image.save(image_bytes, 'jpeg')
        image_bytes.seek(0)
        self._coreimage = CoreImage(image_bytes, ext='jpg')
        self._on_tex_change()

    def get_full_quality(self):
        """Generate a full sized and full quality version of the source image.
        Returns: A PIL image.
        """

        image = self.original_image.copy()
        if not self.video:
            if self.angle != 0:
                if self.angle == 90:
                    image = image.transpose(PIL.Image.ROTATE_90)
                if self.angle == 180:
                    image = image.transpose(PIL.Image.ROTATE_180)
                if self.angle == 270:
                    image = image.transpose(PIL.Image.ROTATE_270)
        if image.mode != 'RGB':
            image = image.convert('RGB')
        image = self.adjust_image(image, preview=False)
        return image

    def close_image(self):
        self.original_image.close()
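A note on the pattern above: the shadow, gamma, and curve adjustments all build a
256-entry lookup table and apply it to the three RGB channels at once via
Image.point. A minimal standalone sketch of that pattern (the input file name is
hypothetical):

from PIL import Image

def apply_gamma(image, exponent):
    # 256-entry table mapping each input intensity to an output intensity
    lut = [min(255, int(round(pow(x / 255, exponent) * 255))) for x in range(256)]
    # replicate the table for the R, G and B channels and apply it in one call
    return image.point(lut * 3)

img = Image.open('photo.jpg').convert('RGB')  # hypothetical input file
brightened = apply_gamma(img, 0.5)  # exponent < 1 brightens
darkened = apply_gamma(img, 2.0)    # exponent > 1 darkens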
Example #18
class Track:
    def __init__(self, name=None, track_id=None):
        # determine which attribute to construct the object from, i.e. whether
        # to build from a search term or directly from a track ID (the original
        # locals() comprehension breaks in Python 3, where comprehensions have
        # their own scope)
        self.__type = None
        for key, value in {'name': name, 'track_id': track_id}.items():
            if value is not None:
                self.__type = key
        if self.__type is None:
            raise NoAttributesSupplied

        self.track_id = track_id
        self.title = name
        self.fetch_type = None
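        # NOTE: the YouTube lookup below assumes a name was supplied; constructing
        # from track_id alone would fail because self.title would still be None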

        print(':::fetching url')
        html = requests.get("https://www.youtube.com/results?search_query=" +
                            self.title.replace(' ', '+'))
        video = pafy.new(
            re.findall(r"watch\?v=(\S{11})", str(html.content))[0])
        best_stream = video.getbestaudio(preftype="wav", ftypestrict=False)

        self.ext = '.' + best_stream.extension
        self.title = filter_search_term(video.title).strip()
        self.url = best_stream.url
        self.filename = self.title + self.ext
        print('video title:::', filter_search_term(video.title))

        # don't repeat if all the data has already been fetched
        if self.fetch_type is not None:
            return

        if self.__type == 'track_id':
            track = spotify.track(self.track_id)
        elif self.__type == 'name':
            track_search = spotify.search(self.title, type='track', limit=1)
            if len(track_search['tracks']['items']) <= 0:
                print(
                    ':::track not available from spotify, doing a minimal fetch'
                )
                if '-' in self.title:
                    self.artists = [self.title.split('-')[0].strip()]
                else:
                    self.artists = None
                self.__artists_names = None
                self.album = None
                self.track_id = None
                self.genres = None
                self.fetch_type = 'minimal'
                return
            else:
                track = track_search['tracks']['items'][0]

        self.track_id = track['id']

        self.artists = []
        self.__artists_names = []
        for artist in track['artists']:
            self.artists.append(Artist(artist_id=artist['id']))
            self.__artists_names.append(artist['name'])

        self.genres = []
        for artist in self.artists:
            for genre in artist.genres:
                self.genres.append(genre)

        self.album = Album(album_id=track['album']['id'])

        self.fetch_type = 'full'

        print(':::fetched')

    def send_notification(self):
        print(":::sending notif")
        # send notification

        # fetch metadata for the track if it hasn't already been fetched
        if self.fetch_type is None:
            self.fetch_metadata()
        message = ''
        if self.__artists_names is not None:
            # join the artist names with ' and ' between each pair
            message = ' and '.join(self.__artists_names)
        if self.album is not None:
            message += f'\nfrom album {self.album.name}'
        notification.notify(
            title=self.title,
            message=message,
            app_icon=r'C:\users\gadit\downloads\music_icon0.ico',
            app_name='M E L O D I N E',
            timeout=10,
            toast=False)

    def download(self, custom=None, no_part=True):
        global audio_downloader
        audio_downloader = YoutubeDL({
            #'buffersize': 512,
            #'http_chunk_size': 256,
            'audioformat': 'wav',
            'format': 'bestaudio',
            'outtmpl': self.title + self.ext,
            'extractaudio': True,
            'retries': 5,
            'continuedl': True,
            'nopart': no_part,
            'hls_prefer_native': True,
            'quiet': True
        })
        audio_downloader.extract_info(self.url)

    def play(self):
        self.fetch_metadata()
        self.download(no_part=True)
        print('::: downloaded')
        threading.Thread(target=self.send_notification, daemon=True).start()

        self.player = MediaPlayer(self.filename)
        time.sleep(0.5)
        print('::: playing')

        last_pts = 0
        while True:
            # get_pts() returns the current position in seconds as a float
            updated_pts = int(self.player.get_pts())
            print(':::updated', updated_pts)

            while self.player.get_pause():
                time.sleep(0.5)
            if updated_pts == last_pts:
                # the position did not advance: the stream is buffering, so
                # pause briefly to let the buffer refill
                self.player.toggle_pause()
                print("---buffered out, pausing")
                time.sleep(1)
                self.player.toggle_pause()
            duration = self.player.get_metadata()['duration']
            if duration and updated_pts >= int(duration):
                print(':::breaking')
                self.player.toggle_pause()
                self.player.close_player()
                break

            last_pts = updated_pts
            time.sleep(1)
        print(':::finished playing')
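The pts-comparison loop above is a workaround; ffpyplayer reports the end of a
stream directly through get_frame(). A minimal blocking-playback sketch built on
that signal (the file name is hypothetical):

import time
from ffpyplayer.player import MediaPlayer

def play_blocking(path):
    player = MediaPlayer(path)
    while True:
        frame, val = player.get_frame()
        if val == 'eof':
            break             # the stream has finished
        if frame is None:
            time.sleep(0.01)  # no frame ready yet; avoid busy-waiting
    player.close_player()

play_blocking('track.wav')  # hypothetical file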
Example #19
class Window(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0
        self.loop = 1
        self.tag = self.flag = self.listtag = self.fulltag = True
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move(int((screen.width() - size.width()) / 2),
                  int((screen.height() - size.height()) / 2))

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_P:
            self.Listhide()
        if event.key() == Qt.Key_T:
            self.Fastback()
        if event.key() == Qt.Key_L:
            self.Loop()
        if event.key() == Qt.Key_Space:
            self.Play()
        if event.key() == Qt.Key_S:
            self.Stop()
        if event.key() == Qt.Key_F:
            self.Full()
        if event.key() == Qt.Key_J:
            self.Fastforward()
        if event.key() == Qt.Key_M:
            self.Mute()
        if event.key() == Qt.Key_A:
            self.svolume.setValue(self.svolume.value() + 1)
        if event.key() == Qt.Key_R:
            self.svolume.setValue(self.svolume.value() - 1)

    def eventFilter(self, sender, event):
        if event.type() == event.ChildRemoved:
            self.Moved()
        return False

    def Listmenu(self, position):
        lm = QMenu()
        addact = QAction("Add to playlist", self, triggered=self.Add)
        removeact = QAction("Remove from playlist", self, triggered=self.Remove)
        renameact = QAction('Rename', self, triggered=self.Rename)
        clearact = QAction('Clear playlist', self, triggered=self.Clear)
        saveact = QAction('Save current playlist', self, triggered=self.Saved)
        lm.addAction(addact)
        if self.list.itemAt(position):
            lm.addAction(removeact)
            lm.addAction(renameact)
        lm.addAction(clearact)
        lm.addAction(saveact)
        lm.exec_(self.list.mapToGlobal(position))

    def Listadd(self):
        self.l = []
        self.list.installEventFilter(self)
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for i in f:
                    i = i.strip()
                    name, _, filelist = i.partition(',')
                    self.list.addItem(name)
                    self.l.append(filelist)

    def Add(self):
        filelists, _ = QFileDialog.getOpenFileNames(self, 'Add to playlist', '.',
                                                    'Media files (*)')
        for filelist in filelists:
            name = filelist[filelist.rfind('/') + 1:filelist.rfind('.')]
            self.list.addItem(name)
            self.l.append(filelist)

    def Remove(self):
        ltmp = []
        for i in self.list.selectedIndexes():
            ltmp.append(i.row())
        ltmp.sort(reverse=True)
        for j in ltmp:
            self.list.takeItem(j)
            self.l.pop(j)

    def Rename(self):
        item = self.list.item(self.list.currentRow())
        item.setFlags(item.flags() | Qt.ItemIsEditable)
        self.list.editItem(item)

    def Clear(self):
        self.l = []
        self.list.clear()
        if os.path.isfile('CPlayerlist.txt'):
            os.remove('CPlayerlist.txt')

    def Drag(self):
        self.tmp1 = []
        self.tmp2 = self.l[:]
        for i in range(self.list.count()):
            self.tmp1.append(self.list.item(i).text())

    def Moved(self):
        for i in range(self.list.count()):
            if self.list.item(i).text() == self.tmp1[i]:
                continue
            else:
                self.l[i] = self.tmp2[self.tmp1.index(
                    self.list.item(i).text())]

    def Saved(self):
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write('%s,%s\n' % (self.list.item(i).text(), self.l[i]))
        QMessageBox.information(self, 'Save', 'Playlist saved successfully!')

    def Listhide(self):
        if self.listtag:
            self.frame.hide()
            self.listtag = False
        else:
            self.frame.show()
            self.listtag = True

    def Loop(self):
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('Loop playback (shortcut: l)')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('Disable looping (shortcut: l)')

    def Play(self):
        if self.flag:
            try:
                self.playitem = self.l[self.list.currentRow()]
                if os.path.isfile("%s" % self.playitem):
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer = QTimer()
                    self.timer.start(50)
                    self.timer.timeout.connect(self.Show)
                    self.steptimer = QTimer()
                    self.steptimer.start(1000)
                    self.steptimer.timeout.connect(self.Step)
                    self.flag = False
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('Pause (shortcut: Space)')
                else:
                    QMessageBox.warning(self, 'Error', 'File to play not found!')
            except Exception:
                QMessageBox.warning(self, 'Error', 'File to play not found!')
        else:
            if self.l[self.list.currentRow()] == self.playitem:
                self.player.toggle_pause()
                if self.player.get_pause():
                    self.timer.stop()
                    self.steptimer.stop()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('Play (shortcut: Space)')
                else:
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('Pause (shortcut: Space)')
            else:
                self.playitem = self.l[self.list.currentRow()]
                if os.path.isfile("%s" % self.playitem):
                    self.step = 0
                    self.stime.setValue(0)
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('Pause (shortcut: Space)')
                else:
                    QMessageBox.warning(self, 'Error', 'File to play not found!')

    def Show(self):
        if self.tag:
            self.player.set_volume(self.svolume.value() / 100)
        else:
            self.player.set_volume(0)
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        if self.mediatime:
            self.stime.setMaximum(int(self.mediatime))
            mediamin, mediasec = divmod(self.mediatime, 60)
            mediahour, mediamin = divmod(mediamin, 60)
            playmin, playsec = divmod(self.step, 60)
            playhour, playmin = divmod(playmin, 60)
            self.ltime.setText(
                '%02d:%02d:%02d/%02d:%02d:%02d' %
                (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        if not self.flag:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('Play (shortcut: Space)')
            self.lmedia.setPixmap(QPixmap(''))

    def Full(self):
        if self.fulltag:
            self.frame.hide()
            self.frame_2.hide()
            self.showFullScreen()
            self.bfull.setIcon(QIcon(r'img\exitfullscreen.png'))
            self.bfull.setToolTip('Exit fullscreen (shortcut: f)')
            self.fulltag = False
        else:
            self.frame.show()
            self.frame_2.show()
            self.showNormal()
            self.bfull.setIcon(QIcon(r'img\expandfullscreen.png'))
            self.bfull.setToolTip('Fullscreen (shortcut: f)')
            self.fulltag = True

    def Curvol(self):
        self.curvol = self.svolume.value()

    def Mute(self):
        if not self.flag:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('Unmute (shortcut: m)')
                self.tag = False
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('Mute (shortcut: m)')
                self.tag = True

    def Volume(self):
        if not self.flag:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('Unmute (shortcut: m)')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('Mute (shortcut: m)')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                self.step = 0
                self.stime.setValue(0)
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    self.Stop()
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        self.step = self.stime.value()

    def Slidemoved(self):
        if not self.flag:
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('Pause (shortcut: Space)')

    def Fastforward(self):
        if not self.flag:
            self.step += 10
            if self.step >= int(self.mediatime):
                self.stime.setValue(int(self.mediatime))
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('Pause (shortcut: Space)')

    def Fastback(self):
        if not self.flag:
            self.step -= 10
            if self.step <= 0:
                self.step = 0
                self.stime.setValue(0)
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('Pause (shortcut: Space)')
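One subtlety in Show() above: constructing a QImage without a bytesPerLine
argument assumes a stride of exactly width * 3 bytes, while FFmpeg frames may be
line-aligned. A defensive variant of the conversion (a sketch, assuming the
player's default rgb24 output):

from PyQt5.QtGui import QImage

def frame_to_qimage(img):
    # keep FFmpeg's line alignment and pass the real stride to QImage,
    # so padded lines are interpreted correctly
    data = img.to_bytearray(keep_align=True)[0]
    width, height = img.get_size()
    stride = img.get_linesizes(keep_align=True)[0]
    return QImage(data, width, height, stride, QImage.Format_RGB888)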
Example #20
class Window(QMainWindow, Ui_MainWindow):
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.Listadd()
        self.step = 0
        self.loop = 1
        self.tag = True
        self.flag = True
        self.hidetag = True
        screen = QDesktopWidget().screenGeometry()
        size = self.geometry()
        self.move(int((screen.width() - size.width()) / 2),
                  int((screen.height() - size.height()) / 2))

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_P:
            self.Listhide()
        if event.key() == Qt.Key_T:
            self.Fastback()
        if event.key() == Qt.Key_L:
            self.Loop()
        if event.key() == Qt.Key_Space:
            self.Play()
        if event.key() == Qt.Key_S:
            self.Stop()
        if event.key() == Qt.Key_F:
            self.Full()
        if event.key() == Qt.Key_J:
            self.Fastforward()
        if event.key() == Qt.Key_M:
            self.Mute()
        if event.key() == Qt.Key_A:
            self.svolume.setValue(self.svolume.value() + 1)
        if event.key() == Qt.Key_R:
            self.svolume.setValue(self.svolume.value() - 1)

    def Listadd(self):
        if os.path.isfile('CPlayerlist.txt'):
            with open('CPlayerlist.txt') as f:
                for filelist in f:
                    filelist = filelist.strip()
                    self.list.addItem(filelist)

    def Add(self):
        filelists, _ = QFileDialog.getOpenFileNames(self, 'Add to playlist', '.',
                                                    'Media files (*)')
        self.list.addItems(filelists)
        self.Listchanged()

    def Remove(self):
        self.list.takeItem(self.list.currentRow())
        self.Listchanged()

    def Clear(self):
        self.list.clear()
        if os.path.isfile('CPlayerlist.txt'):
            os.remove('CPlayerlist.txt')

    def Listchanged(self):
        with open('CPlayerlist.txt', 'w') as f:
            for i in range(self.list.count()):
                f.write(self.list.item(i).text() + '\n')

    def Listhide(self):
        if self.hidetag:
            self.frame.hide()
            self.hidetag = False
        else:
            self.frame.show()
            self.hidetag = True

    def Loop(self):
        if self.loop == 0:
            self.loop = 1
            self.bloop.setIcon(QIcon(r'img\withloop.png'))
            self.bloop.setToolTip('Loop playback (shortcut: l)')
        else:
            self.loop = 0
            self.bloop.setIcon(QIcon(r'img\withoutloop.png'))
            self.bloop.setToolTip('Disable looping (shortcut: l)')

    def Play(self):
        if self.flag:
            try:
                self.playitem = self.list.currentItem().text()
                if os.path.isfile("%s" % self.playitem):
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer = QTimer()
                    self.timer.start(50)
                    self.timer.timeout.connect(self.Show)
                    self.steptimer = QTimer()
                    self.steptimer.start(1000)
                    self.steptimer.timeout.connect(self.Step)
                    self.flag = False
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('Pause (shortcut: Space)')
                else:
                    QMessageBox.warning(self, 'Error', 'File to play not found!')
            except Exception:
                QMessageBox.warning(self, 'Error', 'File to play not found!')
        else:
            if self.list.currentItem().text() == self.playitem:
                self.player.toggle_pause()
                if self.player.get_pause():
                    self.timer.stop()
                    self.steptimer.stop()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('Play (shortcut: Space)')
                else:
                    self.timer.start()
                    self.steptimer.start()
                    self.bplay.setIcon(QIcon(r'img\pause.png'))
                    self.bplay.setToolTip('Pause (shortcut: Space)')
            else:
                self.playitem = self.list.currentItem().text()
                if os.path.isfile("%s" % self.playitem):
                    self.step = 0
                    self.stime.setValue(0)
                    self.player = MediaPlayer("%s" % self.playitem)
                    self.timer.start()
                    self.steptimer.start()
                else:
                    QMessageBox.warning(self, 'Error', 'File to play not found!')

    def Show(self):
        if self.tag:
            self.player.set_volume(self.svolume.value() / 100)
        else:
            self.player.set_volume(0)
        frame, self.val = self.player.get_frame()
        self.lmedia.setPixmap(QPixmap(''))
        if self.val != 'eof' and frame is not None:
            img, t = frame
            data = img.to_bytearray()[0]
            width, height = img.get_size()
            qimg = QImage(data, width, height, QImage.Format_RGB888)
            self.lmedia.setPixmap(QPixmap.fromImage(qimg))
        self.mediatime = self.player.get_metadata()['duration']
        if self.mediatime:
            self.stime.setMaximum(int(self.mediatime))
            mediamin, mediasec = divmod(self.mediatime, 60)
            mediahour, mediamin = divmod(mediamin, 60)
            playmin, playsec = divmod(self.step, 60)
            playhour, playmin = divmod(playmin, 60)
            self.ltime.setText(
                '%02d:%02d:%02d/%02d:%02d:%02d' %
                (playhour, playmin, playsec, mediahour, mediamin, mediasec))

    def Stop(self):
        if not self.flag:
            self.player.close_player()
            self.timer.stop()
            self.steptimer.stop()
            self.step = 0
            self.loop = 1
            self.flag = True
            self.stime.setValue(0)
            self.ltime.setText('')
            self.bplay.setIcon(QIcon(r'img\play.png'))
            self.bplay.setToolTip('Play (shortcut: Space)')
            self.lmedia.setPixmap(QPixmap(''))

    def Full(self):
        if self.hidetag:
            self.setWindowFlags(Qt.FramelessWindowHint)
            rect = QApplication.desktop().geometry()
            self.setGeometry(rect)
            self.frame.hide()
            self.frame_2.hide()
            self.show()
            self.bfull.setIcon(QIcon(r'img\exitfullscreen.png'))
            self.bfull.setToolTip('Exit fullscreen (shortcut: f)')
            self.hidetag = False
        else:
            self.setWindowFlags(Qt.Widget)
            self.setGeometry(0, 0, 1144, 705)
            self.frame.show()
            self.frame_2.show()
            screen = QDesktopWidget().screenGeometry()
            size = self.geometry()
            self.move(int((screen.width() - size.width()) / 2),
                      int((screen.height() - size.height()) / 2))
            self.show()
            self.bfull.setIcon(QIcon(r'img\expandfullscreen.png'))
            self.bfull.setToolTip('Fullscreen (shortcut: f)')
            self.hidetag = True

    def Curvol(self):
        self.curvol = self.svolume.value()

    def Mute(self):
        if not self.flag:
            if self.player.get_volume() != 0:
                self.player.set_volume(0)
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('Unmute (shortcut: m)')
                self.tag = False
            else:
                if self.svolume.value() != 0:
                    self.player.set_volume(self.svolume.value() / 100)
                else:
                    self.player.set_volume(self.curvol / 100)
                    self.svolume.setValue(self.curvol)
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('Mute (shortcut: m)')
                self.tag = True

    def Volume(self):
        if not self.flag:
            if self.svolume.value() == 0:
                self.bmute.setIcon(QIcon(r'img\withoutvolume.png'))
                self.bmute.setToolTip('Unmute (shortcut: m)')
            else:
                self.bmute.setIcon(QIcon(r'img\withvolume.png'))
                self.bmute.setToolTip('Mute (shortcut: m)')
            self.player.set_volume(self.svolume.value() / 100)

    def Step(self):
        if self.step >= int(self.mediatime):
            self.step = int(self.mediatime)
            if self.loop == 0:
                self.step = 0
                self.flag = True
                self.Play()
            else:
                if self.val == 'eof':
                    self.timer.stop()
                    self.steptimer.stop()
                    self.step = 0
                    self.loop = 1
                    self.flag = True
                    self.stime.setValue(0)
                    self.player.close_player()
                    self.bplay.setIcon(QIcon(r'img\play.png'))
                    self.bplay.setToolTip('Play (shortcut: Space)')
        else:
            self.step += 1
            self.stime.setValue(self.step)

    def Slidechanged(self):
        self.step = self.stime.value()

    def Slidemoved(self):
        self.timer.start()
        self.steptimer.start()
        self.player = MediaPlayer("%s" % self.playitem,
                                  ff_opts={'ss': self.step})
        self.bplay.setIcon(QIcon(r'img\pause.png'))
        self.bplay.setToolTip('Pause (shortcut: Space)')

    def Fastforward(self):
        if not self.flag:
            self.step += 10
            if self.step >= int(self.mediatime):
                self.stime.setValue(int(self.mediatime))
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('Pause (shortcut: Space)')

    def Fastback(self):
        if not self.flag:
            self.step -= 10
            if self.step <= 0:
                self.step = 0
            self.timer.start()
            self.steptimer.start()
            self.player = MediaPlayer("%s" % self.playitem,
                                      ff_opts={'ss': self.step})
            self.bplay.setIcon(QIcon(r'img\pause.png'))
            self.bplay.setToolTip('Pause (shortcut: Space)')
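Both Fastforward and Fastback above jump by recreating the MediaPlayer with
ff_opts={'ss': ...}, which reopens the file on every jump. ffpyplayer can also
seek in place (as example #22 below does); a minimal sketch:

def jump(player, seconds):
    # offset relative to the current position; accurate=False snaps to the
    # nearest keyframe, which is much faster than an exact seek
    player.seek(seconds, relative=True, accurate=False)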
Example #21
def main(_):
    with tf.Session() as sess:
        config = get_config(FLAGS)
        env = MyEnvironment(config)
        agent = Agent(config, env, sess)

        scale = 1
        # 1. first probe file, get metadata
        in_file = config.input_name
        out_file = config.output_name

        convert_num = -1
        ff_opts = {
            'out_fmt': 'yuv444p',
            'framedrop': False,
            'an': True,
            'sn': True,
        }
        player = MediaPlayer(in_file, ff_opts=ff_opts)
        # wait until probing has filled in the source video size
        while player.get_metadata()['src_vid_size'] == (0, 0):
            time.sleep(0.01)
        meta = player.get_metadata()
        width = meta['src_vid_size'][0]
        height = meta['src_vid_size'][1]
        width_out = width * scale
        height_out = height * scale

        out_opts = {
            'pix_fmt_in': 'yuv444p',
            'pix_fmt_out': 'yuv420p',
            'width_in': width_out,
            'height_in': height_out,
            'frame_rate': meta['frame_rate'],
            'codec': 'libx264',
            # 'aspect': '4:3',
        }
        lib_opts = {
            # config for BT.2020 HDR10
            # 'x265-params': 'range=pc:colorprim=bt2020:transfer=smpte2084:colormatrix=bt2020nc:crf=15',

            # config for x264 to encode video
            'x264-params': 'crf=15',
        }
        writer = MediaWriter(out_file, [out_opts],
                             lib_opts=lib_opts,
                             overwrite=True)

        frame_count = 0
        start_timestamp = 0
        while True:

            frame, val = player.get_frame()
            if val == 'eof':
                print('end of video')
                break
            elif frame is None:
                time.sleep(0.01)
            else:
                t1 = time.time() * 1000
                img, t = frame
                if frame_count == 0:
                    start_timestamp = t
                bufs = img.to_bytearray()
                assert len(bufs) >= 3

                Y = np.frombuffer(bufs[0], dtype=np.uint8)
                U = np.frombuffer(bufs[1], dtype=np.uint8)
                V = np.frombuffer(bufs[2], dtype=np.uint8)

                input_YUV = cv2.merge([Y, U, V])
                img = cv2.cvtColor(input_YUV, cv2.COLOR_YUV2RGB)
                img = np.array(img).reshape(height, width, 3)

                outputImg = agent.test_video(img)

                out = np.array(outputImg).reshape(height_out * width_out, 1, 3)
                YUV = cv2.cvtColor(out, cv2.COLOR_RGB2YUV)

                (Y, U, V) = cv2.split(YUV)

                bufs = []
                bufs.append(Y.tobytes())
                bufs.append(U.tobytes())
                bufs.append(V.tobytes())
                outputImg = Image(plane_buffers=bufs,
                                  pix_fmt='yuv444p',
                                  size=(width_out, height_out))
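                # rebase timestamps so the output starts at pts 0 (pts here is in seconds)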
                t = t - start_timestamp
                writer.write_frame(img=outputImg, pts=t, stream=0)

                t2 = time.time() * 1000
                frame_count += 1
                if frame_count % 30 == 0:
                    print('converted frame #', frame_count)
                #print('--pts:', t)
                if frame_count >= convert_num > 0:
                    break
                # if frame_count >= 1800:
                #     break
                # print("time: ", time.time()*1000-tt)

        player.close_player()
        writer.close()
Example #22
class PlayerThread(QThread):
    image_sig = pyqtSignal(QtGui.QImage)
    status_sig = pyqtSignal(bool)
    progress_sig = pyqtSignal(float)

    def __init__(self, parent):
        super().__init__(parent)
        self.label = parent.label
        self.image_sig.connect(parent.set_image)
        self.status_sig.connect(parent.set_status)
        self.progress_sig.connect(parent.set_progress)
        self.player = None
        self.duration = None
        self.progress = 0
        self.ratio_mode = Qt.KeepAspectRatio
        self.config = {}

    def set_video_name(self, video_name):
        if self.player is not None:
            self.player.close_player()
        self.player = MediaPlayer(video_name)
        self.status_sig.emit(self.player.get_pause())
        self.start()

    def set_config(self, config):
        self.config = config

    def close(self):
        if self.player is not None:
            self.player.close_player()
        self.quit()

    def pause(self):
        if self.player is not None:
            self.player.set_pause(True)
            self.status_sig.emit(True)

    def toggle_pause(self):
        if self.player is not None:
            self.player.toggle_pause()
            self.status_sig.emit(self.player.get_pause())

    def next_prev(self, is_forward):
        if self.player is not None:
            chunk_position = self.find_chunk(self.progress)
            if is_forward:
                if chunk_position < self.config['total'] - 1:
                    chunk_position += 1
                    self.player.seek(self.config['chunks'][chunk_position][0] / 1000, relative=False, accurate=False)
            else:
                if chunk_position > 0:
                    chunk_position -= 1
                self.player.seek(self.config['chunks'][chunk_position][0] / 1000, relative=False, accurate=False)

    def find_chunk(self, pts):
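        # binary search over chunk start times; config['chunks'] is assumed to be
        # a list of (start_ms, ...) entries sorted by start time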
        if self.config:
            pts_ms = int(1000 * pts)
            front = 0
            rear = self.config['total'] - 1
            chunks = self.config['chunks']
            while front != rear:
                middle = (front + rear) // 2
                if pts_ms > chunks[middle][0]:
                    if pts_ms < chunks[middle + 1][0]:
                        break
                    else:
                        front = middle + 1
                else:
                    rear = middle
            return (front + rear) // 2
        else:
            return 0

    def seek(self, ratio):
        if self.duration is not None:
            pts = ratio * self.duration
            self.player.seek(pts, relative=False, accurate=False)

    def image_stretch(self, is_stretch):
        if is_stretch:
            self.ratio_mode = Qt.IgnoreAspectRatio
        else:
            self.ratio_mode = Qt.KeepAspectRatio

    def run(self):
        val = ''
        while val != 'eof':
            frame, val = self.player.get_frame()
            if self.duration is None:
                self.duration = self.player.get_metadata()['duration']
            if val != 'eof' and frame is not None:
                img, t = frame
                if img is not None:
                    byte = img.to_bytearray()[0]
                    width, height = img.get_size()
                    convert_to_qt_format = QtGui.QImage(byte, width, height, QImage.Format_RGB888)
                    p = convert_to_qt_format.scaled(self.label.width(), self.label.height(), self.ratio_mode)
                    self.image_sig.emit(p)
                    self.progress = t
                    if self.duration is not None:
                        self.progress_sig.emit(t / self.duration)
                    time.sleep(val)  # val is the suggested delay before the next frame
Example #23
class AMonitorService(AService):
    def __init__(self, cb=None, url=None):
        super().__init__(cb)
        self.url = url
        self.port = url.split(':')[-1]
        self.tryTimer = None
        self.player = None

    def install(self):
        sdk = getAndroidSdk()
        if sdk < 0:
            time.sleep(1)
            return
        curdir = getCurrentPath()
        ret = ecall(adb_path() + ' install -r -g ' + curdir +
                    '/app/MonitorService.apk')

    def _start(self):
        cmds = (adb_path() +
                ' shell am start com.rock_chips.monitorservice/.MainActivity')
        self.popen = Popen(cmds.split(), stdout=PIPE, stderr=STDOUT)
        Timer(0.1, self._processStartResult).start()

    def start(self):
        self.needStop = False
        # try connecting first so startup is fast if the service is already running
        self.connect()
        self._start()

    def _processStartResult(self):
        fd = self.popen.stdout
        line1 = fd.readline().decode()
        line2 = fd.readline().decode()
        if line1.startswith('Starting') and not line2.startswith('Error'):
            return
        if self.needStop:
            return
        # try install and start again
        self.popen = None
        self.install()
        self._start()

    def stop(self):
        self.popen = None
        self.needStop = True
        self.disconnect()

    def connect(self):
        if self.needStop:
            return
        if self.url is None:
            print('need url for connect')
            return

        ecall(adb_path() + ' forward tcp:' + self.port + ' tcp:' + self.port)
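        # keep startup latency low: a tiny probe window plus FFmpeg's low_delay flag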
        lib_opts = {'analyzeduration': '32', 'flags': 'low_delay'}
        if self.player:
            print("monitor try reconnect!")
            self.player.close_player()
            self.player = None
        self.player = MediaPlayer(self.url,
                                  callback=self._mediaPlayerCallback,
                                  lib_opts=lib_opts)
        self.connectedTimer = Timer(0.1, self._processConnectResult)
        self.connectedTimer.start()

    def _mediaPlayerCallback(self, selector, value):
        if self.connectedTimer:
            self.connectedTimer.cancel()
            self.connectedTimer = None

        if selector in ('read:error', 'eof'):
            super().disconnect()
            self.tryTimer = Timer(1, self.connect)
            self.tryTimer.start()

    def _processConnectResult(self):
        super().connect()

    def disconnect(self):
        if self.tryTimer:
            self.tryTimer.cancel()
            self.tryTimer = None
        if self.player:
            self.player.close_player()
            self.player = None
        super().disconnect()
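The reconnect logic above reduces to a small reusable pattern: open the stream,
watch the player callback for errors, and schedule a retry. A minimal sketch
(the URL is hypothetical):

from threading import Timer
from ffpyplayer.player import MediaPlayer

class AutoReconnectPlayer:
    def __init__(self, url, retry_delay=1.0):
        self.url = url
        self.retry_delay = retry_delay
        self.player = None
        self.connect()

    def connect(self):
        if self.player is not None:
            self.player.close_player()
        self.player = MediaPlayer(self.url, callback=self._callback)

    def _callback(self, selector, value):
        # ffpyplayer reports stream problems through this callback; retry
        # after a short delay on read errors or end of stream
        if selector in ('read:error', 'eof'):
            Timer(self.retry_delay, self.connect).start()

player = AutoReconnectPlayer('tcp://127.0.0.1:5000')  # hypothetical URL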