def audio_url(self):
    """
    audio_url is the only property retrieved using youtube_dl, it's much
    more expensive than the rest.

    Resolves asynchronously: installs a fresh ThreadingFuture on
    self._audio_url and schedules a worker via ThreadPool; readers block
    on the future until it is set (None signals failure).
    """
    self._audio_url = pykka.ThreadingFuture()

    def job():
        try:
            # download=False: only probe metadata, never fetch the media.
            info = youtube_dl.YoutubeDL({
                "format": "bestaudio/best",
                "proxy": self.proxy,
                "nocheckcertificate": True,
            }).extract_info(
                url="https://www.youtube.com/watch?v=%s" % self.id,
                download=False,
                ie_key=None,
                extra_info={},
                process=True,
                force_generic_extractor=False,
            )
        except Exception as e:
            logger.error('audio_url error "%s"', e)
            # Resolve the future with None so waiters are not blocked forever.
            self._audio_url.set(None)
            return
        self._audio_url.set(info["url"])

    ThreadPool.run(job)
def run(proxy, func, timeout=60.0):
    """
    Helper asking the zookeeper proxy actor to run the specified closure and
    blocking until either the timeout is reached or a response is received.

    :type proxy: string
    :type func: callable
    :type timeout: float
    :param proxy: our ancillary zookeeper proxy actor
    :param func: the closure to run within the proxy actor
    :param timeout: optional timeout in seconds
    :rtype: dict

    :raises AssertionError: if no response arrives within *timeout* seconds
    """
    try:
        latch = pykka.ThreadingFuture()
        proxy.tell({'request': 'execute', 'latch': latch, 'function': func})
        # NOTE: the original constructed a throwaway threading Event() here;
        # it had no effect and has been removed.
        out = latch.get(timeout=timeout)
        # The proxy may hand back an exception instance instead of raising
        # in its own thread; surface it to the caller here.
        if isinstance(out, Exception):
            raise out
        return out
    except Timeout:
        # Raise explicitly instead of `assert 0, ...`: asserts are stripped
        # under `python -O`, which would make a timeout silently return None.
        raise AssertionError('request timeout')
def add(obj):
    """Ensure each name in ``fields`` has a pending future stored on ``obj``.

    A future is created under the ``_<name>`` attribute only when missing.
    Returns True when at least one new future was created.
    """
    created_any = False
    attrs = obj.__dict__
    for name in fields:
        key = "_" + name
        if key not in attrs:
            attrs[key] = pykka.ThreadingFuture()
            created_any = True
    return created_any
def thumbnails(self):
    """Resolve thumbnail URLs immediately, but expose them through a future
    so the interface stays uniform with Playlist.thumbnails (truly async).
    """
    self._thumbnails = pykka.ThreadingFuture()
    urls = []
    for quality in ['default', 'mqdefault', 'hqdefault']:
        urls.append('https://i.ytimg.com/vi/%s/%s.jpg' % (self.id, quality))
    self._thumbnails.set(urls)
def thumbnails(self):
    """Resolve thumbnail Images synchronously, wrapped in a future for
    uniformity with Playlist.thumbnails.

    The video id is the last dot-separated component of self.id.
    """
    video_id = self.id.split(".")[-1]
    self._thumbnails = pykka.ThreadingFuture()
    images = []
    for size in ("default", "mqdefault", "hqdefault"):
        images.append(Image(uri=f"https://i.ytimg.com/vi/{video_id}/{size}.jpg"))
    self._thumbnails.set(images)
def search(self, query=None, uris=None):
    """
    Search Spotify for tracks/albums/artists matching ``query``.

    With no query, returns every known track. A ``uri`` field in the query
    is treated as a direct lookup instead of a search. Otherwise the query
    is translated and sent to libspotify; the callback resolves a future
    which this method blocks on (up to self._timeout seconds).

    Returns a SearchResult in every code path; failures and timeouts yield
    an empty SearchResult rather than raising.
    """
    # TODO Only return results within URI roots given by ``uris``
    if not query:
        return self._get_all_tracks()
    uris = query.get('uri', [])
    if uris:
        # Direct URI lookup path: no search round-trip needed.
        tracks = []
        for uri in uris:
            tracks += self.lookup(uri)
        if len(uris) == 1:
            uri = uris[0]
        else:
            uri = 'spotify:search'
        return SearchResult(uri=uri, tracks=tracks)
    spotify_query = self._translate_search_query(query)
    if not spotify_query:
        logger.debug('Spotify search aborted due to empty query')
        return SearchResult(uri='spotify:search')
    logger.debug('Spotify search query: %s' % spotify_query)
    # The libspotify search API is callback-based; bridge it to a blocking
    # call with a ThreadingFuture the callback resolves.
    future = pykka.ThreadingFuture()

    def callback(results, userdata=None):
        # Runs on the spotify session thread; translate results to mopidy
        # models and hand them back through the future.
        search_result = SearchResult(
            uri='spotify:search:%s' % (
                urllib.quote(results.query().encode('utf-8'))),
            albums=[
                translator.to_mopidy_album(a) for a in results.albums()
            ],
            artists=[
                translator.to_mopidy_artist(a) for a in results.artists()
            ],
            tracks=[
                translator.to_mopidy_track(t) for t in results.tracks()
            ])
        future.set(search_result)

    if not self.backend.spotify.connected.is_set():
        logger.debug('Not connected: Spotify search cancelled')
        return SearchResult(uri='spotify:search')
    self.backend.spotify.session.search(spotify_query, callback,
                                        album_count=200, artist_count=200,
                                        track_count=200)
    try:
        return future.get(timeout=self._timeout)
    except pykka.Timeout:
        logger.debug('Timeout: Spotify search did not return in %ds',
                     self._timeout)
        return SearchResult(uri='spotify:search')
def videos(self):
    """
    loads the list of videos of a playlist using one API call for every
    50 fetched videos. For every page fetched, Video.load_info is called
    to start loading video info in a separate thread.

    Resolves asynchronously: readers block on self._videos until the
    background job sets it (always a list, possibly partial on API errors).
    """
    self._videos = pykka.ThreadingFuture()

    def job():
        data = {"items": []}
        page = ""
        while (page is not None
               and len(data["items"]) < self.playlist_max_videos):
            try:
                # fetch at most 50 per call, and never more than the
                # remaining budget allows
                max_results = min(
                    int(self.playlist_max_videos) - len(data["items"]), 50)
                result = self.api.list_playlistitems(
                    self.id, page, max_results)
            except Exception as e:
                logger.error('list playlist items error "%s"', e)
                break
            if "error" in result:
                # Single lazy-formatted message: the original passed the
                # details as a spurious second argument with no placeholder
                # in the format string, so they were never logged.
                logger.error(
                    "error in list playlist items data for "
                    "playlist %s, page %s", self.id, page,
                )
                break
            page = result.get("nextPageToken") or None
            data["items"].extend(result["items"])
            # enforce the cap in case the API returned more than requested
            del data["items"][int(self.playlist_max_videos):]

        myvideos = []
        for item in data["items"]:
            set_api_data = ["title", "channel"]
            if "contentDetails" in item:
                set_api_data.append("length")
            if "thumbnails" in item["snippet"]:
                set_api_data.append("thumbnails")
            video = Video.get(item["snippet"]["resourceId"]["videoId"])
            video._set_api_data(set_api_data, item)
            myvideos.append(video)

        # start loading video info in the background
        Video.load_info([
            x for _, x in zip(range(self.playlist_max_videos), myvideos)
        ])  # noqa: E501
        self._videos.set([
            x for _, x in zip(range(self.playlist_max_videos), myvideos)
        ])  # noqa: E501

    ThreadPool.run(job)
def _set_api_data(self, fields, item):
    """
    sets the given 'fields' of 'self', based on the 'item' data retrieved
    through the API

    Each field is backed by a ThreadingFuture stored under ``_<field>``;
    a future is created on demand and set exactly once — fields whose
    future already holds a value are skipped. A falsy ``item`` resolves
    every requested field to None.
    """
    for k in fields:
        _k = "_" + k
        future = self.__dict__.get(_k)
        if not future:
            future = self.__dict__[_k] = pykka.ThreadingFuture()
        if not future._queue.empty():  # hack, no public is_set()
            continue

        if not item:
            val = None
        elif k == "title":
            val = item["snippet"]["title"]
        elif k == "channel":
            val = item["snippet"]["channelTitle"]
        elif k == "length":
            # convert PT1H2M10S to 3730
            # ISO-8601 duration; every component is optional, missing
            # groups count as zero.
            m = re.search(
                r"P((?P<weeks>\d+)W)?" +
                r"((?P<days>\d+)D)?" +
                r"T((?P<hours>\d+)H)?" +
                r"((?P<minutes>\d+)M)?" +
                r"((?P<seconds>\d+)S)?",
                item["contentDetails"]["duration"],
            )
            val = (int(m.group("weeks") or 0) * 604800 +
                   int(m.group("days") or 0) * 86400 +
                   int(m.group("hours") or 0) * 3600 +
                   int(m.group("minutes") or 0) * 60 +
                   int(m.group("seconds") or 0))
        elif k == "video_count":
            # clamp the reported count to the configured maximum
            val = min(
                int(item["contentDetails"]["itemCount"]),
                int(self.playlist_max_videos),
            )
        elif k == "thumbnails":
            # keep only the standard sizes, drop exotic ones
            val = [
                val["url"]
                for (key, val) in item["snippet"]["thumbnails"].items()
                if key in ["default", "medium", "high"]
            ]

        future.set(val)
def videos(self):
    """Load the playlist's videos asynchronously, one API call per page of
    up to 50 items; readers block on self._videos until the job resolves it.
    Per-page video info loading is kicked off in the background as each
    page arrives.
    """
    self._videos = pykka.ThreadingFuture()

    def job():
        limit = self.playlist_max_videos
        collected = []
        token = ''
        while token is not None and len(collected) < limit:
            try:
                batch_size = min(limit - len(collected), 50)
                data = self.api.list_playlistitems(self.id, token,
                                                   batch_size)
            except Exception as e:
                logger.error('list playlist items error "%s"', e)
                break
            if 'error' in data:
                logger.error('error in list playlist items data')
                break
            token = data.get('nextPageToken') or None

            batch = []
            for item in data['items']:
                wanted = ['title', 'channel']
                if 'contentDetails' in item:
                    wanted.append('length')
                if 'thumbnails' in item['snippet']:
                    wanted.append('thumbnails')
                video = Video.get(item['snippet']['resourceId']['videoId'])
                video._set_api_data(wanted, item)
                batch.append(video)
            collected += batch

            # start loading video info for this batch in the background
            Video.load_info(batch[:limit])

        self._videos.set(collected[:limit])

    ThreadPool.run(job)
def _set_api_data(self, fields, item):
    """Set the given ``fields`` of ``self`` from API ``item`` data.

    Each field is backed by a ThreadingFuture under ``_<field>``; a future
    is created on demand and set exactly once — already-resolved fields
    are skipped. A falsy ``item`` resolves every requested field to None.
    """
    for k in fields:
        _k = '_' + k
        future = self.__dict__.get(_k)
        if not future:
            future = self.__dict__[_k] = pykka.ThreadingFuture()
        if not future._queue.empty():  # hack, no public is_set()
            continue

        if not item:
            val = None
        elif k == 'title':
            val = item['snippet']['title']
        elif k == 'channel':
            val = item['snippet']['channelTitle']
        elif k == 'length':
            # convert PT1H2M10S to 3730
            # ISO-8601 duration; all components optional, missing → 0.
            m = re.search(
                r'P((?P<weeks>\d+)W)?' +
                r'((?P<days>\d+)D)?' +
                r'T((?P<hours>\d+)H)?' +
                r'((?P<minutes>\d+)M)?' +
                r'((?P<seconds>\d+)S)?',
                item['contentDetails']['duration'])
            val = (int(m.group('weeks') or 0) * 604800 +
                   int(m.group('days') or 0) * 86400 +
                   int(m.group('hours') or 0) * 3600 +
                   int(m.group('minutes') or 0) * 60 +
                   int(m.group('seconds') or 0))
        elif k == 'video_count':
            # clamp the reported count to the configured maximum
            val = min(item['contentDetails']['itemCount'],
                      self.playlist_max_videos)
        elif k == 'thumbnails':
            # keep only the standard sizes
            val = [
                val['url']
                for (key, val) in item['snippet']['thumbnails'].items()
                if key in ['default', 'medium', 'high']
            ]

        future.set(val)
def audio_url(self):
    """Resolve the playable stream URL via youtube_dl on a worker thread.

    Readers block on self._audio_url until the job sets it; None signals
    that extraction failed.
    """
    self._audio_url = pykka.ThreadingFuture()

    def job():
        options = {
            'format': 'mp4/bestaudio/vorbis/m4a/best',
            'proxy': self.proxy,
            'nocheckcertificate': True,
        }
        watch_url = "https://www.youtube.com/watch?v=%s" % self.id
        try:
            downloader = youtube_dl.YoutubeDL(options)
            info = downloader.extract_info(
                url=watch_url,
                download=False,
                ie_key=None,
                extra_info={},
                process=True,
                force_generic_extractor=False,
            )
        except Exception as e:
            logger.error('audio_url error "%s"', e)
            self._audio_url.set(None)
        else:
            self._audio_url.set(info['url'])

    ThreadPool.run(job)
def run(proxy, func, timeout=None):
    """
    Helper asking the zookeeper proxy actor to run the specified closure and
    blocking until either the timeout is reached or a response is received.

    :param proxy: our ancillary zookeeper proxy actor
    :param func: the closure to run within the proxy actor
    :param timeout: optional timeout in seconds (None blocks indefinitely)

    :raises AssertionError: if no response arrives within *timeout* seconds
    """
    try:
        latch = pykka.ThreadingFuture()
        proxy.tell(
            {
                'request': 'execute',
                'latch': latch,
                'function': func
            })
        # NOTE: a throwaway threading Event() was constructed here in the
        # original; it had no effect and has been removed.
        out = latch.get(timeout=timeout)
        # The proxy may hand back an exception instance instead of raising
        # in its own thread; surface it to the caller here.
        if isinstance(out, Exception):
            raise out
        return out
    except Timeout:
        # Raise explicitly instead of `assert 0, ...`: asserts are stripped
        # under `python -O`, which would make a timeout silently return None.
        raise AssertionError('request timeout')