Example #1
    def get_token(self, name, variant=None, url=None, roaming=False):
        """Get a token"""
        # Try to get a cached token
        if not roaming:
            cache_file = self._get_token_filename(name, variant)
            token = get_cache(cache_file, cache_dir=self._TOKEN_CACHE_DIR)
            if token:
                return token.get(name)
        # Try to refresh a token
        if variant != 'roaming' and name in ('X-VRT-Token', 'vrtlogin-at', 'vrtlogin-rt'):
            cache_file = self._get_token_filename('vrtlogin-rt')
            refresh_token = get_cache(cache_file, cache_dir=self._TOKEN_CACHE_DIR)
            if refresh_token:
                token = self._get_fresh_token(refresh_token.get('vrtlogin-rt'), name)
                if token:
                    # Save token to cache
                    from json import dumps
                    cache_file = self._get_token_filename(next(iter(token)), variant)
                    update_cache(cache_file, dumps(token), self._TOKEN_CACHE_DIR)
                    return token.get(name)
        # Get a new token
        token = self._get_new_token(name, variant, url, roaming)
        if token:
            # Save token to cache
            from json import dumps
            cache_file = self._get_token_filename(next(iter(token)), variant)
            update_cache(cache_file, dumps(token), self._TOKEN_CACHE_DIR)
            return token.get(name)
        return None
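All of these examples revolve around the same pair of cache helpers, which are defined elsewhere in the add-on. As a point of reference, here is a minimal sketch of the behaviour the examples assume: a JSON file cache keyed by filename, with an optional ttl in seconds and an optional cache_dir. The constants and defaults below are hypothetical, not the add-on's actual implementation.

# Minimal sketch (Python 3) of the cache helpers these examples assume;
# the real add-on resolves paths and encodings differently.
import os
import time
from json import dumps, load

CACHE_DIR = '/tmp/cache'  # Hypothetical default; the add-on resolves this per profile


def get_cache(cache_file, ttl=None, cache_dir=CACHE_DIR):
    """Return the parsed JSON cache, or None if it is missing, stale or corrupt."""
    path = os.path.join(cache_dir, cache_file)
    if not os.path.isfile(path):
        return None
    if ttl is not None and time.time() - os.path.getmtime(path) > ttl:
        return None  # Cache is older than ttl seconds
    with open(path) as fdesc:
        try:
            return load(fdesc)
        except ValueError:  # Corrupt cache file
            return None


def update_cache(cache_file, data, cache_dir=CACHE_DIR):
    """Write data (a JSON string or a serializable object) to the cache."""
    if not isinstance(data, str):
        data = dumps(data)
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    with open(os.path.join(cache_dir, cache_file), 'w') as fdesc:
        fdesc.write(data)

Note that some examples pass a pre-serialized dumps(...) string and others pass the parsed object directly; the sketch above accepts both.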
Example #2
    def refresh(self, ttl=None):
        ''' Get a cached copy or newer resumepoints from VRT, or fall back to a cached file '''
        if not self.is_activated():
            return
        resumepoints_json = get_cache('resume_points.json', ttl)
        if not resumepoints_json:
            from tokenresolver import TokenResolver
            xvrttoken = TokenResolver().get_xvrttoken(token_variant='user')
            if xvrttoken:
                headers = {
                    'authorization': 'Bearer ' + xvrttoken,
                    'content-type': 'application/json',
                    'Referer': 'https://www.vrt.be/vrtnu',
                }
                req = Request('https://video-user-data.vrt.be/resume_points', headers=headers)
                log(2, 'URL get: https://video-user-data.vrt.be/resume_points')
                from json import load
                try:
                    resumepoints_json = load(urlopen(req))
                except (TypeError, ValueError):  # No JSON object could be decoded
                    # Fall back to cached resumepoints, regardless of age
                    resumepoints_json = get_cache('resume_points.json', ttl=None)
                else:
                    update_cache('resume_points.json', resumepoints_json)
        if resumepoints_json:
            self._resumepoints = resumepoints_json
Example #3
    def playing_now(self, channel):
        ''' Return the EPG information for what is playing now '''
        now = datetime.now(dateutil.tz.tzlocal())
        epg = now
        # Daily EPG information shows information from 6AM until 6AM
        if epg.hour < 6:
            epg += timedelta(days=-1)
        # Try the cache if it is fresh
        schedule = get_cache('schedule.today.json', ttl=60 * 60)
        if not schedule:
            from json import load
            epg_url = epg.strftime(self.VRT_TVGUIDE)
            log(2, 'URL get: {url}', url=epg_url)
            schedule = load(urlopen(epg_url))
            update_cache('schedule.today.json', schedule)

        entry = find_entry(CHANNELS, 'name', channel)
        if not entry:
            return ''

        episodes = schedule.get(entry.get('id'), [])

        for episode in episodes:
            start_date = dateutil.parser.parse(episode.get('startTime'))
            end_date = dateutil.parser.parse(episode.get('endTime'))
            if start_date <= now <= end_date:  # Now playing
                return episode.get('title')
        return ''
Example #4
    def _get_stream_json(self, api_data, roaming=False):
        """Get JSON with stream details from VRT API"""
        if not api_data:
            return None

        # Try cache for livestreams
        if api_data.is_live_stream and not roaming:
            filename = api_data.video_id + '.json'
            data = get_cache(filename)
            if data:
                return data

        token_url = api_data.media_api_url + '/tokens'
        if api_data.is_live_stream:
            playertoken = self._tokenresolver.get_token('vrtPlayerToken', 'live', token_url, roaming=roaming)
        else:
            playertoken = self._tokenresolver.get_token('vrtPlayerToken', 'ondemand', token_url, roaming=roaming)

        # Construct api_url and get video json
        if not playertoken:
            return None
        api_url = api_data.media_api_url + '/videos/' + api_data.publication_id + \
            api_data.video_id + '?vrtPlayerToken=' + playertoken + '&client=' + api_data.client

        stream_json = get_url_json(url=api_url)

        # Update livestream cache if we have a livestream
        if stream_json and api_data.is_live_stream:
            from json import dumps
            # Warning: the drmExpired key in stream_json cannot be used because it reports a wrong 6-hour TTL for the VUDRM tokens.
            # In practice these tokens expire after only two hours, so we set the expirationDate value accordingly.
            stream_json.update(expirationDate=generate_expiration_date(hours=2), vualto_license_url=self._get_vualto_license_url())
            cache_file = api_data.video_id + '.json'
            update_cache(cache_file, dumps(stream_json))
        return stream_json
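generate_expiration_date() is not shown in any of these examples. Judging from how its result is stored alongside the cached stream JSON, a plausible implementation is an ISO-8601 timestamp a number of hours in the future; the sketch below is hypothetical and may differ from the real helper in format or timezone handling.

# Hypothetical sketch of generate_expiration_date(); not the add-on's actual helper.
from datetime import datetime, timedelta


def generate_expiration_date(hours=2):
    """Return an ISO-8601 timestamp `hours` from now, used as a cache expiry."""
    return (datetime.utcnow() + timedelta(hours=hours)).isoformat()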
Example #5
    def get_tvshows(self, category=None, channel=None, feature=None):
        ''' Get all TV shows for a given category, channel or feature, optionally filtered by favorites '''
        params = dict()

        if category:
            params['facets[categories]'] = category
            cache_file = 'category.%s.json' % category

        if channel:
            params['facets[programBrands]'] = channel
            cache_file = 'channel.%s.json' % channel

        if feature:
            params['facets[programTags.title]'] = feature
            cache_file = 'featured.%s.json' % feature

        # If no facet-selection is done, we return the 'All programs' listing
        if not category and not channel and not feature:
            params['facets[transcodingStatus]'] = 'AVAILABLE'  # Required for getting results in Suggests API
            cache_file = 'programs.json'
        tvshows = get_cache(cache_file, ttl=60 * 60)  # Try the cache if it is fresh
        if not tvshows:
            from json import load
            querystring = '&'.join('{}={}'.format(key, value) for key, value in list(params.items()))
            suggest_url = self._VRTNU_SUGGEST_URL + '?' + querystring
            log(2, 'URL get: {url}', url=unquote(suggest_url))
            tvshows = load(urlopen(suggest_url))
            update_cache(cache_file, tvshows)

        return tvshows
Example #6
    def get_episode_items(self, date, channel):
        ''' Show episodes for a given date and channel '''
        now = datetime.now(dateutil.tz.tzlocal())
        epg = self.parse(date, now)
        epg_url = epg.strftime(self.VRT_TVGUIDE)

        self._favorites.refresh(ttl=60 * 60)

        cache_file = 'schedule.%s.json' % date
        if date in ('today', 'yesterday', 'tomorrow'):
            # Try the cache if it is fresh
            schedule = get_cache(cache_file, ttl=60 * 60)
            if not schedule:
                from json import load
                log(2, 'URL get: {url}', url=epg_url)
                schedule = load(urlopen(epg_url))
                update_cache(cache_file, schedule)
        else:
            from json import load
            log(2, 'URL get: {url}', url=epg_url)
            schedule = load(urlopen(epg_url))

        entry = find_entry(CHANNELS, 'name', channel)
        if entry:
            episodes = schedule.get(entry.get('id'), [])
        else:
            episodes = []
        episode_items = []
        for episode in episodes:
            label = self._metadata.get_label(episode)

            context_menu = []
            path = None
            if episode.get('url'):
                from statichelper import add_https_method, url_to_program
                video_url = add_https_method(episode.get('url'))
                path = url_for('play_url', video_url=video_url)
                program = url_to_program(episode.get('url'))
                context_menu, favorite_marker, watchlater_marker = self._metadata.get_context_menu(episode, program, cache_file)
                label += favorite_marker + watchlater_marker

            info_labels = self._metadata.get_info_labels(episode, date=date, channel=entry)
            info_labels['title'] = label

            episode_items.append(TitleItem(
                title=label,
                path=path,
                art_dict=self._metadata.get_art(episode),
                info_dict=info_labels,
                is_playable=True,
                context_menu=context_menu,
            ))
        return episode_items
Example #7
    def refresh_watchlater(self, ttl=None):
        """Get a cached copy or a newer watchLater list from VRT, or fall back to a cached file"""
        if not self.is_activated():
            return
        watchlater_dict = get_cache(self.WATCHLATER_CACHE_FILE, ttl)
        if not watchlater_dict:
            watchlater_dict = self._generate_watchlater_dict(self.get_watchlater())
        if watchlater_dict is not None:
            from json import dumps
            self._watchlater = watchlater_dict
            update_cache(self.WATCHLATER_CACHE_FILE, dumps(self._watchlater))
Example #8
    def refresh(self, ttl=None):
        """Get a cached copy or newer favorites from VRT, or fall back to a cached file"""
        if not self.is_activated():
            return
        favorites_dict = get_cache(self.FAVORITES_CACHE_FILE, ttl)
        if not favorites_dict:
            favorites_dict = self._generate_favorites_dict(self.get_favorites())
        if favorites_dict is not None:
            from json import dumps
            self._favorites = favorites_dict
            update_cache(self.FAVORITES_CACHE_FILE, dumps(self._favorites))
Example #9
    def _get_vualto_license_url(self):
        """Get Widevine license URL from Vualto API"""
        # Try cache
        data = get_cache('vualto_license_url.json')
        if data:
            return data.get('la_url')

        vualto_license_url = get_url_json(url=self._VUPLAY_API_URL, fail={}).get('drm_providers', {}).get('widevine', {})
        if vualto_license_url:
            from json import dumps
            vualto_license_url.update(expirationDate=generate_expiration_date(hours=168))
            update_cache('vualto_license_url.json', dumps(vualto_license_url))
        return vualto_license_url.get('la_url')
Example #10
    def refresh(self, ttl=None):
        """Get a cached copy or newer resumepoints from VRT, or fall back to a cached file"""
        if not self.is_activated():
            return
        resumepoints_json = get_cache('resume_points.json', ttl)
        if not resumepoints_json:
            resumepoints_url = 'https://video-user-data.vrt.be/resume_points'
            headers = self.resumepoint_headers()
            if not headers:
                return
            resumepoints_json = get_url_json(url=resumepoints_url, cache='resume_points.json', headers=headers)
        if resumepoints_json is not None:
            self._data = resumepoints_json
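Unlike Example #2, this variant delegates the cache fallback to get_url_json(): the helper is expected to fetch and parse JSON, update the named cache file on success, and fall back to that file on failure. A sketch of the assumed contract, reusing the helpers sketched under Example #1; the error handling here is illustrative, not the add-on's actual code.

# Sketch of the assumed get_url_json() contract (simplified).
from json import load
try:  # Python 3
    from urllib.request import Request, urlopen
except ImportError:  # Python 2
    from urllib2 import Request, urlopen


def get_url_json(url, cache=None, headers=None, fail=None):
    """Fetch and parse JSON from url; fall back to the cache file (or fail) on error."""
    try:
        data = load(urlopen(Request(url, headers=headers or {})))
    except (TypeError, ValueError, IOError):  # Invalid JSON or network failure
        if cache:
            return get_cache(cache, ttl=None) or fail
        return fail
    if cache:
        update_cache(cache, data)
    return data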
Example #11
def get_video_attributes(vrtnu_url):
    """Return a dictionary with video attributes by scraping the VRT NU website"""

    # Get cache
    cache_file = 'web_video_attrs_multi.json'
    video_attrs_multi = get_cache(cache_file, ttl=ttl('indirect'))
    if not video_attrs_multi:
        video_attrs_multi = {}
    if vrtnu_url in video_attrs_multi:
        return video_attrs_multi[vrtnu_url]

    # Scrape video attributes
    from bs4 import BeautifulSoup, SoupStrainer
    try:
        response = open_url(vrtnu_url, raise_errors='all')
    except HTTPError as exc:
        log_error('Web scraping video attributes failed: {error}', error=exc)
        return None
    if response is None:
        return None
    html_page = response.read()
    strainer = SoupStrainer(
        ['section', 'div'],
        {'class': ['video-detail__player', 'livestream__inner']})
    soup = BeautifulSoup(html_page, 'html.parser', parse_only=strainer)
    item = None
    epg_channel = None
    if '#epgchannel=' in vrtnu_url:
        epg_channel = vrtnu_url.split('#epgchannel=')[1]
    for item in soup:
        if epg_channel and epg_channel == item.get('data-epgchannel'):
            break
    if not epg_channel and len(soup) > 1:
        return None
    try:
        video_attrs = item.find(name='nui-media').attrs
    except AttributeError as exc:
        log_error('Web scraping video attributes failed: {error}', error=exc)
        return None

    # Update cache
    video_attrs_multi[vrtnu_url] = video_attrs
    from json import dumps
    update_cache(cache_file, dumps(video_attrs_multi))

    return video_attrs
Example #12
    def list_categories(self):
        ''' Construct a list of category ListItems '''
        # Try the cache if it is fresh
        categories = get_cache('categories.json', ttl=7 * 24 * 60 * 60)

        # Try to scrape from the web
        if not categories:
            try:
                categories = self.get_categories()
            except Exception:  # pylint: disable=broad-except
                categories = []
            else:
                update_cache('categories.json', categories)

        # Use the cache anyway (better than hard-coded)
        if not categories:
            categories = get_cache('categories.json', ttl=None)

        # Fall back to internal hard-coded categories if all else fails
        from data import CATEGORIES
        if not categories:
            categories = CATEGORIES

        category_items = []
        for category in self.localize_categories(categories, CATEGORIES):
            if get_setting('showfanart', 'true') == 'true':
                thumbnail = category.get('thumbnail', 'DefaultGenre.png')
            else:
                thumbnail = 'DefaultGenre.png'
            category_items.append(TitleItem(
                title=category.get('name'),
                path=url_for('categories', category=category.get('id')),
                art_dict=dict(thumb=thumbnail, icon='DefaultGenre.png'),
                info_dict=dict(plot='[B]%s[/B]' % category.get('name'), studio='VRT'),
            ))
        return category_items
Example #13
def get_categories():
    """Return a list of categories by scraping the VRT NU website"""

    cache_file = 'categories.json'

    # Try the cache if it is fresh
    categories = get_cache(cache_file, ttl=7 * 24 * 60 * 60)

    # Try to scrape from the web
    if not valid_categories(categories):
        from bs4 import BeautifulSoup, SoupStrainer
        log(2, 'URL get: https://www.vrt.be/vrtnu/categorieen/')
        response = urlopen('https://www.vrt.be/vrtnu/categorieen/')
        tiles = SoupStrainer('nui-list--content')
        soup = BeautifulSoup(response.read(), 'html.parser', parse_only=tiles)

        categories = []
        for tile in soup.find_all('nui-tile'):
            categories.append(dict(
                id=tile.get('href').split('/')[-2],
                thumbnail=get_category_thumbnail(tile),
                name=get_category_title(tile),
            ))
        if categories:
            from json import dumps
            update_cache(cache_file, dumps(categories))

    # Use the cache anyway (better than hard-coded)
    if not valid_categories(categories):
        categories = get_cache(cache_file, ttl=None)

    # Fall back to internal hard-coded categories if all else fails
    if not valid_categories(categories):
        from data import CATEGORIES
        categories = CATEGORIES
    return categories
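valid_categories() is referenced here and in Example #17 but never shown; the fallback chain only needs it to reject empty or malformed results so the next source is tried. A minimal hypothetical version:

def valid_categories(categories):
    """Return True if categories is a non-empty list of dicts with an id and a name."""
    return bool(categories) and all(
        isinstance(category, dict) and 'id' in category and 'name' in category
        for category in categories
    )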
Example #14
    def live_description(self, channel):
        ''' Return the EPG information for current and next live program '''
        now = datetime.now(dateutil.tz.tzlocal())
        epg = now
        # Daily EPG information shows information from 6AM until 6AM
        if epg.hour < 6:
            epg += timedelta(days=-1)
        # Try the cache if it is fresh
        schedule = get_cache('schedule.today.json', ttl=60 * 60)
        if not schedule:
            from json import load
            epg_url = epg.strftime(self.VRT_TVGUIDE)
            log(2, 'URL get: {url}', url=epg_url)
            schedule = load(urlopen(epg_url))
            update_cache('schedule.today.json', schedule)

        entry = find_entry(CHANNELS, 'name', channel)
        if not entry:
            return ''

        episodes = iter(schedule.get(entry.get('id'), []))

        episode = None
        description = ''
        while True:
            try:
                episode = next(episodes)
            except StopIteration:
                break
            start_date = dateutil.parser.parse(episode.get('startTime'))
            end_date = dateutil.parser.parse(episode.get('endTime'))
            if start_date <= now <= end_date:  # Now playing
                description = '[COLOR yellow][B]%s[/B] %s[/COLOR]\n' % (localize(30421), self.episode_description(episode))
                try:
                    description += '[B]%s[/B] %s' % (localize(30422), self.episode_description(next(episodes)))
                except StopIteration:
                    break
                break
            if now < start_date:  # Nothing playing now, but this may be next
                description = '[B]%s[/B] %s\n' % (localize(30422), self.episode_description(episode))
                try:
                    description += '[B]%s[/B] %s' % (localize(30422), self.episode_description(next(episodes)))
                except StopIteration:
                    break
                break
        if not description and episode:
            # Add a final 'No transmission' program
            description = '[COLOR yellow][B]%s[/B] %s - 06:00\n» %s[/COLOR]' % (localize(30421), episode.get('end'), localize(30423))
        return description
Example #15
    def refresh_resumepoints(self, ttl=None):
        """Get a cached copy or newer resumepoints from VRT, or fall back to a cached file"""
        if not self.is_activated():
            return
        resumepoints_json = get_cache(self.RESUMEPOINTS_CACHE_FILE, ttl)
        if not resumepoints_json:
            resumepoints_url = self.RESUMEPOINTS_URL + '?max=500&sortBy=-updated'
            headers = self.resumepoints_headers()
            if not headers:
                return
            resumepoints_json = get_url_json(url=resumepoints_url, cache=self.RESUMEPOINTS_CACHE_FILE, headers=headers)
        if resumepoints_json is not None:
            self._resumepoints = resumepoints_json
Example #16
    def refresh(self, ttl=None):
        """Get a cached copy or newer favorites from VRT, or fall back to a cached file"""
        if not self.is_activated():
            return
        favorites_json = get_cache('favorites.json', ttl)
        if not favorites_json:
            from tokenresolver import TokenResolver
            xvrttoken = TokenResolver().get_token('X-VRT-Token', variant='user')
            if xvrttoken:
                headers = {
                    'authorization': 'Bearer ' + xvrttoken,
                    'content-type': 'application/json',
                    'Referer': 'https://www.vrt.be/vrtnu',
                }
                favorites_url = 'https://video-user-data.vrt.be/favorites'
                favorites_json = get_url_json(url=favorites_url, cache='favorites.json', headers=headers)
        if favorites_json is not None:
            self._data = favorites_json
Example #17
    def get_categories(self):
        """Return a list of categories"""
        cache_file = 'categories.json'

        # Try the cache if it is fresh
        categories = get_cache(cache_file, ttl=7 * 24 * 60 * 60)
        if self.valid_categories(categories):
            return categories

        # Try online categories json
        categories = self.get_online_categories()
        if self.valid_categories(categories):
            from json import dumps
            update_cache(cache_file, dumps(categories))
            return categories

        # Fall back to internal hard-coded categories
        from data import CATEGORIES
        log(2, 'Fall back to internal hard-coded categories')
        return CATEGORIES
Example #18
    def _get_stream_json(self, api_data, roaming=False):
        """Get JSON with stream details from VRT API"""
        if not api_data:
            return None

        # Try cache for livestreams
        if api_data.is_live_stream and not roaming:
            filename = api_data.video_id + '.json'
            data = get_cache(filename)
            if data:
                return data

        token_url = api_data.media_api_url + '/tokens'
        if api_data.is_live_stream:
            playertoken = self._tokenresolver.get_token('vrtPlayerToken', 'live', token_url, roaming=roaming)
        else:
            playertoken = self._tokenresolver.get_token('vrtPlayerToken', 'ondemand', token_url, roaming=roaming)

        # Construct api_url and get video json
        if not playertoken:
            return None
        api_url = api_data.media_api_url + '/videos/' + api_data.publication_id + \
            api_data.video_id + '?vrtPlayerToken=' + playertoken + '&client=' + api_data.client

        stream_json = get_url_json(url=api_url)
        if stream_json and api_data.is_live_stream:
            from json import dumps
            exp = stream_json.get('drmExpired') or generate_expiration_date()
            vualto_license_url = self._get_vualto_license_url().get('la_url')
            stream_json.update(expirationDate=exp,
                               vualto_license_url=vualto_license_url)
            cache_file = api_data.video_id + '.json'
            update_cache(cache_file, dumps(stream_json))
        return stream_json
Example #19
    def get_episodes(self, program=None, season=None, episodes=None, category=None, feature=None, programtype=None, keywords=None,
                     whatson_id=None, video_id=None, video_url=None, page=None, use_favorites=False, variety=None, cache_file=None):
        ''' Get episodes or season data from VRT NU Search API '''

        # Construct params
        if page:
            page = statichelper.realpage(page)
            all_items = False
            params = {
                'from': ((page - 1) * 50) + 1,
                'i': 'video',
                'size': 50,
            }
        elif variety == 'single':
            all_items = False
            params = {
                'i': 'video',
                'size': '1',
            }
        else:
            all_items = True
            params = {
                'i': 'video',
                'size': '300',
            }

        if variety:
            season = 'allseasons'

            if variety == 'offline':
                from datetime import datetime
                import dateutil.tz
                params['facets[assetOffTime]'] = datetime.now(dateutil.tz.gettz('Europe/Brussels')).strftime('%Y-%m-%d')

            if variety == 'oneoff':
                params['facets[programType]'] = 'oneoff'

            if variety == 'watchlater':
                self._resumepoints.refresh(ttl=5 * 60)
                episode_urls = self._resumepoints.watchlater_urls()
                params['facets[url]'] = '[%s]' % (','.join(episode_urls))

            if variety == 'continue':
                self._resumepoints.refresh(ttl=5 * 60)
                episode_urls = self._resumepoints.resumepoints_urls()
                params['facets[url]'] = '[%s]' % (','.join(episode_urls))

            if use_favorites:
                program_urls = [statichelper.program_to_url(p, 'medium') for p in self._favorites.programs()]
                params['facets[programUrl]'] = '[%s]' % (','.join(program_urls))
            elif variety in ('offline', 'recent'):
                channel_filter = [channel.get('name') for channel in CHANNELS if get_setting(channel.get('name'), 'true') == 'true']
                params['facets[programBrands]'] = '[%s]' % (','.join(channel_filter))

        if program:
            params['facets[programUrl]'] = statichelper.program_to_url(program, 'medium')

        if season and season != 'allseasons':
            params['facets[seasonTitle]'] = season

        if episodes:
            params['facets[episodeNumber]'] = '[%s]' % (','.join(str(episode) for episode in episodes))

        if category:
            params['facets[categories]'] = category

        if feature:
            params['facets[programTags.title]'] = feature

        if programtype:
            params['facets[programType]'] = programtype

        if keywords:
            if not season:
                season = 'allseasons'
            params['q'] = quote_plus(statichelper.from_unicode(keywords))
            params['highlight'] = 'true'

        if whatson_id:
            params['facets[whatsonId]'] = whatson_id

        if video_id:
            params['facets[videoId]'] = video_id

        if video_url:
            params['facets[url]'] = video_url

        # Construct VRT NU Search API Url and get api data
        querystring = '&'.join('{}={}'.format(key, value) for key, value in list(params.items()))
        search_url = self._VRTNU_SEARCH_URL + '?' + querystring.replace(' ', '%20')  # Only encode spaces to minimize url length

        from json import load
        if cache_file:
            # Get api data from cache if it is fresh
            search_json = get_cache(cache_file, ttl=60 * 60)
            if not search_json:
                log(2, 'URL get: {url}', url=unquote(search_url))
                req = Request(search_url)
                try:
                    search_json = load(urlopen(req))
                except (TypeError, ValueError):  # No JSON object could be decoded
                    return []
                except HTTPError as exc:
                    url_length = len(req.selector if hasattr(req, 'selector') else req.get_selector())  # Request.get_selector() is Python 2 only
                    if exc.code == 413 and url_length > 8192:
                        ok_dialog(heading='HTTP Error 413', message=localize(30967))
                        log_error('HTTP Error 413: Exceeded maximum url length: '
                                  'VRT Search API url has a length of {length} characters.', length=url_length)
                        return []
                    if exc.code == 400 and 7600 <= url_length <= 8192:
                        ok_dialog(heading='HTTP Error 400', message=localize(30967))
                        log_error('HTTP Error 400: Probably exceeded maximum url length: '
                                  'VRT Search API url has a length of {length} characters.', length=url_length)
                        return []
                    raise
                update_cache(cache_file, search_json)
        else:
            log(2, 'URL get: {url}', url=unquote(search_url))
            search_json = load(urlopen(search_url))

        # Check for multiple seasons
        seasons = None
        if 'facets[seasonTitle]' not in unquote(search_url):
            facets = search_json.get('facets', {}).get('facets') or []  # Guard against a missing facets key
            seasons = next((f.get('buckets', []) for f in facets if f.get('name') == 'seasons' and len(f.get('buckets', [])) > 1), None)

        episodes = search_json.get('results', [{}])
        show_seasons = season != 'allseasons'

        # Return seasons
        if show_seasons and seasons:
            return (seasons, episodes)

        api_pages = search_json.get('meta').get('pages').get('total')
        api_page_size = search_json.get('meta').get('pages').get('size')
        total_results = search_json.get('meta').get('total_results')

        if all_items and total_results > api_page_size:
            for api_page in range(1, api_pages):
                api_page_url = search_url + '&from=' + str(api_page * api_page_size + 1)
                api_page_json = load(urlopen(api_page_url))
                episodes += api_page_json.get('results', [{}])

        # Return episodes
        return episodes
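Several statichelper utilities are assumed by this example. For illustration, statichelper.realpage() presumably coerces the page parameter, which typically arrives as a string from a plugin URL, into a usable positive integer. A hypothetical sketch:

def realpage(page):
    """Return page as a positive integer, defaulting to 1 on bad input."""
    try:
        page = int(page)
    except (TypeError, ValueError):
        return 1
    return max(1, page)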
Example #20
    def logged_in(self):
        """Whether there is an active login"""
        cache_file = self._get_token_filename('X-VRT-Token')
        return bool(get_cache(cache_file, cache_dir=self._TOKEN_CACHE_DIR))