    def get_new_token(self):
        """Log in to the TheTVDB v2 API and return a dict with the bearer token and its fetch time.

        Falls back to the currently cached token/datetime if the login request fails.
        """
        global THETVDB_V2_API_TOKEN
        token = THETVDB_V2_API_TOKEN.get('token', None)
        dt = THETVDB_V2_API_TOKEN.get('datetime', datetime.datetime.fromordinal(1))
        url = '%s%s' % (self.config['base_url'], 'login')
        params = {'apikey': self.config['apikey']}
        resp = get_url(url.strip(), post_json=params, parse_json=True, raise_skip_exception=True)
        if resp and 'token' in resp:
            token = resp['token']
            dt = datetime.datetime.now()
        return {'token': token, 'datetime': dt}
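    # A minimal staleness check (a sketch, not part of the original module) showing how a
    # caller might use the 'datetime' recorded by get_new_token(); the 24-hour lifetime is
    # an assumption, not a documented TheTVDB token guarantee.
    @staticmethod
    def _token_is_stale(token_cache, max_age_hours=24):
        dt = token_cache.get('datetime', datetime.datetime.fromordinal(1))
        return datetime.timedelta(hours=max_age_hours) < (datetime.datetime.now() - dt)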
def post(action, access_token, cid, quiet, params):
    # common request options for every Telegram Bot API call
    params.update(dict(headers={'User-Agent': USER_AGENT}, verify=True, json=True))
    # address the target chat, render message text as HTML, and optionally deliver silently
    params['post_data'].update(dict(chat_id=cid, parse_mode='HTML', disable_notification=quiet))
    return get_url('https://api.telegram.org/bot%s/%s' % (access_token, action), **params)
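# Example usage (a sketch, not part of the original notifier): 'sendMessage' is a real
# Telegram Bot API method, but the access token and chat id below are placeholders.
def example_notify():
    params = dict(post_data=dict(text='<b>SickGear</b>: download finished'))
    return post('sendMessage', access_token='123456:bot-token-placeholder',
                cid='@example_channel', quiet=True, params=params)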
    def _load_url(self, url, params=None, language=None):
        log.debug('Retrieving URL %s' % url)

        session = requests.session()

        # optional on-disk response cache and proxy support
        if self.config['cache_enabled']:
            session = CacheControl(session, cache=caches.FileCache(self.config['cache_location']))

        if self.config['proxy']:
            log.debug('Using proxy for URL: %s' % url)
            session.proxies = {'http': self.config['proxy'], 'https': self.config['proxy']}

        headers = {'Accept-Encoding': 'gzip,deflate',
                   'Authorization': 'Bearer %s' % self.get_token(),
                   'Accept': 'application/vnd.thetvdb.v%s' % __api_version__}

        if None is not language and language in self.config['valid_languages']:
            headers.update({'Accept-Language': language})

        resp = None
        is_series_info = self._match_url_pattern('url_series_info', url)
        if is_series_info:
            self.show_not_found = False
        self.not_found = False
        try:
            resp = get_url(url.strip(), params=params, session=session, headers=headers,
                           parse_json=True, raise_status_code=True, raise_exceptions=True)
        except requests.exceptions.HTTPError as e:
            if 401 == e.response.status_code:
                # token expired, get new token, raise error to retry
                global THETVDB_V2_API_TOKEN
                THETVDB_V2_API_TOKEN = self.get_new_token()
                raise TvdbTokenexpired
            elif 404 == e.response.status_code:
                if is_series_info:
                    self.show_not_found = True
                elif self._match_url_pattern('url_series_episodes_info', url):
                    resp = {'data': []}
                self.not_found = True
            else:
                raise TvdbError
        except (BaseException, Exception):
            raise TvdbError

        # a seriesName of the form '*** ... ***' marks an invalid/blocked show record
        if is_series_info and isinstance(resp, dict) and isinstance(resp.get('data'), dict) and \
                isinstance(resp['data'].get('seriesName'), string_types) and \
                re.search(r'^[*]\s*[*]\s*[*]', resp['data'].get('seriesName', ''), flags=re.I):
            self.show_not_found = True
            self.not_found = True

        map_show = {'airstime': 'airs_time', 'airsdayofweek': 'airs_dayofweek', 'imdbid': 'imdb_id',
                    'writers': 'writer', 'siterating': 'rating'}

        def map_show_keys(data):
            # normalise TheTVDB key names/values into the internal schema: lowercase keys,
            # rename via map_show, join list values as '|a|b|' strings, and keep the
            # original lists under *_list keys
            keep_data = {}
            del_keys = []
            new_data = {}
            for k, v in iteritems(data):
                k_org = k
                k = k.lower()
                if None is not v:
                    if k in ['banner', 'fanart', 'poster'] and v:
                        v = self.config['url_artworks'] % v
                    elif 'genre' == k:
                        keep_data['genre_list'] = v
                        v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)])
                    elif 'gueststars' == k:
                        keep_data['gueststars_list'] = v
                        v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)])
                    elif 'writers' == k:
                        keep_data[k] = v
                        v = '|%s|' % '|'.join([clean_data(c) for c in v if isinstance(c, string_types)])
                    elif 'rating' == k:
                        new_data['contentrating'] = v
                    elif 'firstaired' == k:
                        if v:
                            try:
                                v = parse(v, fuzzy=True).strftime('%Y-%m-%d')
                            except (BaseException, Exception):
                                v = None
                        else:
                            v = None
                    elif 'imdbid' == k:
                        if v:
                            if re.search(r'^(tt)?\d{1,7}$', v, flags=re.I):
                                v = clean_data(v)
                            else:
                                v = ''
                    else:
                        v = clean_data(v)
                else:
                    if 'seriesname' == k:
                        if isinstance(data.get('aliases'), list) and 0 < len(data.get('aliases')):
                            v = data['aliases'].pop(0)
                        # this is an invalid show, it has no name
                        if None is v:
                            return None

                if k in map_show:
                    k = map_show[k]
                if k_org is not k:
                    del_keys.append(k_org)
                    new_data[k] = v
                else:
                    data[k] = v
            for d in del_keys:
                del data[d]
            if isinstance(data, dict):
                data.update(new_data)
                data.update(keep_data)
            return data

        if resp:
            if isinstance(resp['data'], dict):
                resp['data'] = map_show_keys(resp['data'])
            elif isinstance(resp['data'], list):
                data_list = []
                for row in resp['data']:
                    if isinstance(row, dict):
                        cr = map_show_keys(row)
                        if None is not cr:
                            data_list.append(cr)
                resp['data'] = data_list
            return resp
        return dict([(u'data', None)])
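    # A minimal retry wrapper (a sketch, not the module's actual caller) illustrating the
    # contract of the 401 branch above: TvdbTokenexpired tells the caller to retry the
    # request once, now that the global THETVDB_V2_API_TOKEN has been refreshed.
    def _load_url_with_retry(self, url, params=None, language=None, retries=1):
        for attempt in range(retries + 1):
            try:
                return self._load_url(url, params=params, language=language)
            except TvdbTokenexpired:
                if attempt >= retries:
                    raise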
    def _cache_info_source_images(self, show_obj, img_type, num_files=0, max_files=500, force=False):
        # type: (TVShow, int, int, int, Optional[bool]) -> bool
        """
        Retrieves an image of the type specified from TV info source and saves it to the cache folder

        :param show_obj: TVShow object to cache an image for
        :type show_obj: sickbeard.tv.TVShow
        :param img_type: BANNER, POSTER, or FANART
        :type img_type: int
        :param num_files: current count of cached fanart files, used to number new files
        :type num_files: int or long
        :param max_files: cap on the total number of cached fanart files
        :type max_files: int or long
        :param force: passed through to write_image to overwrite an existing file
        :type force: bool
        :return: bool representing success
        :rtype: bool
        """
        # generate the path based on the type, tvid and prodid
        arg_tvid_prodid = (show_obj.tvid, show_obj.prodid)
        if self.POSTER == img_type:
            img_type_name = 'poster'
            dest_path = self.poster_path(*arg_tvid_prodid)
        elif self.BANNER == img_type:
            img_type_name = 'banner'
            dest_path = self.banner_path(*arg_tvid_prodid)
        elif self.FANART == img_type:
            img_type_name = 'fanart_all'
            dest_path = self.fanart_path(*arg_tvid_prodid).replace('fanart.jpg', '*')
        elif self.POSTER_THUMB == img_type:
            img_type_name = 'poster_thumb'
            dest_path = self.poster_thumb_path(*arg_tvid_prodid)
        elif self.BANNER_THUMB == img_type:
            img_type_name = 'banner_thumb'
            dest_path = self.banner_thumb_path(*arg_tvid_prodid)
        else:
            logger.log(u'Invalid cache image type: ' + str(img_type), logger.ERROR)
            return False

        # retrieve the image from TV info source using the generic metadata class
        metadata_generator = GenericMetadata()
        if self.FANART == img_type:
            image_urls = metadata_generator.retrieve_show_image(img_type_name, show_obj)
            if None is image_urls:
                return False

            # checksum the already cached files so byte-identical downloads can be skipped
            crcs = []
            for cache_file_name in ek.ek(glob.glob, dest_path):
                with open(cache_file_name, mode='rb') as resource:
                    crc = '%05X' % (zlib.crc32(resource.read()) & 0xFFFFFFFF)
                if crc not in crcs:
                    crcs += [crc]

            success = 0
            count_urls = len(image_urls)
            sources = []
            for image_url in image_urls or []:

                img_data = sg_helpers.get_url(image_url, nocache=True, as_binary=True)
                if None is img_data:
                    continue
                crc = '%05X' % (zlib.crc32(img_data) & 0xFFFFFFFF)
                if crc in crcs:
                    count_urls -= 1
                    continue
                crcs += [crc]

                # tag the cached file name with the image source site, plus any tmdb id suffix
                img_source = (((('', 'tvdb')['thetvdb.com' in image_url],
                                'tvrage')['tvrage.com' in image_url],
                               'fatv')['fanart.tv' in image_url],
                              'tmdb')['tmdb' in image_url]
                img_xtra = ''
                if 'tmdb' == img_source:
                    match = re.search(r'(?:.*\?(\d+$))?', image_url, re.I | re.M)
                    if match and None is not match.group(1):
                        img_xtra = match.group(1)
                file_desc = '%03d%s.%s.' % (num_files, ('.%s%s' % (img_source, img_xtra), '')['' == img_source], crc)
                cur_file_path = self.fanart_path(show_obj.tvid, show_obj.prodid, file_desc)
                result = metadata_generator.write_image(img_data, cur_file_path)
                if result and self.FANART != self.which_type(cur_file_path):
                    try:
                        ek.ek(os.remove, cur_file_path)
                    except OSError as e:
                        logger.log(u'Unable to remove %s: %s / %s' % (cur_file_path, repr(e), ex(e)),
                                   logger.WARNING)
                    continue
                if img_source:
                    sources += [img_source]
                num_files += (0, 1)[result]
                success += (0, 1)[result]
                if num_files > max_files:
                    break

            if count_urls:
                total = len(ek.ek(glob.glob, dest_path))
                logger.log(u'Saved %s of %s fanart images%s. Cached %s of max %s fanart file%s'
                           % (success, count_urls,
                              ('', ' from ' + ', '.join([x for x in list(set(sources))]))[0 < len(sources)],
                              total, sickbeard.FANART_LIMIT, sg_helpers.maybe_plural(total)))
            return bool(count_urls) and not bool(count_urls - success)

        img_data = metadata_generator.retrieve_show_image(img_type_name, show_obj)
        if None is img_data:
            return False
        result = metadata_generator.write_image(img_data, dest_path, force=force)

        if result:
            logger.log(u'Saved image type %s' % img_type_name)
        return result
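    # A standalone sketch (not part of the original class) of the CRC32 de-duplication used
    # above: a short uppercase-hex checksum identifies byte-identical images regardless of
    # which source URL they came from.
    @staticmethod
    def _seen_before(img_data, known_crcs):
        """Record img_data's checksum in known_crcs; return True if it was already present."""
        crc = '%05X' % (zlib.crc32(img_data) & 0xFFFFFFFF)
        if crc in known_crcs:
            return True
        known_crcs.append(crc)
        return False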