def down_web(self):
    # conf import path differs depending on how the package is invoked
    try:
        from configs import conf
    except Exception:
        from .configs import conf
    request_url(url='http://' + conf('web_host') + ':' + str(conf('web_port')) + '/shutdown')
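# request_url itself is not shown in these snippets, and its signature varies
# across them (url only, url + params, url + params + headers, even
# method + params). A minimal sketch of the url/params/headers variant,
# assuming it returns the body text on success and None on failure:
import requests

def request_url(url, params=None, headers=None, timeout=10):
    # hypothetical helper; a sketch, not any project's actual implementation
    try:
        resp = requests.get(url, params=params, headers=headers, timeout=timeout)
        resp.raise_for_status()
        return resp.text
    except requests.RequestException:
        return None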
def start_job_and_update_job_active(jobs, job):
    jobs[job]['active'] = True
    write_yaml(conf('docs_main_path'), "ml_execute.yaml", jobs)
    ml_execute_api = read_yaml(conf('docs_main_path'), 'apis.yaml')['ml_execute']
    url = get_api_url(ml_execute_api['host'], ml_execute_api['port'], ml_execute_api['api_name'])
    request_url(url, {'job': job})
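# get_api_url is not shown either; judging from its call sites it presumably
# just composes a plain HTTP endpoint from host, port and api name. A hedged
# sketch:
def get_api_url(host, port, api_name):
    # assumption: the services above are reached over plain HTTP
    return 'http://{0}:{1}/{2}'.format(host, port, api_name)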
def jobs(self):
    for j in self.job['execute']:
        self.api_info = self.api_infos['model_' + j['params']['model']]
        self.url = get_api_url(host=self.api_info['host'], port=self.api_info['port'], api_name=self.api_info['api_name'])
        request_url(self.url, j['params'])
    print("requests are sent!!!")
def down(self):
    self.down_web()
    if self.env == 'docker':
        popen(join(self.conf.folder, "docker-compose.yml") + " down")
    else:
        for s in self.conf.api_file:
            api = self.conf.api_file[s]
            request_url(url='http://' + api['host'] + ':' + str(api['port']) + '/shutdown')
            time.sleep(2)
def stop_job(self, request=True):
    if self.job['active'] is True:  # if there is an active job, update ml_execute.yaml
        self.logger.regenerate_file()
        self.jobs_yaml[self.job_name]['active'] = False
        write_yaml(conf('docs_main_path'), "ml_execute.yaml", self.jobs_yaml)
        for j in self.job['execute']:
            self.api_info = self.api_infos['model_' + j['params']['model']]
            self.url = get_api_url(host=self.api_info['host'], port=self.api_info['port'], api_name=self.api_info['api_name'])
            if request:
                request_url(self.url, self.job['stop_job'])
def get_selection():
    """ Return the medias from 'Notre Selection' on the Auvio homepage """
    items = []
    common.plugin.log("scraper.get_selection()")
    response = utils.request_url(common.auvio_url)
    if not response:
        return items
    soup = BeautifulSoup(response, 'html.parser')
    html_nodes = soup.select('#widget-ml-notreselection-mediahomemedia a.www-faux-link')
    for html_node in html_nodes:
        link_url = html_node['href']
        media_id = utils.get_url_arg(link_url, 'id')
        item = api.get_media_details(media_id)
        if not item:
            continue
        items.append(item)
    return items
def get_live_videos(page=1):
    """ parse live video streams """
    items = []
    limit = int(Addon().get_setting('medias_per_page'))
    url_args = {
        'target_site': 'media',
        'origin_site': 'media',
        'category_id': 0,
        'start_date': '',
        'offset': (page - 1) * limit,
        'limit': limit,
    }
    url = generic_api_url('live', 'planninglist', url_args)
    json_data = utils.request_url(url)
    if not json_data:
        return
    nodes = json.loads(json_data)
    common.plugin.log('api.get_live_videos: found %d nodes' % len(nodes))
    return nodes
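# generic_api_url is not shown in these snippets. From its call sites it
# presumably joins the Cryo API base url with a module ('live'), a method
# ('planninglist') and the query arguments, adding the partner key and API
# version that the direct-url variants below pass explicitly. A hedged sketch,
# not the add-on's actual helper:
def generic_api_url(module, method, url_args=None):
    args = {'partner_key': common.cryo_partner_key, 'v': 8}
    if url_args:
        args.update(url_args)
    query = '&'.join('%s=%s' % (k, v) for k, v in args.items())
    return common.cryo_base_url + module + '/' + method + '?' + query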
def get_programs():
    """ Get the list of shows
        TO FIX: we should find the API for this
    """
    common.plugin.log("scraper.get_programs()")
    response = utils.request_url('https://www.rtbf.be/auvio/emissions')
    if not response:
        return
    soup = BeautifulSoup(response, 'html.parser')
    nodes = soup.select('.rtbf-media-item')
    items = []
    for node in nodes:
        item = {}
        item['id'] = node['data-id']
        link = node.find('a', {'class': 'www-faux-link'})
        item['name'] = link['title'].encode('utf-8').strip()
        items.append(item)
    return items
def get_media_details(id):
    """ Get the media details by an ID from the API.
        If the type is None, we'll try to query both methods.
    """
    common.plugin.log('get_media_details')
    url_args = {
        'target_site': 'mediaz',
        'id': id,
    }
    url = generic_api_url('media', 'objectdetail', url_args)
    common.plugin.log("get_media_details for ID:%s" % (id))
    try:
        json_data = utils.request_url(url)
        if not json_data:
            return
        data = json.loads(json_data)
    except Exception:
        common.plugin.log_error('Unable to get media details for #%s' % (id))
        return None
    common.plugin.log(json.dumps(data))
    return data
def get_channel_recent_medias(id, page=1):
    """ Get a list of recent medias by channel ID
        TO FIX: this should be replaced by an API call; but can't find its endpoint...
    """
    items = []
    channel = api.get_single_channel_by_id(id)
    channel_url = channel.get('links', {}).get('auvio_replay', None)
    common.plugin.log("scraper.get_channel_recent_medias()")
    response = utils.request_url(channel_url)
    if not response:
        return items
    soup = BeautifulSoup(response, 'html.parser')
    html_nodes = soup.select('a.www-faux-link')
    for html_node in html_nodes:
        link_url = html_node['href']
        media_id = utils.get_url_arg(link_url, 'id')
        if not media_id:
            continue
        item = api.get_media_details(media_id)
        if not item:
            continue
        items.append(item)
    return items
def get_media_details(mid, live=False):
    # Get the media details by an ID from the API
    common.plugin.log('get_media_details')
    if live:
        url = common.cryo_base_url + 'live/planningdetail'
    else:
        url = common.cryo_base_url + 'media/objectdetail'
    url_params = {
        'partner_key': common.cryo_partner_key,
        'v': 8,
        'target_site': 'mediaz',
        'id': mid
    }
    common.plugin.log("api.get_media_details media #{0} - is live:{1}".format(mid, live))
    try:
        json_data = utils.request_url(url, url_params)
        if not json_data:
            return
        data = json.loads(json_data)
    except Exception:
        common.plugin.log_error("api.get_media_details - failed for media #{0}".format(mid))
        return None
    common.plugin.log(json.dumps(data))
    return data
def get_program_list(url_params={}):
    # Get ALL the programs from the API. Can be filtered through optional url parameters.
    url_params_default = {
        'partner_key': common.cryo_partner_key,
        'v': 7,
        'include_drm': 'true',
        'content_type': 'complete'
    }
    url_params = utils.parse_dict_args(url_params_default, url_params)
    common.plugin.log("api.get_program_list")
    url = common.cryo_base_url + 'media/objectlist'
    json_data = utils.request_url(url, url_params)
    if not json_data:
        return
    data = json.loads(json_data)
    return data
def get_live_videos(page=1):
    # parse live video streams
    items = []
    limit = int(Addon().get_setting('medias_per_page'))
    url = common.cryo_base_url + 'live/planninglist'
    url_params = {
        'target_site': 'mediaz',
        #'offset': (page - 1) * limit,
        #'limit': limit,
        'partner_key': common.cryo_partner_key,
        'v': 8,
    }
    # API request
    json_data = utils.request_url(url, url_params)
    # handle data
    if not json_data:
        return
    nodes = json.loads(json_data)
    common.plugin.log('api.get_live_videos: found %d nodes' % len(nodes))
    return nodes
def get_user_favorites(user_token, type='media', offset=None, limit=None):
    nodes = []
    url = common.cryo_base_url + 'media/favorite/favoritelist'
    url_params = {
        'type': type,
        'partner_key': common.cryo_partner_key,
        'v': 8,
        'include_drm': 'true',
    }
    if offset:
        url_params['offset'] = offset
    if limit:
        url_params['limit'] = limit
    url_headers = {
        'Accept': "application/json",
        'Authorization': "Bearer " + user_token,
    }
    json_data = utils.request_url(url, url_params, url_headers)
    if json_data:
        nodes = json.loads(json_data)
    common.plugin.log('api.get_user_favorites: found %d nodes' % len(nodes))
    return nodes
def get_program_medias(id, page=1):
    """ Get a list of recent medias by show ID or IDs, from the API """
    items = []
    limit = int(Addon().get_setting('medias_per_page'))
    url_args = {
        'program_id': id,
        'target_site': 'mediaz',
        'offset': (page - 1) * limit,
        'limit': limit,
    }
    url = generic_api_url('media', 'objectlist', url_args)
    json_data = utils.request_url(url)
    if not json_data:
        return
    nodes = json.loads(json_data)
    common.plugin.log('api.get_program_medias: found %d nodes' % len(nodes))
    return nodes
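# A minimal pagination sketch for the paged helpers above (assumption: the API
# returns an empty list once the offset runs past the last item):
def iter_all_program_medias(program_id):
    page = 1
    while True:
        nodes = get_program_medias(program_id, page=page)
        if not nodes:
            break
        for node in nodes:
            yield node
        page += 1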
def log_this(self, page): params = {"limit": self.per_page, "page": page, 'extend': 1} params.update(self.params) print "--- %s ---" % Color.fail("error request") if self.debug is False: print >> History.logf, request_url(self.method, params) History.logf.flush()
def log_this(self, page): params = {"limit": FriendHistory.per_page, "page": page, 'extend': 1} params.update(self.params) if FriendHistory.debug is True: # write to screen print "--- %s ---" % Color.fail("error request") print >> FriendHistory.log_file, request_url(self.METHOD, params) FriendHistory.log_file.flush()
def get_app_settings():
    # Get app settings (menu items & some other variables)
    url = common.cryo_base_url + 'setting/settinglist'
    url_params = {'partner_key': common.cryo_partner_key, 'v': 7}
    json_data = utils.request_url(url, url_params)
    if not json_data:  # guard against a failed request before json.loads
        return
    datas = json.loads(json_data)
    common.plugin.log("api.get_app_settings")
    common.plugin.log(json_data)
    return datas
def get_radio_recent_podcasts(id, page=1):
    items = []
    channel = api.get_single_channel_by_id(id)
    channel_slug = channel.get('key', '')
    if not channel_slug:
        return items
    podcasts_url = 'http://rss.rtbf.be/media/rss/audio/{0}_recent.xml'.format(channel_slug)
    response = utils.request_url(podcasts_url)
    if not response:
        return items
    # html.parser lowercases tag names, hence 'pubdate' rather than 'pubDate' below
    soup = BeautifulSoup(response, 'html.parser')
    xml_nodes = soup.findAll('item')
    for xml_node in xml_nodes:
        # datetime
        pubdate = xml_node.find('pubdate').string.encode('utf-8').strip()
        pubdate = utils.datetime_to_W3C(pubdate)
        # duration
        duration_str = xml_node.find('itunes:duration').string.encode('utf-8').strip()
        duration = utils.convert_podcast_duration(duration_str)
        podcast_item = {
            'title': xml_node.find('title').string.encode('utf-8').strip(),
            'description': xml_node.find('description').string.encode('utf-8').strip(),
            'pubdate': pubdate,
            'stream_url': xml_node.find('enclosure')['url'].encode('utf-8').strip(),
            'duration': duration,
            'image': xml_node.find('itunes:image')['href'].encode('utf-8').strip(),
        }
        items.append(podcast_item)
    return items
def get_channel_current_live(channel_slug):
    items = []
    url_args = {'target_site': 'mediaz', 'channel': channel_slug}
    url = generic_api_url('live', 'planningcurrent', url_args)
    json_data = utils.request_url(url)
    if not json_data:
        return
    node = json.loads(json_data)
    return node
def get_live_radio_config(radio_slug):
    """ Get radio information from the API """
    # get config.json
    config_url = 'http://www.rtbf.be/radio/liveradio/rtbf/radios/%s/config.json' % radio_slug
    json_data = utils.request_url(config_url)
    if not json_data:
        return
    common.plugin.log('config.json')
    common.plugin.log(json_data)
    data = json.loads(json_data)
    return data
def get_widget_detail(widget_id):
    url_params = {
        'id': widget_id,
        'partner_key': common.cryo_partner_key,
        'v': 8,
    }
    common.plugin.log("api.get_widget_detail: #" + str(widget_id))
    url = common.cryo_base_url + 'widget/widgetdetail'
    json_data = utils.request_url(url, url_params)
    if not json_data:
        return
    data = json.loads(json_data)
    #common.plugin.log(json_data)
    return data
def get_sidebar_widget_list(sidebar_id):
    url_params = {
        'sidebar_id': sidebar_id,
        'partner_key': common.cryo_partner_key,
        'v': 7,
    }
    common.plugin.log("api.get_sidebar_widget_list: #" + str(sidebar_id))
    url = common.cryo_base_url + 'widget/widgetlist'
    json_data = utils.request_url(url, url_params)
    if not json_data:
        return
    data = json.loads(json_data)
    common.plugin.log(json_data)
    return data
def get_base_datas():
    """ Fetch the header nav data from the API and clean it (menu items) """
    global main_data
    if main_data is None:
        site = 'media'  # 'data-site' attr from body
        url = common.rtbf_url + 'news/api/menu?site=%s' % site
        progressdialog = xbmcgui.DialogProgress()
        progressdialog.create(common.plugin.addon.getAddonInfo('name'))
        progressdialog.update(0, 'Récupération des données...')
        common.plugin.log("get_base_datas")
        try:
            json_data = utils.request_url(url)
            if not json_data:
                return
            main_data = json.loads(json_data)  # will generate unicode
            main_data = clean_base_datas(main_data)
            progressdialog.update(100, 'Done!')
        except Exception:
            main_data = False
            progressdialog.update(0, 'Échec!')
            xbmc.sleep(1000)
        progressdialog.close()
    #common.plugin.log("before clean_base_datas:")
    #common.plugin.log(json.dumps(main_data))
    #common.plugin.log("after clean_base_datas:")
    #common.plugin.log(json.dumps(main_data))
    return main_data
def get_channels():
    """ Get channels from the API """
    global channels
    if not channels:
        common.plugin.log("api.get_channels")
        items = []
        url = generic_api_url('epg', 'channellist')
        json_data = utils.request_url(url)
        if not json_data:
            return
        channels = json.loads(json_data)
    return channels
def get_user_session(user_login, user_pwd):
    # retrieve Gigya user session
    url_params = {
        'loginID': user_login,
        'password': user_pwd,
        'apiKey': gigya_api_key,
        'userKey': gigya_user_key,
        'secret': gigya_secret,
        'format': 'json',
        'lang': 'fr',
    }
    url = gigya_accounts_base_url + 'login'
    json_data = utils.request_url(url, url_params)
    common.plugin.log('gigya get_user_session:')
    common.plugin.log(json_data)
    # no result
    if not json_data:
        common.plugin.log('gigya get_user_session: empty')
        return
    data = json.loads(json_data)
    # handle errors
    if 'errorMessage' in data:
        common.popup('gigya get_user_session: %s' % (data['errorMessage']))
    if data['errorCode'] != 0:
        common.plugin.log("gigya get_user_session: error code #%s" % (data['errorCode']))
        return
    if data['statusCode'] != 200:
        common.plugin.log("gigya get_user_session: status code #%s" % (data['statusCode']))
        return
    return data
def get_category_medias(id, page=1):
    items = []
    limit = int(Addon().get_setting('medias_per_page'))
    url_args = {
        'target_site': 'mediaz',
        'category_id': id,
        'offset': (page - 1) * limit,
        'limit': limit,
    }
    url = generic_api_url('media', 'objectlist', url_args)
    json_data = utils.request_url(url)
    if not json_data:
        return
    nodes = json.loads(json_data)
    common.plugin.log('api.get_category_medias: found %d nodes' % len(nodes))
    return nodes
def get_channel_list(url_params={}):
    # Get ALL the channels from the API (not only the menu ones), including radios.
    # Can be filtered through optional url parameters.
    url_params_default = {
        'partner_key': common.cryo_partner_key,
        'v': 7,
    }
    url_params = utils.parse_dict_args(url_params_default, url_params)
    common.plugin.log("api.get_channel_list")
    url = common.cryo_base_url + 'epg/channellist'
    json_data = utils.request_url(url, url_params)
    if not json_data:
        return
    data = json.loads(json_data)
    return data
def get_jwt(uid):
    # get the Gigya token based on a user ID
    url_params = {
        'targetUID': uid,
        'apiKey': gigya_api_key,
        'userKey': gigya_user_key,
        'secret': gigya_secret,
        'format': 'json',
        'lang': 'fr',
    }
    url = gigya_accounts_base_url + 'getJWT'
    json_data = utils.request_url(url, url_params)
    # no result
    if not json_data:
        common.plugin.log('gigya get_jwt: empty')
        return
    data = json.loads(json_data)
    # handle errors
    if 'errorMessage' in data:
        common.popup('gigya get_jwt: %s' % (data['errorMessage']))
        common.plugin.log('gigya get_jwt: %s' % (data['errorMessage']))
    if data['errorCode'] != 0:
        common.plugin.log("gigya get_jwt: error code #%s" % (data['errorCode']))
        return
    if data['statusCode'] != 200:
        common.plugin.log("gigya get_jwt: status code #%s" % (data['statusCode']))
        return
    return data['id_token']
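# A hedged sketch chaining the Gigya helpers in these snippets: log in, take
# the UID from the session payload (assumption: it is exposed as data['UID']),
# exchange it for a JWT, then use that as the Bearer token expected by
# get_user_favorites above. Credentials are hypothetical:
session = get_user_session('user@example.com', 'secret')
if session:
    user_token = get_jwt(session['UID'])
    if user_token:
        favorites = get_user_favorites(user_token)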
def get_drm_media_auth(user_token, mid, is_live=False):
    # Return base64 encoded KeyOS authentication XML (Widevine)
    # https://www.buydrm.com/multikey-demo
    # https://bitmovin.com/mpeg-dash-hls-drm-test-player/
    # http://dashif.org/reference/players/javascript/v2.4.1/samples/dash-if-reference-player/index.html
    url = common.cryo_base_url + 'drm/encauthxml'
    url_params = {
        'partner_key': common.cryo_partner_key,
        'v': 8,
    }
    # live ?
    if is_live:
        url_params['planning_id'] = mid
    else:
        url_params['media_id'] = mid
    url_headers = {
        'Authorization': "Bearer " + user_token,
    }
    json_data = utils.request_url(url, url_params, url_headers)
    if json_data:
        data = json.loads(json_data)
        auth = data.get('auth_encoded_xml')
        common.plugin.log("media #{0} auth: {1}".format(mid, auth))
        return auth
    return None
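# Usage note (an assumption based on the BuyDRM/KeyOS links above, not on this
# code): the base64 auth XML returned here is typically sent to the Widevine
# license server in a 'customdata' HTTP header when requesting a license.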
def get_account_info(uid):
    url_params = {
        'UID': uid,
        'include': 'profile,userInfo',
        'apiKey': gigya_api_key,
        'userKey': gigya_user_key,
        'secret': gigya_secret,
        'format': 'json',
        'lang': 'fr',
    }
    url = gigya_accounts_base_url + 'getAccountInfo'
    json_data = utils.request_url(url, url_params)
    # no result
    if not json_data:
        common.plugin.log('gigya get_account_info: empty')
        return
    data = json.loads(json_data)
    # handle errors
    if 'errorMessage' in data:
        common.popup('gigya get_account_info: %s' % (data['errorMessage']))
        common.plugin.log('gigya get_account_info: %s' % (data['errorMessage']))
    if data['errorCode'] != 0:
        common.plugin.log("gigya get_account_info: error code #%s" % (data['errorCode']))
        return
    if data['statusCode'] != 200:
        common.plugin.log("gigya get_account_info: status code #%s" % (data['statusCode']))
        return
    return data['userInfo']
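# get_user_session, get_jwt and get_account_info repeat the same error checks.
# A hedged refactoring sketch (helper name hypothetical):
def gigya_response_ok(data, context):
    # Return True when a Gigya payload looks successful; log/popup otherwise.
    if 'errorMessage' in data:
        common.popup('%s: %s' % (context, data['errorMessage']))
        common.plugin.log('%s: %s' % (context, data['errorMessage']))
    if data.get('errorCode', 0) != 0:
        common.plugin.log('%s: error code #%s' % (context, data['errorCode']))
        return False
    if data.get('statusCode') != 200:
        common.plugin.log('%s: status code #%s' % (context, data['statusCode']))
        return False
    return True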
import random
import re
import time

from conf import book_name_list
import requests
from utils import get_ua, get_proxy, delete_proxy, request_url, parse_detail
from lxml import etree

base_url = 'https://book.douban.com'
url = 'https://book.douban.com/tag/%E4%B8%9C%E9%87%8E%E5%9C%AD%E5%90%BE'
headers = {"User-Agent": get_ua()}
api_url = 'http://127.0.0.1:5010/get/'

if __name__ == '__main__':
    for tag in book_name_list:
        url = f'https://book.douban.com/tag/{tag}'
        while url:
            # response = requests.get(url, headers=headers)
            response = request_url(url)
            response = response.text
            html = etree.HTML(response)
            pic_list = html.xpath('//div[@class="pic"]')
            next_url = html.xpath('//span[@class="next"]/a/@href')
            if pic_list:
                for item in pic_list:
                    time.sleep(random.randint(3, 5))
                    book_url = item.xpath('./a/@href')[0]
                    book_id = re.search(r'/subject/(\d+)', book_url).group(1)
                    img_url = item.xpath('./a/img/@src')[0]
                    img_name = book_id + '.' + img_url.split('.')[-1]
                    detail_url = f'https://book.douban.com/subject/{book_id}/'
                    # detail_response = requests.get(detail_url, headers=headers).text
                    detail_response = parse_detail(detail_url).text