def parse_p_table(soup=None, field_name=None, dt_text=None, url=None):
    pfields = []
    try:
        table = soup.find("dt", id=dt_text).parent.find("table")
        ps = table.find("th", text="Type:").parent.find_all("p")
        for p in ps[1:]:
            pfield_name = p.contents[0].split(':', 1)[0]
            # convert field type to elasticsearch type
            pfield_types = get_filed_types(pfield_name, p.contents[1].text)
            if '::' in pfield_types.get('bro'):
                logger.info(' *** parsing nested field: {}'.format(pfield_types.get('bro')))
                if field_name is None:
                    pfields += get_nested_fields(pfield_name, pfield_types,
                                                 build_url(url, p.a['href']))
                else:
                    pfields += get_nested_fields(field_name + '.' + pfield_name, pfield_types,
                                                 build_url(url, p.a['href']))
            else:
                if field_name is None:
                    logger.info(' ===> adding nested field: {}'.format(pfield_name))
                    pfields.append(dict(field=pfield_name, types=pfield_types, description=""))
                else:
                    logger.info(' ===> adding nested field: {}'.format(field_name + '.' + pfield_name))
                    pfields.append(dict(field=field_name + '.' + pfield_name,
                                        types=pfield_types, description=""))
    except Exception as e:
        logger.error('Failed to parse field_name: {} and dt_text: {}'.format(field_name, dt_text))
        logger.exception(e)  # Exception.message does not exist in Python 3; log the exception itself
    return pfields
def top_index():
    addon_handle = int(sys.argv[1])

    url = utils.build_url({'play': '/en/live/', 'nid': 'live'})
    listitem = xbmcgui.ListItem(label='Live stream')
    xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                listitem=listitem, isFolder=False)

    for sid in scrape_season_list():
        url = utils.build_url({'sid': sid})
        listitem = xbmcgui.ListItem(label=sid)
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=listitem, isFolder=True)

    xbmcplugin.endOfDirectory(addon_handle)
    xbmcplugin.setContent(addon_handle, content='tvshows')
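# The usual shape of the Kodi add-on `utils.build_url` helper used by these
# directory builders: it serializes a query dict onto the plugin's own base
# URL (sys.argv[0]). A sketch under that assumption, not the add-on's actual
# helper.
try:  # Python 3
    from urllib.parse import urlencode
except ImportError:  # Python 2
    from urllib import urlencode
import sys

def build_url(query):
    """Build a plugin:// callback URL from a dict of query parameters."""
    return sys.argv[0] + '?' + urlencode(query)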
def update_constraint_description(self, constraint, headers):
    res = requests.put(build_url('constraint/' + constraint['_id']),
                       json={"description": constraint['description']},
                       headers=headers)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
        obj = res.json()
        Logger.info(str(obj))
        if obj['description'] == constraint['description']:
            Logger.success('Description successfully updated to {}.'.format(obj['description']))
            return True
        else:
            Logger.error('Description is "{}" and should be "{}".'.format(
                obj['description'], constraint['description']))
            return False
    else:
        Logger.error('Status code : {}'.format(res.status_code))
        Logger.error(res.content)
        return False
def main():
    # get options from console.
    options = args()

    # get configuration from file.
    config = get_conf(options['config_file'])

    # create ES connection to hosts.
    connections.create_connection(hosts=config['elasticsearch']['hosts'], timeout=30)

    # create the searcher instance to find alarms, given the options from console.
    searcher = Searcher(options['from'], options['query'], ttime=options['to'],
                        per_page=500, min_priority=options['min_priority'])

    buckets = [
        PathClassBucket(
            utils.build_url(config['kibana']['host'], config['kibana']['secure']))
    ]

    # manually fetch all alarms from the searcher and pass each one to every bucket.
    for alarm in searcher.pages():
        for bucket in buckets:
            bucket.cherry_pick(alarm)

    # dump all buckets; this will print out all buckets.
    for bucket in buckets:
        bucket.dump()
def create_constraint(self, constraint, headers):
    # Post the request
    res = requests.post(build_url('constraint'), json=constraint, headers=headers)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
    else:
        Logger.error('Status code : {}'.format(res.status_code))

    # Constraint created
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        constraint['_id'] = obj['_id']
        Logger.success('Constraint created id : ' + constraint['_id'])
        return True
    # Constraint post failed
    elif res.status_code == 500:
        Logger.error(res.json())
    else:
        Logger.error(res.text)
    return False
def insert_or_update_entry(filename):
    basename, extension = splitext(filename)
    if extension in ['.markdown', '.meta']:
        result = urlfetch.fetch(url=build_url(filename))
        if result.status_code == 200:
            entry = Entry.get_or_insert(basename)
            if extension.endswith('.markdown'):
                import markdown2
                html = markdown2.markdown(result.content.decode('utf-8'))
                if html:
                    jinja2 = Environment(extensions=[ImgurExtension])
                    entry.content = jinja2.from_string(html).render()
            else:
                try:
                    meta = yaml.load(result.content)
                except Exception as ex:
                    logging.error('Failed to parse YAML: %s' % ex)
                else:
                    entry.title = meta['title']
                    entry.tags = meta['tags']
                    entry.published = meta['published']
            entry.slug = basename
            entry.put()
        else:
            logging.error('failed to fetch %s' % filename)
def test_get(self):
    """Hit some endpoint nb_time times."""
    nb_time = self.conf_getInt('test_get', 'nb_time')
    url = self.server_url + build_url(self.endpoint) + self.encoded_params
    for i in range(nb_time):
        self.get(url, description='Getting endpoint %s, iteration %i of %i'
                                  % (self.endpoint, i, nb_time))
def get_rt_url(self):
    self.rt_url = utils.build_url(
        self.settings["host"],
        self.settings["port"],
        self.settings["username"],
        self.settings["password"],
    )
    return self.rt_url
def get_all_comment_for_a_submission(self, submission, headers):
    res = requests.get(build_url('submission/{}/comment'.format(submission['_id'])),
                       headers=headers)
    if res.status_code == 200:
        Logger.success(str(res.json()))
        return True
    else:
        Logger.error(res.content)
        return False
def get_nested_fields(field_name, field_types, url):
    nested = []
    skip_list = ["FTP::PendingCmds", "Intel::TypeSet", "Notice::ActionSet",
                 "Files::Info", "X509::Info"]
    if field_types.get('bro') in skip_list:
        logger.info(' ===> adding SKIPPED nested field: {}'.format(field_name))
        nested.append(dict(field=field_name, types=field_types, description=""))
    else:
        resp = requests.get(url=url)
        if not resp.ok:
            raise Exception("[BAD URL] {} - got status code: {}".format(url, resp.status_code))
        soup = BeautifulSoup(resp.content, "html.parser")
        dt_text = url.split('#')[-1]
        if is_enum(soup, dt_text):
            logger.info(' ===> adding ENUM field: {}'.format(field_types.get('bro')))
            nested.append(dict(field=field_name, types=field_types, description=""))
        else:
            try:
                dl = soup.find("dt", id=dt_text).parent.find("dl", {"class": "docutils"})
                if dl is not None:
                    for nfield in zip(dl.find_all("dt"), dl.find_all("dd")):
                        if len(nfield) == 2:
                            nfield_name = nfield[0].contents[0].split(':', 1)[0]
                            if nfield[1].p is not None:
                                nfield_description = nfield[1].p.text.replace('\n', ' ').replace('\r', '')
                                nfield_description = nfield_description.encode('ascii', 'ignore').decode('ascii')
                            else:
                                nfield_description = ""
                            # convert field type to elasticsearch type
                            nfield_types = get_filed_types(nfield_name, nfield[0].contents[1].text)
                            if 'conn_id' in nfield_types.get('bro'):
                                nested += add_conn_id(prefix=field_name)
                            elif '::' in nfield_types.get('bro'):
                                # recursively call get_nested_fields to get the next layer of nested fields
                                logger.info(' ** parsing nested field: {}'.format(nfield_types.get('bro')))
                                nested += get_nested_fields(field_name + '.' + nfield_name, nfield_types,
                                                            build_url(url, nfield[0].a['href']))
                            else:
                                logger.info(' ===> adding nested field: {}'.format(field_name + '.' + nfield_name))
                                nested.append(dict(field=field_name + '.' + nfield_name,
                                                   types=nfield_types,
                                                   description=nfield_description))
                else:
                    logger.warning(' ===> unable to parse nested field type: {}. Trying as p-table...'.format(
                        field_types.get('bro')))
                    nested += parse_p_table(soup=soup, field_name=field_name, dt_text=dt_text, url=url)
            except Exception as e:
                logger.error('parsing field: {}, type: {}'.format(field_name, field_types.get('bro')))
                logger.exception(e)
    return nested
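# A plausible sketch of the `add_conn_id` helper referenced above. Bro/Zeek's
# `conn_id` record is the standard connection 4-tuple, logged under the `id.`
# prefix; the exact field list and the reuse of `get_filed_types` here are
# assumptions, not the original implementation.
def add_conn_id(prefix='id'):
    conn_id_fields = [('orig_h', 'addr'), ('orig_p', 'port'),
                      ('resp_h', 'addr'), ('resp_p', 'port')]
    return [dict(field='{}.{}'.format(prefix, name),
                 types=get_filed_types(name, bro_type),
                 description="")
            for name, bro_type in conn_id_fields]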
def get_submissions(kwargs):
    '''
    Builds a URL from the given keyword arguments, then makes a GET request
    to that URL.

    :param kwargs: query parameters for the search endpoint
    :return: JSON response
    '''
    search_subreddit_url = 'http://api.pushshift.io/reddit/search/submission/'
    url = build_url(search_subreddit_url, kwargs)
    response = get_request(url)
    return jsonify(response.json())
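# Illustrative call, assuming Pushshift-style query parameters; the parameter
# names below are examples, not confirmed by the wrapper above:
#   get_submissions({'subreddit': 'python', 'size': 25, 'sort': 'desc'})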
def delete_contest(self, contest, headers):
    res = requests.delete(build_url('contest/' + contest['_id']), headers=headers)
    if res.status_code == 200:
        Logger.info(str(res.json()))
        Logger.success('contest deleted')
        return True
    else:
        Logger.error(res.content)
        return False
def delete_submission(self, submission, headers):
    res = requests.delete(build_url('submission/' + submission['_id']), headers=headers)
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        Logger.success('submission deleted')
        return True
    else:
        Logger.error(res.content)
        return False
def POST(self):
    args = web.input()
    print(args)
    host = args.get("host", None)
    port = args.get("port", 80)
    username = args.get("username", None)
    password = args.get("password", None)
    url = utils.build_url(host, port, username, password)
    conn_status = utils.test_xmlrpc_connection(url)
    return process_output(to_json(conn_status))
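# A minimal sketch of the four-argument `utils.build_url` used by the XML-RPC
# handlers above, assuming it inlines basic-auth credentials into an http URL;
# the real helper is not shown in this excerpt, so treat this as illustrative.
def build_url(host, port=80, username=None, password=None):
    auth = '{}:{}@'.format(username, password) if username and password else ''
    return 'http://{}{}:{}'.format(auth, host, port)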
def delete_vote(self, vote, headers):
    res = requests.delete(build_url('vote/' + vote['_id']), headers=headers)
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        Logger.success('Vote deleted')
        return True
    else:
        Logger.error(res.content)
        return False
def get_user_by_id(self, user, headers):
    res = requests.get(build_url('user/' + user['_id']), headers=headers)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
        Logger.success(str(res.json()))
        return True
    else:
        Logger.error('Status code : {}'.format(res.status_code))
        Logger.error(res.content)
        return False
def fetch_ads(session: Session) -> Set[AdModel]:
    url = build_url()
    ads = []
    logger.info('=== Starting fetch ads ===')
    response = session.get(url)
    if response.status_code != 200:
        logger.critical(
            '=== Unsuccessful attempt. '
            'Please check url - %s '
            'The script will be stopped ===', url)
        raise RequestsConnectionError(
            f'Unable to get urls {response.status_code}')
    soup = BeautifulSoup(response.content.decode('utf-8'), 'lxml')
    ads_items = soup.find_all('table', attrs={'summary': 'Объявление'})
    logger.info('=== Start processing %s ads ===', len(ads_items))
    for item in ads_items:
        item_url_obj = item.find('a', class_='marginright5')
        item_url, url_info, *_ = item_url_obj.attrs.get('href').split('#')
        if not settings.WITH_PROMOTED and 'promoted' in url_info:
            continue
        try:
            price = int(
                item.find('p', class_='price')
                .text.split(' грн.')[0].strip().replace(' ', ''))
        except ValueError:
            logger.exception('=== Error during parsing a price ===')
            continue
        day = item.select('small > span')[1].text.strip().split(' ')[0].lower()
        ad = AdModel(
            external_id=item.attrs.get('data-id'),
            title=item_url_obj.text.strip(),
            price=price,
            url=item_url,
        )
        if day in settings.PUBLICATION_DATE and \
                settings.MIN_PRICE <= ad.price <= settings.MAX_PRICE:
            ads.append(ad)
    result = set(ads)
    logger.info('=== Found %s ads after filtering ===', len(result))
    return result
def get_authorization_code_uri(self, **params):
    """Construct a full URL that can be used to obtain an authorization
    code from the provider authorization_uri. Use this URI in a client
    frame to cause the provider to generate an authorization code.

    :rtype: str
    """
    if 'response_type' not in params:
        params['response_type'] = self.default_response_type
    params.update({
        'client_id': self.client_id,
        'redirect_uri': self.redirect_uri
    })
    return utils.build_url(self.authorization_uri, params)
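# A minimal sketch of the two-argument `utils.build_url(base, params)` these
# OAuth helpers rely on, assuming it merges a parameter dict into the query
# string and drops None values (the real helper is not shown here).
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def build_url(base, params):
    """Return `base` with `params` merged into its query string."""
    parts = urlparse(base)
    query = dict(parse_qsl(parts.query))
    query.update({k: v for k, v in params.items() if v is not None})
    return urlunparse(parts._replace(query=urlencode(query)))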
def _render_tag(self, args):
    filename = args["src"]
    alt = args.get('alt', '')
    params = urllib.urlencode({"key": imgur_key, "image": build_url(filename)})
    result = urlfetch.fetch("http://api.imgur.com/2/upload.json",
                            method=urlfetch.POST, payload=params)
    if result.status_code == 200:
        data = json.loads(result.content)
        upload = data["upload"]
        image = upload["image"]
        links = upload["links"]
        html = ('<div class="center"><a href="{href}">'
                '<img class="frame" width="{width}" height="{height}" '
                'src="{src}" alt="{alt}" /></a></div>')
        return html.format(href=links["imgur_page"], width=image["width"],
                           height=image["height"], src=links["original"], alt=alt)
def delete_constraint(self, constraint, headers):
    res = requests.delete(build_url('constraint/' + constraint['_id']), headers=headers)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
        Logger.info(str(res.json()))
        Logger.success('Constraint deleted')
        return True
    else:
        Logger.error('Status code : {}'.format(res.status_code))
        Logger.error(res.content)
        return False
def like_article(request, article_id):
    keyword = request.GET.get('keyword')
    page = request.GET.get('page')
    article = get_object_or_404(Article, id=article_id)
    if request.user in article.liked_by.all():
        article.liked_by.remove(request.user)
    else:
        article.liked_by.add(request.user)
    article.save()
    return HttpResponseRedirect(
        build_url('all_articles', get={'keyword': keyword, 'page': page}))
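# A common Django idiom matching the `build_url('all_articles', get={...})`
# call above: reverse the named URL pattern and append GET parameters. This is
# a sketch of what the project's helper plausibly does, not its actual code.
from urllib.parse import urlencode
from django.urls import reverse

def build_url(viewname, get=None):
    url = reverse(viewname)
    if get:
        params = {k: v for k, v in get.items() if v is not None}
        if params:
            url += '?' + urlencode(params)
    return url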
def build_show_list():
    addon_handle = int(sys.argv[1])
    for show in scrape():
        url = utils.build_url({'slug': show.slug})
        thumbnail_url = show.get_thumbnail()
        listitem = xbmcgui.ListItem(label=show.get_list_title(),
                                    iconImage=thumbnail_url,
                                    thumbnailImage=thumbnail_url)
        listitem.setInfo('video', show.get_xbmc_videoInfo())
        listitem.setProperty('fanart_image', show.get_fanart())
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=listitem, isFolder=True)
    xbmcplugin.endOfDirectory(addon_handle)
    xbmcplugin.setContent(addon_handle, content='tvshows')
def add_list_item(query, item=None, total=0):
    if item is None:
        item = query
    log.debug('query: %s item: %s', query, item)
    li = new_list_item(item)
    if item.get('video_url'):
        url = item.get_video_url(int(addon.getSetting('video_quality')))
        log.info('video_url: %s', url)
        is_folder = False
        li.setProperty('Is_playable', 'true')
        li.addStreamInfo('video', item.get('stream_info'))
    else:
        is_folder = True
        url = utils.build_url(query)
    xbmcplugin.addDirectoryItem(handle, url, li, is_folder, total)
def delete_user(self, user, headers):
    res = requests.delete(build_url('user/' + user['_id']), headers=headers)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
        obj = res.json()
        Logger.info(str(obj))
        Logger.success('User deleted')
        return True
    else:
        Logger.error('Status code : {}'.format(res.status_code))
        Logger.error(res.content)
        return False
def delete_all_constraints_from_user_id(self, constraint, headers):
    res = requests.delete(
        build_url('constraint', params='author_id={}'.format(constraint['author_id'])),
        headers=headers)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
        Logger.info(str(res.json()))
        Logger.success('Constraints deleted')
        return True
    else:
        Logger.error('Status code : {}'.format(res.status_code))
        Logger.error(res.content)
        return False
def _make_redirect_error_response(self, redirect_uri, err):
    """Return a HTTP 302 redirect response object containing the error.

    :param redirect_uri: Client redirect URI.
    :type redirect_uri: str
    :param err: OAuth error message.
    :type err: str
    :rtype: requests.Response
    """
    params = {
        'error': err,
        'response_type': None,
        'client_id': None,
        'redirect_uri': None
    }
    redirect = utils.build_url(redirect_uri, params)
    return self._make_response(headers={'Location': redirect},
                               status_code=302)
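# Illustrative outcome, assuming build_url drops None-valued parameters (as in
# the sketch after get_authorization_code_uri above):
#   _make_redirect_error_response('https://app.example/cb', 'access_denied')
#   -> 302 response with Location: https://app.example/cb?error=access_denied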
def update_submission_description(self, submission, headers):
    res = requests.put(build_url('submission/' + submission['_id']),
                       json={"description": submission['description']},
                       headers=headers)
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        if obj['description'] == submission['description']:
            Logger.success('Description successfully updated to {}.'.format(obj['description']))
            return True
        else:
            Logger.error('Description is "{}" and should be "{}".'.format(
                obj['description'], submission['description']))
            return False
    else:
        Logger.error(res.content)
        return False
def _render_tag(self, args):
    import urllib
    from settings import IMGUR_KEY, IMGUR_API_UPLOAD
    from utils import build_url

    filename = args.get('src')
    alt = args.get('alt', '')
    params = urllib.urlencode({'key': IMGUR_KEY, 'image': build_url(filename)})
    result = urlfetch.fetch(IMGUR_API_UPLOAD, method=urlfetch.POST, payload=params)
    if result.status_code == 200:
        from json import loads
        data = loads(result.content)
        upload = data["upload"]
        image = upload["image"]
        links = upload["links"]
        html = ('<div class="centered"><img class="img-thumbnail center-block" '
                'width="{width}" height="{height}" src="{src}" alt="{alt}" /></div>')
        return html.format(width=image["width"], height=image["height"],
                           src=links["original"], alt=alt)
def build_episode_list(slug):
    addon_handle = int(sys.argv[1])
    eps = scrape(slug)
    for ep in eps:
        url = utils.build_url({'play': ep.slug})
        thumbnail_url = ep.get_thumbnail()
        listitem = xbmcgui.ListItem(label=ep.get_list_title(),
                                    iconImage=thumbnail_url,
                                    thumbnailImage=thumbnail_url)
        listitem.setInfo('video', ep.get_xbmc_videoInfo())
        listitem.addStreamInfo('video', ep.get_xbmc_videoStreamInfo())
        listitem.addStreamInfo('audio', ep.get_xbmc_audioStreamInfo())
        listitem.setProperty('fanart_image', ep.get_fanart())
        xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                    listitem=listitem, isFolder=False,
                                    totalItems=len(eps))
    xbmcplugin.endOfDirectory(addon_handle)
    xbmcplugin.setContent(addon_handle, content='episodes')
def parse_log_info(url, log_file):
    log_fields = []
    resp = requests.get(url=url, allow_redirects=True)
    if not resp.ok:
        raise Exception("[BAD URL] {} - got status code: {}".format(url, resp.status_code))
    soup = BeautifulSoup(resp.content, "html.parser")
    dt_text = url.split('#', 1)[1]
    logger.info('[PARSING LOG] {}, field: {}'.format(log_file, dt_text))
    try:
        dl = soup.find("dt", id=dt_text).parent.find("dl", {"class": "docutils"})
        if dl is not None:
            for dfield in zip(dl.find_all("dt"), dl.find_all("dd")):
                if len(dfield) == 2:
                    field_name = dfield[0].contents[0].split(':', 1)[0]
                    field_types = get_filed_types(field_name, dfield[0].contents[1].text)
                    # get field description
                    if dfield[1].p is not None:
                        field_description = dfield[1].p.text.replace('\n', ' ').replace('\r', '')
                        field_description = field_description.encode('ascii', 'ignore').decode('ascii')
                    else:
                        field_description = ""
                    if 'conn_id' in field_types.get('bro'):
                        log_fields += add_conn_id()
                    elif '::' in field_types.get('bro'):
                        logger.info(' * parsing nested log in {}, field: {}'.format(
                            log_file, field_types.get('bro')))
                        log_fields += get_nested_fields(field_name, field_types,
                                                        build_url(url, dfield[0].a['href']))
                    else:
                        log_fields.append(dict(field=field_name, types=field_types,
                                               description=field_description))
        else:
            logger.warning(' ===> unable to parse fields for log: ' + log_file +
                           '. Not a dl-table, trying as p-table...')
            log_fields += parse_p_table(soup=soup, field_name=None, dt_text=dt_text, url=url)
    except Exception as e:
        logger.error('parsing log: {}, field: {}'.format(log_file, dt_text))
        logger.exception(e)
    return log_fields
def comment_submission(self, comment, headers):
    # Post the request
    res = requests.post(build_url('submission/{}/comment'.format(comment['submission_id'])),
                        json=comment, headers=headers)
    # Comment created
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        comment['_id'] = obj['_id']
        Logger.success('Comment created id : ' + comment['_id'])
        return True
    # Comment post failed
    elif res.status_code == 500:
        Logger.error(res.json())
    else:
        Logger.error(res.text)
    return False
def vote_submission(self, vote, headers):
    # Post the request
    res = requests.post(build_url('submission/{}/vote'.format(vote['submission_id'])),
                        json=vote, headers=headers)
    # Vote created
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        vote['_id'] = obj['_id']
        Logger.success('Vote created id : ' + vote['_id'])
        return True
    # Vote post failed
    elif res.status_code == 500:
        Logger.error(res.json())
    else:
        Logger.error(res.text)
    return False
def add_or_update_entry(self, filename):
    basename, extension = os.path.splitext(filename)
    if extension in [".entry", ".meta"]:
        result = urlfetch.fetch(url=build_url(filename))
        if result.status_code == 200:
            entry = Entry.get_or_insert(basename)
            if extension.endswith(".entry"):
                entry.content = jinja2_env.from_string(result.content.decode('utf-8')).render()
            else:
                try:
                    meta = yaml.load(result.content)
                except Exception:
                    logging.error("failed to parse YAML")
                else:
                    entry.title = meta["name"]
            entry.slug = basename
            entry.put()
        else:
            logging.error("failed to fetch %s" % filename)
def create_contest(self, contest, headers):
    # Post the request
    res = requests.post(build_url('contest'), json=contest, headers=headers)
    # Contest created
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        contest['_id'] = obj['_id']
        Logger.success('contest created id : ' + contest['_id'])
        return True
    # Contest post failed
    else:
        Logger.error(str(res.json()))
        return False
def upload_file(self, image, object_name):
    """Upload a file object to S3 and return its public URL.

    :param image: file object to upload
    :param object_name: S3 object name
    :return: the file URL if the upload succeeded, else False
    """
    try:
        # Upload the file
        bucket = base.BUCKET
        base.s3.upload_fileobj(image, bucket, object_name)
        file_url = build_url(base.S3_BASE_URL, bucket + '/' + object_name + '/')
        print(file_url)
        return file_url
    except Exception:
        return False
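# A minimal sketch of the `base` module assumed by upload_file above; the
# bucket name and base URL below are placeholders, not the project's values.
import boto3

s3 = boto3.client('s3')
BUCKET = 'my-example-bucket'
S3_BASE_URL = 'https://s3.amazonaws.com'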
def update_comment_comment(self, comment, headers):
    res = requests.put(build_url('comment/' + comment['_id']),
                       json={"comment": comment['comment']},
                       headers=headers)
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        if obj['comment'] == comment['comment']:
            Logger.success('Comment successfully updated to {}.'.format(obj['comment']))
            return True
        else:
            Logger.error('Comment is {} and should be {}.'.format(
                obj['comment'], comment['comment']))
            return False
    else:
        Logger.error(res.content)
        return False
def similar(self, app_id, detailed=False, **kwargs):
    """Sends a GET request, follows the redirect, and retrieves a list of
    applications similar to the specified app.

    :param app_id: app to retrieve details from, e.g. 'com.nintendo.zaaa'
    :param detailed: if True, sends a request per app for its full detail
    :return: a list of similar apps
    """
    url = build_url('similar', app_id)
    response = send_request('GET', url, params=self.params, allow_redirects=True)
    soup = BeautifulSoup(response.content, 'lxml', from_encoding='utf8')

    if detailed:
        apps = self._parse_multiple_apps(response)
    else:
        apps = [parse_card_info(app) for app in soup.select('div[data-uitype=500]')]
    return apps
def details(self, app_id):
    """Sends a GET request and parses an application's details.

    :param app_id: the app to retrieve details for, e.g. 'com.nintendo.zaaa'
    :return: a dictionary of app details
    """
    url = build_url('details', app_id)

    try:
        response = send_request('GET', url, params=self.params)
        soup = BeautifulSoup(response.content, 'lxml', from_encoding='utf8')
    except requests.exceptions.HTTPError as e:
        raise ValueError('Invalid application ID: {app}. {error}'.format(
            app=app_id, error=e))

    app_json = parse_app_details(soup)
    app_json.update({
        "app_id": app_id,
        "url": url,
    })
    return app_json
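# Illustrative usage, assuming a scraper class exposing the `details` and
# `similar` methods above (its constructor and `self.params` setup are not
# shown in this excerpt, so the class name here is a placeholder):
#   scraper = PlayScraper()
#   scraper.details('com.nintendo.zaaa')
#   scraper.similar('com.nintendo.zaaa', detailed=False)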
def connexion_get_token(self, user):
    # Post the request
    res = requests.post(build_url('user/connexion'), json=user)
    if res.status_code == 200:
        Logger.success('Status code : {}'.format(res.status_code))
    else:
        Logger.error('Status code : {}'.format(res.status_code))

    # User logged in
    if res.status_code == 200:
        obj = res.json()
        Logger.info(str(obj))
        Logger.success('User (' + user['_id'] + ') logged in and got token : ' + obj['token'])
        return obj['token']
    # User can't log in
    elif res.status_code == 500:
        Logger.error(res.json())
    else:
        Logger.error(res.text)
    return False
def build_index(sid="", eid="", category="", vtid=""):
    addon_handle = int(sys.argv[1])
    if sid == "":
        top_index()
    # event eg. Phillip Island
    #if eid == "":
    elif category == "":
        cat_list = [
            ('Superbike', 'sbk'),
            ('Supersport', 'ssp'),
            ('Supersport 300', 'ssp300'),
            ('Superstock 1000', 'stk'),
            ('Superstock 600', 'st6'),
            ('European Junior Cup', 'ejc')
        ]
        for label, code in cat_list:
            url = utils.build_url({'sid': sid, 'category': code})
            listitem = xbmcgui.ListItem(label=label)
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                        listitem=listitem, isFolder=True)
        xbmcplugin.endOfDirectory(addon_handle)
        xbmcplugin.setContent(addon_handle, content='tvshows')
    elif vtid == "":
        vtid_list = [
            ('Full sessions', 6),
            ('Highlights', 5),
            ('Features', 2),
            ('Interviews', 1),
            ('Season review', 17),
        ]
        for label, vt in vtid_list:
            url = utils.build_url({'sid': sid, 'category': category, 'vtid': vt})
            listitem = xbmcgui.ListItem(label=label)
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                        listitem=listitem, isFolder=True)
        xbmcplugin.endOfDirectory(addon_handle)
        xbmcplugin.setContent(addon_handle, content='tvshows')
    else:
        vids = get_vids(sid=sid, category=category, vtid=vtid)
        for v in vids:
            thumbnail_url = v['thumbnail']
            url = utils.build_url({'play': v['url'], 'nid': v['nid']})
            listitem = xbmcgui.ListItem(label=v['listtitle'],
                                        iconImage=thumbnail_url,
                                        thumbnailImage=thumbnail_url)
            listitem.setInfo('video', get_xbmc_videoInfo(v))
            listitem.addStreamInfo('audio', get_xbmc_audioStreamInfo())
            listitem.addStreamInfo('video', get_xbmc_videoStreamInfo())
            xbmcplugin.addDirectoryItem(handle=addon_handle, url=url,
                                        listitem=listitem, isFolder=True)
        xbmcplugin.endOfDirectory(addon_handle)
        xbmcplugin.setContent(addon_handle, content='episodes')