def set_extra_values(item, json, path):
    """Build a new Item populated from the *json* mapping of extra values.

    item: source Item used as fallback for thumb / fanart / plot.
    json: dict of extra keys (quality, language, plot, poster, url, ...).
    path: base directory used to resolve relative artwork / url paths.
    Returns the populated Item.
    """
    logger.debug()
    ret = Item()
    for key in json:
        if key == 'quality':
            ret.quality = json[key]
            # Uppercase purely textual qualities (e.g. 'hd' -> 'HD');
            # leave '1080p'-style values untouched.
            if ret.quality and not ret.quality[0].isdigit():
                ret.quality = ret.quality.upper()
        elif key == 'language':
            ret.language = json[key].upper()
        elif key == 'plot':
            ret.plot = json[key]
        elif key in ['poster', 'thumbnail']:
            # Absolute URL -> as-is; relative path -> joined to *path*;
            # bare name -> resource lookup via get_thumb().
            ret.thumb = json[key] if ':/' in json[key] else filetools.join(
                path, json[key]) if '/' in json[key] else get_thumb(json[key])
        elif key == 'fanart':
            ret.fanart = json[key] if ':/' in json[key] else filetools.join(
                path, json[key])
        elif key in ['url', 'link']:
            # FIX: isinstance() instead of type(...) == dict (idiomatic,
            # also accepts dict subclasses such as ordered dicts).
            ret.url = json[key] if ':/' in json[key] or isinstance(
                json[key], dict) else filetools.join(path, json[key])
        elif key == 'seasons_list':
            ret.url = {}
            ret.url['seasons_list'] = json['seasons_list']
        elif key == 'episodes_list':
            ret.url = {}
            ret.url['episodes_list'] = json['episodes_list']
        elif key in ['links', 'find_links']:
            ret.url = {}
            ret.url[key] = json[key]
            ret.url['videolibrary'] = json.get('videolibrary', True)
            ret.url['autoplay'] = json.get('autoplay', False)
        elif key == 'filter':
            # Single-entry mapping: {filterkey: filtervalue}.
            # FIX: next(iter(...)) instead of building a throwaway list.
            filterkey = next(iter(json[key]))
            ret.filter = json[key][filterkey]
            ret.filterkey = filterkey
        elif key == 'description':
            ret.description = json[key]
        elif key == 'info':
            ret.info = json[key]

    if not ret.thumb:
        # Search menus get a dedicated icon; everything else inherits
        # the calling item's thumbnail.
        if 'get_search_menu' in inspect.stack()[1][3]:
            ret.thumb = get_thumb('search.png')
        else:
            ret.thumb = item.thumbnail
    if not ret.fanart:
        ret.fanart = item.fanart
    if not ret.plot:
        ret.plot = item.plot
    logger.debug(ret.url)
    return ret
def set_extra_values(item, json, path):
    """Build a new Item populated from the *json* mapping of extra values.

    item: source Item used as fallback for thumb / fanart / plot.
    json: dict of extra keys (quality, language, plot, poster, url, ...).
    path: base directory used to resolve relative artwork / url paths.
    Returns the populated Item.
    """
    support.log()
    ret = Item()
    for key in json:
        if key == 'quality':
            ret.quality = json[key].upper()
        elif key == 'language':
            ret.language = json[key].upper()
        elif key == 'plot':
            ret.plot = json[key]
        elif key in ['poster', 'thumbnail']:
            # Absolute URL -> as-is; relative path -> joined to *path*;
            # bare name -> resource lookup via get_thumb().
            ret.thumb = json[key] if ':/' in json[key] else filetools.join(
                path, json[key]) if '/' in json[key] else get_thumb(json[key])
        elif key == 'fanart':
            ret.fanart = json[key] if ':/' in json[key] else filetools.join(
                path, json[key])
        elif key in ['url', 'link']:
            ret.url = json[key] if ':/' in json[key] or type(
                json[key]) == dict else filetools.join(path, json[key])
        elif key == 'seasons_list':
            ret.url = {}
            ret.url['seasons_list'] = json['seasons_list']
        elif key == 'episodes_list':
            ret.url = {}
            ret.url['episodes_list'] = json['episodes_list']
        elif key == 'links':
            ret.url = {}
            ret.url['links'] = json[key]
        elif key == 'filter':
            # FIX: dict.keys()[0] only works on Python 2 (keys() is a
            # non-indexable view on Python 3); list(...)[0] works on both
            # and preserves the same (first) key.
            filterkey = list(json[key])[0]
            ret.filter = json[key][filterkey]
            ret.filterkey = filterkey
        elif key == 'description':
            ret.description = json[key]

    if not ret.thumb:
        # Search menus get a dedicated icon; everything else inherits
        # the calling item's thumbnail.
        if 'get_search_menu' in inspect.stack()[1][3]:
            ret.thumb = get_thumb('search.png')
        else:
            ret.thumb = item.thumbnail
    if not ret.fanart:
        ret.fanart = item.fanart
    if not ret.plot:
        ret.plot = item.plot
    return ret
def findvideos(item):
    """Return the list of playable server Items for *item*.

    Lazily scrapes the episode page on first call (the player list is
    cached on item.urls), then maps every recognised server to a 'play'
    Item. For movies, appends the "add to videolibrary" entry when the
    videolibrary is enabled.
    """
    logger.info()
    itemlist = []

    if not item.urls:
        # The player list is embedded in the page's Next.js
        # __NEXT_DATA__ JSON blob.
        page = get_source(item.url, soup=True)
        next_data = jsontools.load(page.find('script', id='__NEXT_DATA__').text)
        first_season = next_data['props']['pageProps']['data']['seasons'][0]
        item.urls = first_season['episodes'][0]['players']

    # Recorremos la lista de servidores
    for player in item.urls:
        server = server_list.get(player['name'].lower())
        # Si no hay server (server nuevo o inválido), continuamos
        if not server:
            continue
        url = '{}{}'.format(server_urls.get(server, ''), player['id'])
        serv_name = servertools.get_server_name(server)
        entry_title = '{}: {} {}'.format(
            config.get_localized_string(60335),
            serv_name.title(),
            unify.add_languages('', item.language))
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=server,
                        thumbnail=item.thumbnail,
                        title=entry_title,
                        url=url)

        # Chequeos (asignar fanart, plot y formatear títulos)
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if not item.contentType == 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # Si es peli y podemos, agregamos el elemento "Agregar a videoteca"
    if itemlist and config.get_videolibrary_support() \
            and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_pelicula_to_library",
                 channel=item.channel,
                 contentType="movie",
                 contentTitle=item.contentTitle,
                 extra="findvideos",
                 infoLabels={'year': item.infoLabels.get('year')},
                 title="[COLOR yellow]{}[/COLOR]".format(
                     config.get_localized_string(60353)),
                 url=item.url,
                 videolibrary=True))
    return itemlist
def findvideos(item):
    """Return the list of playable server Items for *item*.

    item.urls is a list of option dicts; falsy values inside each option
    and options left empty are filtered out before mapping each one to
    its known server. Delegates to seasons() for videolibrary entries.
    """
    logger.info()
    itemlist = []
    if item.videolibrary:
        return seasons(item)

    # Drop falsy values inside every option dict, then drop empty options.
    servers = []
    for raw_option in item.urls:
        cleaned = {key: val for key, val in raw_option.items() if val}
        if cleaned:
            servers.append(cleaned)

    # Recorremos la lista de servidores
    for option in servers:
        server = server_list.get(option['opcion'].lower())
        # Si no hay server (server nuevo o inválido), continuamos
        if not server:
            continue
        url = '{}{}'.format(server_urls.get(server, ''), option['url'])
        serv_name = servertools.get_server_name(server)
        new_item = Item(action='play',
                        channel=item.channel,
                        infoLabels=item.infoLabels,
                        language=item.language,
                        server=server,
                        thumbnail=item.thumbnail,
                        title=unify.add_languages(
                            '{}: {}'.format(config.get_localized_string(60335),
                                            serv_name.title()),
                            item.language),
                        url=url)

        # Chequeos (asignar fanart, plot y formatear títulos)
        if item.fanart and not new_item.fanart:
            new_item.fanart = item.fanart
        if item.contentPlot and not new_item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if not item.contentType == 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # Si es peli y podemos, agregamos el elemento "Agregar a videoteca"
    if itemlist and config.get_videolibrary_support() \
            and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(action="add_pelicula_to_library",
                 channel=item.channel,
                 contentType="movie",
                 contentTitle=item.contentTitle,
                 extra="findvideos",
                 infoLabels={'year': item.infoLabels.get('year')},
                 title="[COLOR yellow]{}[/COLOR]".format(
                     config.get_localized_string(60353)),
                 url=item.url,
                 videolibrary=True))
    return itemlist
def findvideos(item):
    """Return the list of playable server Items for *item*.

    item.urls is a list of option dicts ('opcion' = server label,
    'url' = server-specific id). Delegates to seasons() for
    videolibrary entries.

    Fix: the unknown-server guard now runs BEFORE the URL is built
    (previously the URL was computed with server=None and then thrown
    away on `continue`); the dead `url = ''` initializer is removed.
    This matches the sibling findvideos variant's ordering.
    """
    logger.info()
    itemlist = []
    if item.videolibrary:
        return seasons(item)

    servers = item.urls
    for option in servers:
        server = server_list.get(option['opcion'].lower())
        # Skip unknown/new servers before doing any work for them.
        if not server:
            continue
        url = '{}{}'.format(server_urls.get(server, ''), option['url'])
        serv_name = servertools.get_server_name(server)
        new_item = Item(
            action = 'play',
            channel = item.channel,
            infoLabels = item.infoLabels,
            language = item.language,
            server = server,
            thumbnail = item.thumbnail,
            title = unify.add_languages('{}: {}'.format(config.get_localized_string(60335), serv_name.title()), item.language),
            url = url
        )
        # Inherit artwork/plot from the parent item; format series titles.
        if hasattr(item, 'fanart'):
            new_item.fanart = item.fanart
        if item.contentPlot:
            new_item.contentPlot = item.contentPlot
        if not item.contentType == 'movie':
            unify.title_format(new_item)
        itemlist.append(new_item)

    # For movies, offer the "add to videolibrary" entry when supported.
    if len(itemlist) > 0 and config.get_videolibrary_support() and item.contentType == 'movie' and not item.videolibrary:
        itemlist.append(
            Item(
                action = "add_pelicula_to_library",
                channel = item.channel,
                contentType = "movie",
                contentTitle = item.contentTitle,
                extra = "findvideos",
                infoLabels = {'year': item.infoLabels.get('year')},
                title = "[COLOR yellow]{}[/COLOR]".format(config.get_localized_string(60353)),
                url = item.url,
                videolibrary = True
            )
        )
    return itemlist
def read_nfo(path_nfo, item=None):
    """Read a .nfo file: line 0 is the scraper URL, the rest a JSON Item.

    When *item* is given, a clone of it receives the stored infoLabels,
    playcounts and path; otherwise the Item comes straight from the file.
    Returns (scraper_url, item) — ("", None) when the file is missing.
    """
    scraper_url = ""
    result = None
    if filetools.exists(path_nfo):
        scraper_url = filetools.read(path_nfo, 0, 1)   # first line only
        stored = Item().fromjson(filetools.read(path_nfo, 1))  # skip line 0
        if item:
            result = item.clone()
            result.infoLabels = stored.infoLabels
            if "library_playcounts" in stored:
                result.library_playcounts = stored.library_playcounts
            if stored.path:
                result.path = stored.path
        else:
            result = stored
        # Promote any fanart stored inside infoLabels to the Item itself.
        if "fanart" in result.infoLabels:
            result.fanart = result.infoLabels["fanart"]
    return scraper_url, result
def read_nfo(path_nfo, item=None):
    """Read a .nfo file: line 0 is the scraper URL, the rest a JSON Item.

    When *item* is given, a clone of it receives the stored infoLabels,
    playcounts and path; otherwise the Item comes straight from the file.
    Returns (scraper_url, item) — ("", None) when the file is missing.
    """
    # Guard clause: nothing to read.
    if not filetools.exists(path_nfo):
        return "", None

    url_scraper = filetools.read(path_nfo, 0, 1)            # first line only
    nfo_item = Item().fromjson(filetools.read(path_nfo, 1))  # skip line 0
    if item:
        it = item.clone()
        it.infoLabels = nfo_item.infoLabels
        if 'library_playcounts' in nfo_item:
            it.library_playcounts = nfo_item.library_playcounts
        if nfo_item.path:
            it.path = nfo_item.path
    else:
        it = nfo_item
    # Promote any fanart stored inside infoLabels to the Item itself.
    if 'fanart' in it.infoLabels:
        it.fanart = it.infoLabels['fanart']
    return url_scraper, it
def listado(item):
    """Scrape a copiapop file-listing page into a list of Items.

    Downloads the gallery view first (its HTML carries the thumbnail
    URLs), then the list view; each file block becomes a 'findvideos'
    Item and a background thread downloads its thumbnail. Appends a
    pagination Item when a next page exists.
    """
    logger.info()
    itemlist = []

    # Gallery-mode request: only meaningful for POST listings.
    data_thumb = httptools.downloadpage(
        item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    if not item.post:
        data_thumb = ""
        item.url = item.url.replace("/gallery,", "/list,")
    data = httptools.downloadpage(item.url, item.post).data
    data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data)
    folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')

    patron = '<div class="size">(.*?)</div></div></div>'
    bloques = scrapertools.find_multiple_matches(data, patron)
    for block in bloques:
        if "adult_info" in block and not adult_content:
            continue
        size = scrapertools.find_single_match(block, '<p>([^<]+)</p>')
        scrapedurl, scrapedtitle = scrapertools.find_single_match(
            block, '<div class="name"><a href="([^"]+)".*?>([^<]+)<')
        scrapedthumbnail = scrapertools.find_single_match(
            block, "background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                # Match this entry's thumb in the gallery page, then map it
                # to a local cache filename derived from the URL tail.
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
                if data_thumb:
                    url_thumb = scrapertools.find_single_match(
                        data_thumb, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                scrapedthumbnail = filetools.join(
                    folder, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            # Fetch the thumbnail in the background so listing stays snappy.
            t = threading.Thread(target=download_thumb,
                                 args=[scrapedthumbnail, url_thumb])
            t.setDaemon(True)
            t.start()
        else:
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"

        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in block:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(block,
                                              '<div class="desc">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)
        new_item = Item(channel=item.channel, action="findvideos", title=title,
                        url=scrapedurl, thumbnail=scrapedthumbnail,
                        contentTitle=scrapedtitle, text_color=color2,
                        extra=item.extra, infoLabels={'plot': plot},
                        post=item.post)
        if item.post:
            try:
                new_item.folderurl, new_item.foldername = \
                    scrapertools.find_single_match(
                        block,
                        '<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
            except:
                pass
        else:
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
        new_item.fanart = item.thumbnail
        itemlist.append(new_item)

    next_page = scrapertools.find_single_match(
        data,
        '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page,
                          item.post)
            url = item.url
        else:
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page,
                         item.url)
            post = ""
        itemlist.append(
            Item(channel=item.channel, action="listado",
                 title=">> Página Siguiente (%s)" % next_page,
                 url=url, post=post, extra=item.extra))
    return itemlist
elif "timelapsepath" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4)$', cam_data[cam_id]["timelapsepath"] ): video_url = cam_data[cam_id]["timelapsedomain"] + cam_data[cam_id]["timelapsepath"] elif "archivepath" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4)$', cam_data[cam_id]["archivepath"] ): video_url = cam_data[cam_id]["archivedomain"] + cam_data[cam_id]["archivepath"] else: continue video_url.replace("//","/") url = calculate_url(video_url) item=Item(action="play", url=url, folder=False) try: item.title=cam_data[cam_id]["title"] except Exception, e: item.title=str(cam_id) try: item.fanart='http://static.earthcamcdn.com'+cam_data[cam_id]["offlineimage"] except Exception, e: logger.info("[channel.py] [play] ERROR: no fanart") try: item.thumbnail=cam_data[cam_id]["thumbimage"] except Exception, e: logger.info("[channel.py] [play] ERROR: no thumbnail") try: item.plot = re.sub(r'</?span[^>]*>', '', cam_data[cam_id]["description"].replace('+', ' '), flags=re.IGNORECASE ) item.plot = re.sub(r'<[^>]+>', "\n", item.plot) except Exception, e: logger.info("[channel.py] [play] ERROR: no plot") itemlist.append( item ) except Exception, e:
elif "archivepath" in cam_data[cam_id] and re.search( r'(\.flv|\.mp4)$', cam_data[cam_id]["archivepath"]): video_url = cam_data[cam_id][ "archivedomain"] + cam_data[cam_id]["archivepath"] else: continue video_url.replace("//", "/") url = calculate_url(video_url) item = Item(action="play", url=url, folder=False) try: item.title = cam_data[cam_id]["title"] except Exception, e: item.title = str(cam_id) try: item.fanart = 'http://static.earthcamcdn.com' + cam_data[ cam_id]["offlineimage"] except Exception, e: logger.info("[channel.py] [play] ERROR: no fanart") try: item.thumbnail = cam_data[cam_id]["thumbimage"] except Exception, e: logger.info("[channel.py] [play] ERROR: no thumbnail") try: item.plot = re.sub( r'</?span[^>]*>', '', cam_data[cam_id]["description"].replace('+', ' '), flags=re.IGNORECASE) item.plot = re.sub(r'<[^>]+>', "\n", item.plot) except Exception, e: logger.info("[channel.py] [play] ERROR: no plot")
def listado(item):
    """Scrape a copiapop file-listing page into a list of Items.

    The gallery view is downloaded first (its HTML carries the thumbnail
    URLs), then the list view is parsed block by block; thumbnails are
    fetched by background threads. A pagination Item is appended when a
    next page is advertised.
    """
    logger.info()
    itemlist = []

    # Gallery-mode request: only meaningful for POST listings.
    gallery_data = httptools.downloadpage(
        item.url, item.post.replace("Mode=List", "Mode=Gallery")).data
    if not item.post:
        gallery_data = ""
        item.url = item.url.replace("/gallery,", "/list,")
    page_data = httptools.downloadpage(item.url, item.post).data
    page_data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", page_data)
    thumbs_dir = filetools.join(config.get_data_path(), 'thumbs_copiapop')

    file_blocks = scrapertools.find_multiple_matches(
        page_data, '<div class="size">(.*?)</div></div></div>')
    for bloque in file_blocks:
        if "adult_info" in bloque and not adult_content:
            continue
        size = scrapertools.find_single_match(bloque, '<p>([^<]+)</p>')
        scrapedurl, scrapedtitle = scrapertools.find_single_match(
            bloque, '<div class="name"><a href="([^"]+)".*?>([^<]+)<')
        scrapedthumbnail = scrapertools.find_single_match(
            bloque, "background-image:url\('([^']+)'")
        if scrapedthumbnail:
            try:
                # Match this entry's thumb in the gallery page, then map it
                # to a local cache filename derived from the URL tail.
                thumb = scrapedthumbnail.split("-", 1)[0].replace("?", "\?")
                if gallery_data:
                    url_thumb = scrapertools.find_single_match(
                        gallery_data, "(%s[^']+)'" % thumb)
                else:
                    url_thumb = scrapedthumbnail
                scrapedthumbnail = filetools.join(
                    thumbs_dir, "%s.jpg" % url_thumb.split("e=", 1)[1][-20:])
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            # Download the thumbnail without blocking the listing.
            worker = threading.Thread(target=download_thumb,
                                      args=[scrapedthumbnail, url_thumb])
            worker.setDaemon(True)
            worker.start()
        else:
            scrapedthumbnail = item.extra + "/img/file_types/gallery/movie.png"

        scrapedurl = item.extra + scrapedurl
        title = "%s (%s)" % (scrapedtitle, size)
        if "adult_info" in bloque:
            title += " [COLOR %s][+18][/COLOR]" % color4
        plot = scrapertools.find_single_match(
            bloque, '<div class="desc">(.*?)</div>')
        if plot:
            plot = scrapertools.decodeHtmlentities(plot)
        new_item = Item(channel=item.channel, action="findvideos",
                        title=title, url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        contentTitle=scrapedtitle, text_color=color2,
                        extra=item.extra, infoLabels={'plot': plot},
                        post=item.post)
        if item.post:
            try:
                new_item.folderurl, new_item.foldername = \
                    scrapertools.find_single_match(
                        bloque,
                        '<p class="folder"><a href="([^"]+)".*?>([^<]+)<')
            except:
                pass
        else:
            new_item.folderurl = item.url.rsplit("/", 1)[0]
            new_item.foldername = item.foldername
        new_item.fanart = item.thumbnail
        itemlist.append(new_item)

    next_page = scrapertools.find_single_match(
        page_data,
        '<div class="pageSplitterBorder" data-nextpage-number="([^"]+)"')
    if next_page:
        if item.post:
            post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page,
                          item.post)
            url = item.url
        else:
            url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page,
                         item.url)
            post = ""
        itemlist.append(
            Item(channel=item.channel, action="listado",
                 title=">> Página Siguiente (%s)" % next_page,
                 url=url, post=post, extra=item.extra))
    return itemlist