def findvideos(item):
    """Build the list of playable video options for *item*.

    item.url may be:
      - a dict with a 'links' key: a ready-made list of {'url': ...} options;
      - a dict with a 'find_links' key: links that must first be resolved
        (unshortened, mime-sniffed, or scraped for embedded players);
      - a plain url string: wrapped as a single option.

    @param item: item whose url(s) must be turned into playable entries
    @return: the result of support.server(), which renders the server list
    """
    # logger.debug('DEBUG', item)
    item.contentTitle = item.fulltitle
    itemlist = []
    # Renamed from 'json' to avoid shadowing the stdlib module name.
    options = []

    if 'links' in item.url:
        options = item.url['links']
    elif 'find_links' in item.url:
        for link in item.url['find_links']:
            # Resolve shorteners first, then decide how to treat the url.
            link['url'] = unshortenit.findlinks(link['url'])
            findS = None
            mimetype = mimetypes.MimeTypes().guess_type(link['url'])[0]
            if mimetype is None:
                findS = servertools.get_server_from_url(link['url'])
            if mimetype is None and findS is None:
                # Neither a direct media file nor a known server url:
                # scrape the page for embedded video links.
                data = support.match(link['url']).data
                itemlist_url = servertools.find_video_items(data=data)
                if len(itemlist_url):
                    for item_url in itemlist_url:
                        # Keep the found url only if it matches one of the
                        # optional per-link regex patterns (all pass when
                        # no patterns are given).
                        valid = True
                        patterns = link.get('patterns', False)
                        if patterns:
                            valid = False
                            for pattern in patterns:
                                match = re.search(pattern, item_url.url)
                                if match:
                                    valid = True
                                    break
                        if valid:
                            options.append({"url": item_url.url})
            else:
                # Direct file or recognized server url: use the link as-is.
                options.append(link)
    else:
        # item.url is a plain url string: wrap it as a single option.
        url = item.url
        item.url = {}
        options.append({"url": url})
    # support.dbg()
    for option in options:
        extra = set_extra_values(item, option, item.path)
        itemlist.append(
            item.clone(url=option['url'], action='play', quality=extra.quality,
                       contentLanguage=extra.language, extraInfo=extra.info))

    # item.url is a dict here in every branch, so .get() is safe.
    videolibrary = item.url.get('videolibrary', True)
    item.autoplay = item.url.get('autoplay', False)
    item.url = ''  # do not pass referer
    return support.server(item, itemlist=itemlist, Videolibrary=videolibrary)
def find_video_items(item=None, data=None):
    """Generic search for playable videos on a page.

    Returns an itemlist with ready-to-use play items.
    - When an Item is supplied, the resulting items inherit its parameters.
    - When no Item is supplied, a fresh one is created, carrying only
      server-related fields.

    @param item: Item to search videos for; must hold a valid url
    @type item: Item
    @param data: page content already downloaded (used when item is omitted)
    @type data: str
    @return: itemlist with the results
    @rtype: list
    """
    logger.info()
    results = []

    # Fetch the page unless its content was handed to us directly.
    if data is None:
        data = httptools.downloadpage(item.url).data
    data = unshortenit.findlinks(data)

    if item is None:
        item = Item()
    else:
        # Carry thumbnail/title over to the content* fields when missing.
        if not item.contentThumbnail:
            item.contentThumbnail = item.thumbnail
        if not item.contentTitle:
            item.contentTitle = item.title

    # One play item per video link discovered on the page.
    for label, url, server, thumbnail in findvideos(data):
        results.append(item.clone(
            title=config.get_localized_string(70206) % label,
            action="play",
            url=url,
            thumbnail=thumbnail,
            server=server,
            folder=False))

    return results
def find_video_items(item=None, data=None):
    """
    Generic function to search for videos on a page, returning an itemlist
    with the items ready to use.
    - If an Item is passed as an argument, the resulting items keep the
      parameters of the passed item
    - If no Item is passed, a new one is created, but it will contain no
      parameters other than the server's own
    @param item: Item for which videos are to be searched; it must contain a valid url
    @type item: Item
    @param data: string with the page content already downloaded (if item is not passed)
    @type data: str
    @return: returns the itemlist with the results
    @rtype: list
    """
    # NOTE(review): this appears to duplicate an earlier, identically named
    # definition in this file; at import time the later one wins — confirm
    # which copy is intended to survive.
    logger.info()
    itemlist = []

    # Download the page (only when the caller did not supply its content)
    if data is None:
        data = httptools.downloadpage(item.url).data
    data = unshortenit.findlinks(data)

    # Create an item if there is no item
    if item is None:
        item = Item()
    # Copy the thumbnail and title fields into contentThumbnail and contentTitle
    else:
        if not item.contentThumbnail:
            item.contentThumbnail = item.thumbnail
        if not item.contentTitle:
            item.contentTitle = item.title

    # Find the links to the videos
    for label, url, server, thumbnail in findvideos(data):
        title = config.get_localized_string(70206) % label
        itemlist.append(
            item.clone(title=title, action="play", url=url, thumbnail=thumbnail,
                       server=server, folder=False))

    return itemlist
def itemlistHook(itl):
    """Post-process an episode itemlist before display.

    Items whose title already contains a SxE marker (e.g. '1x05') are kept
    as-is. Items without a marker whose .data contains an http link — a
    whole-season link — are expanded: the link is resolved, its servers are
    collected, grouped by the episode tag extracted from each server's
    filename, and one item per episode is emitted carrying that episode's
    serialized servers.

    @param itl: list of episode items to filter/expand
    @return: the resulting items sorted by title
    """
    ret = []
    for it in itl:
        ep = scrapertools.find_single_match(it.title, r'(\d+x\d+)')
        if not ep and 'http' in it.data:  # whole season ("stagione intera")
            from lib import unshortenit
            data = unshortenit.findlinks(it.data)

            def get_ep(s):
                # Dynamically import the server module; if it can derive a
                # filename from the url, extract the SxE tag from it and
                # group the server under that episode. 'episodes' is bound
                # at call time (it is assigned below, before the calls).
                srv_mod = __import__('servers.%s' % s.server, None, None, ["servers.%s" % s.server])
                if hasattr(srv_mod, 'get_filename'):
                    title = srv_mod.get_filename(s.url)
                    ep = scrapertools.get_season_and_episode(title)
                    if ep:
                        if ep not in episodes:
                            episodes[ep] = []
                        episodes[ep].append(s)

            # NOTE(review): 'item' is not a parameter of this function —
            # presumably a module-level Item in the enclosing file; confirm
            # it is in scope where this hook runs.
            servers = support.server(item, data, CheckLinks=False, Download=False, Videolibrary=False)
            episodes = {}
            for s in servers:
                get_ep(s)  # get the episode number from the file name
            # with futures.ThreadPoolExecutor() as executor:
            #     for s in servers:
            #         executor.submit(get_ep, s)
            # logger.debug(it.contentLanguage)
            ret.extend([
                it.clone(title=typo(ep, 'bold') + typo(it.contentLanguage, '_ [] color kod bold'),
                         contentSeason=int(ep.split('x')[0]),
                         contentEpisodeNumber=int(ep.split('x')[1]),
                         servers=[srv.tourl() for srv in episodes[ep]])
                for ep in episodes
            ])
        elif ep:
            ret.append(it)
    return sorted(ret, key=lambda i: i.title)