def instances(self):
    """
    Build and return a list of `source` wrappers, one per available NaN
    scraper (disabled ones included).

    Orion/Orionoid scrapers are skipped. On any enumeration failure the
    error is logged and the partially-built list is returned.
    """
    result = []
    try:
        get_scrapers = nanscrapers.relevant_scrapers(
            names_list=None, include_disabled=True, exclude=None)
        for scraper in get_scrapers:
            scraper = scraper()
            # fix: renamed `id` -- it shadowed the builtin.
            scraper_id = scraper.name.lower()
            if scraper_id in ('orion', 'orionoid'):
                continue
            scraperNew = source()
            scraperNew.name = scraper.name
            # _base_link: Do not use base_link that is defined as a property
            # (eg: KinoX), since this can make additional HTTP requests,
            # slowing down the process.
            if not hasattr(scraper, '_base_link'):
                # '' is falsy, so a single truthiness check covers both the
                # missing and the empty-string case.
                if not scraperNew.base_link:
                    try:
                        scraperNew.base_link = scraper.base_link
                    except Exception:
                        # Best effort -- some scrapers raise while resolving
                        # base_link.
                        pass
            scraperNew.enabled = scraper._is_enabled()
            result.append(scraperNew)
    except Exception:
        tools.Logger.error()
    return result
def __get_scrapers(self, include_disabled, exclude):
    """Return scraper instances relevant to self.host, memoised in scraper_cache."""
    relevant = nanscrapers.relevant_scrapers(self.host, include_disabled,
                                             exclude=exclude)
    instances = []
    for klass in relevant:
        # Instantiate each scraper class at most once per process.
        if klass not in scraper_cache:
            scraper_cache[klass] = klass()
        instances.append(scraper_cache[klass])
    return instances
def __get_scrapers(self, include_disabled, exclude):
    """Return scraper instances relevant to self.host, reusing scraper_cache."""
    result = []
    for klass in nanscrapers.relevant_scrapers(self.host, include_disabled,
                                               exclude=exclude):
        try:
            # Cache hit: reuse the previously created instance.
            inst = scraper_cache[klass]
        except KeyError:
            inst = scraper_cache[klass] = klass()
        result.append(inst)
    return result
def instanceEnabled(self):
    """
    Return whether this scraper is currently enabled.

    Re-queries the NaN scraper registry so the latest setting is read;
    falls back to False if the named scraper cannot be resolved, and to the
    cached `self.enabled` flag when no name is set.
    """
    if self.name != '':
        try:
            return nanscrapers.relevant_scrapers(
                names_list=self.name.lower(), include_disabled=False,
                exclude=None)[0]()._is_enabled()
        except Exception:
            # Scraper missing/disabled or registry lookup failed.
            return False
    return self.enabled
def get_nan_sources(self, links_scraper, progressDialog):
    """
    Drain a NaN-scrapers link generator into self.sources, updating the
    progress dialog and tallying per-quality counts as links arrive.

    Stops early when Kodi is shutting down or the dialog is cancelled.
    """
    num_scrapers = len(nanscrapers.relevant_scrapers())
    index = 0
    string1 = control.lang(32406).encode('utf-8')
    counthd = 0
    count1080 = 0
    countSD = 0
    for scraper_links in links_scraper():
        try:
            if xbmc.abortRequested:
                return sys.exit()
            if progressDialog.iscanceled():
                break
            index = index + 1
            percent = int((index * 100) / num_scrapers)
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    try:
                        q = scraper_link['quality']
                        if "1080" in q:
                            count1080 += 1
                        elif "HD" in q:
                            counthd += 1
                        # fix: removed a second, identical (unreachable)
                        # `elif "720" in q:` branch that followed this one.
                        elif "720" in q:
                            counthd += 1
                            scraper_link["quality"] = "HD"
                        elif "560" in q:
                            counthd += 1
                            scraper_link["quality"] = "HD"
                        else:
                            countSD += 1
                    except:
                        # Best effort: malformed link entries are still
                        # appended, just not counted.
                        pass
                    progressDialog.update(
                        percent,
                        "Links Found:" "(" + str(len(self.sources)) + ")",
                        string1 % (num_scrapers - index))
                    self.sources.append(scraper_link)
            try:
                if progressDialog.iscanceled():
                    break
            except:
                pass
        except:
            pass
import xbmcgui import os import xbmc import xbmcaddon import random import sys import urlparse import xbmcvfs from nanscrapers.common import clean_title from BeautifulSoup import BeautifulStoneSoup params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', ''))) mode = params.get('mode') if mode == "DisableAll": scrapers = sorted( nanscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower()) for scraper in scrapers: key = "%s_enabled" % scraper.name xbmcaddon.Addon('script.module.nanscrapers').setSetting(key, "false") sys.exit() elif mode == "EnableAll": scrapers = sorted( nanscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower()) for scraper in scrapers: key = "%s_enabled" % scraper.name xbmcaddon.Addon('script.module.nanscrapers').setSetting(key, "true") sys.exit() try: from sqlite3 import dbapi2 as database
def get_sources(title, year, imdb, tvdb, season, episode, tvshowtitle,
                premiered, timeout=30, preset="search", dialog=None,
                exclude=None, scraper_title=False, listitem=None,
                output_function=koding.Play_Video, skip_selector=False,
                player=None):
    """
    Scrape for video sources using the NaN scraper library and try to play one.

    Args:
        title: movie or episode title
        year: year movie/episode came out
        imdb: imdb identifier
        tvdb: tvdb identifier
        season: season number
        episode: episode number
        tvshowtitle: title of tv show
        premiered: year tv show premiered
        timeout: timeout for scraping link
        preset: preferred quality of stream
        dialog: dialog to use for displaying messages
        exclude: list of scrapers to exclude
        scraper_title: extra movie/tv show title to search first. required if
            scrapers use an alternate spelling

    Returns:
        Boolean indicating playback success
    """
    # NOTE(review): this block was recovered from a collapsed (single-line)
    # source; statement nesting below is reconstructed and should be verified
    # against the original file.
    year = str(year)
    content = 'movie' if tvshowtitle is None else 'episode'
    allow_debrid = ADDON.getSetting('allow_debrid') == "true"
    if ADDON.getSetting(
            'use_link_dialog') == 'Link Selector' and not skip_selector:
        # use link selector
        if content == 'movie':
            scraper = nanscrapers.scrape_movie_with_dialog
            link, rest = scraper(title, year, imdb, timeout=timeout,
                                 exclude=exclude, extended=True,
                                 sort_function=Sources.sort_function,
                                 enable_debrid=allow_debrid)
        elif content == "episode":
            scraper = nanscrapers.scrape_episode_with_dialog
            link, rest = scraper(tvshowtitle, year, premiered, season,
                                 episode, imdb, tvdb, timeout=timeout,
                                 exclude=exclude, extended=True,
                                 sort_function=Sources.sort_function,
                                 enable_debrid=allow_debrid)
        else:
            return
        # The dialog may hand back the link itself or a {"path": ...} wrapper.
        if type(link) == dict and "path" in link:
            link = link["path"]
        if link is None:
            return False
        url = link['url']
        if ADDON.getSetting('link_fallthrough') == 'true':
            # Keep trying the links that follow the selected one until
            # playback succeeds or the list is exhausted.
            played = False
            index = 0
            links = []
            for item in rest:
                if type(item) == dict and "path" in item:
                    links.extend(item["path"][1])
                else:
                    links.extend(item[1])
            index = links.index(link)
            links = links[index + 1:]
            num_results = len(rest) + 1
            while not played:
                try:
                    if dialog is not None and dialog.iscanceled():
                        return False
                    if dialog is not None:
                        index = index + 1
                        percent = int((index * 100) / num_results)
                        line = "%s - %s (%s)" % (link['scraper'],
                                                 link['source'],
                                                 link['quality'])
                        dialog.update(percent, line)
                except:
                    pass
                try:
                    played = output_function(link["url"], showbusy=False,
                                             ignore_dp=True, item=listitem,
                                             player=player)
                    link = links[0]
                    links = links[1:]
                except:
                    # Ran out of fallback links (or playback raised).
                    return False
            return played
        else:
            return output_function(url, showbusy=False, ignore_dp=True,
                                   item=listitem, player=player)
    # elif ADDON.getSetting('use_link_dialog') == 'Dialog+' and not skip_selector:
    else:
        if content == 'movie':
            title = title
            scraper = nanscrapers.scrape_movie
            links_scraper = scraper(title, year, imdb, timeout=timeout,
                                    exclude=exclude,
                                    enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = tvshowtitle
            scraper = nanscrapers.scrape_episode
            links_scraper = scraper(tvshowtitle, year, premiered, season,
                                    episode, imdb, tvdb, timeout=timeout,
                                    exclude=exclude,
                                    enable_debrid=allow_debrid)
        else:
            return
        sd_links = []
        non_direct_links = []
        non_direct_sd_links = []
        num_scrapers = len(nanscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return False
                        if Sources().__check_skip_pairing(scraper_link):
                            continue
                        quality = Sources.__determine_quality(
                            scraper_link["quality"])
                        preset = preset.lower()
                        if preset == 'searchsd':
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                # NOTE(review): SD links are remembered for
                                # the retry pass below but still fall through
                                # to an immediate attempt -- possibly a
                                # missing `continue`; confirm intent.
                                sd_links.append(scraper_link)
                        if scraper_link["direct"]:
                            result = output_function(scraper_link["url"],
                                                     showbusy=False,
                                                     ignore_dp=True,
                                                     item=listitem,
                                                     player=player)
                            if result:
                                return result
                        else:
                            non_direct_links.append(scraper_link)
            # Retry passes: non-direct links first, then SD, then
            # non-direct SD.
            for scraper_link in non_direct_links:
                if dialog is not None and dialog.iscanceled():
                    return False
                result = output_function(scraper_link["url"], showbusy=False,
                                         ignore_dp=True, item=listitem,
                                         player=player)
                if result:
                    return result
            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                if scraper_link['direct']:
                    result = output_function(scraper_link["url"],
                                             showbusy=False, ignore_dp=True,
                                             item=listitem, player=player)
                    if result:
                        return result
                else:
                    non_direct_sd_links.append(scraper_link)
            for scraper_link in non_direct_sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                result = output_function(scraper_link["url"], showbusy=False,
                                         ignore_dp=True, item=listitem,
                                         player=player)
                if result:
                    return result
            return False
        except:
            return False
def get_music_sources(title, artist, timeout=30, preset="search", dialog=None,
                      exclude=None, listitem=None,
                      output_function=koding.Play_Video, skip_selector=False,
                      player=None):
    """
    Scrape for music sources using the NaN scraper library and try to play one.

    Args:
        title: song title
        artist: song artist
        timeout: timeout for scraping link
        preset: preferred quality of stream
        dialog: dialog to use for displaying messages
        exclude: list of scrapers to exclude

    Returns:
        Boolean indicating playback success
    """
    # NOTE(review): recovered from a collapsed source line; statement nesting
    # below is reconstructed -- verify against the original file.
    title = title
    allow_debrid = ADDON.getSetting('allow_debrid') == "true"
    if ADDON.getSetting(
            'use_link_dialog') == 'Link Selector' and not skip_selector:
        link, rest = nanscrapers.scrape_song_with_dialog(
            title,
            artist,
            timeout=timeout,
            exclude=exclude,
            enable_debrid=allow_debrid,
            extended=True)
        # The dialog may hand back the link itself or a {"path": ...} wrapper.
        if type(link) == dict and "path" in link:
            link = link["path"]
        if link is None:
            return False
        url = link['url']
        if ADDON.getSetting('link_fallthrough') == 'true':
            played = False
            index = 0
            links = []
            for item in rest:
                if type(item) == dict and "path" in item:
                    links.extend(item["path"][1])
                else:
                    links.extend(item[1])
            index = links.index(link)
            links = links[index + 1:]
            num_results = len(rest) + 1
            while not played:
                try:
                    if dialog is not None and dialog.iscanceled():
                        return
                    if dialog is not None:
                        index = index + 1
                        percent = int((index * 100) / num_results)
                        line = "%s - %s (%s)" % (link['scraper'],
                                                 link['source'],
                                                 link['quality'])
                        dialog.update(percent, line)
                except:
                    pass
                try:
                    # NOTE(review): replays `url` (the originally selected
                    # link) while `link` advances -- the video variant plays
                    # link["url"] here; possibly a bug, confirm.
                    played = output_function(url, showbusy=False,
                                             ignore_dp=True, item=listitem,
                                             player=player)
                    link = links[0]
                    links = links[1:]
                except:
                    return False
            return played
        else:
            return output_function(url, showbusy=False, ignore_dp=True,
                                   item=listitem, player=player)
    links_scraper = nanscrapers.scrape_song(title, artist, timeout=timeout,
                                            exclude=exclude,
                                            enable_debrid=allow_debrid)
    sd_links = []
    num_scrapers = len(nanscrapers.relevant_scrapers())
    index = 0
    try:
        for scraper_links in links_scraper():
            if dialog is not None and dialog.iscanceled():
                return
            if dialog is not None:
                index = index + 1
                percent = int((index * 100) / num_scrapers)
                dialog.update(percent)
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return
                    if Sources().__check_skip_pairing(scraper_link):
                        continue
                    quality = Sources.__determine_quality(
                        scraper_link["quality"])
                    preset = preset.lower()
                    if preset == 'searchsd':
                        if quality == "HD":
                            continue
                    elif preset == "search":
                        if quality == "SD":
                            sd_links.append(scraper_link)
                    result = output_function(scraper_link["url"],
                                             showbusy=False, ignore_dp=True,
                                             item=listitem, player=player)
                    if result:
                        return result
        # Retry pass over the SD links collected above.
        for scraper_link in sd_links:
            if dialog is not None and dialog.iscanceled():
                return
            result = output_function(scraper_link["url"], showbusy=False,
                                     ignore_dp=True, item=listitem,
                                     player=player)
            if result:
                return result
    except:
        pass
    return False
def get_music_sources(title, artist, timeout=30, preset="search", dialog=None,
                      exclude=None, listitem=None,
                      output_function=koding.Play_Video):
    """
    Scrape for music sources using the NaN scraper library and try to play one.

    Args:
        title: song title
        artist: song artist
        timeout: timeout for scraping link
        preset: preferred quality of stream
        dialog: dialog to use for displaying messages
        exclude: list of scrapers to exclude

    Returns:
        Boolean indicating playback success
    """
    # NOTE(review): recovered from a collapsed source line; statement nesting
    # below is reconstructed -- verify against the original file.
    title = title
    allow_debrid = ADDON.getSetting('allow_debrid') == "true"
    links_scraper = nanscrapers.scrape_song(title, artist, timeout=timeout,
                                            exclude=exclude,
                                            enable_debrid=allow_debrid)
    sd_links = []
    num_scrapers = len(nanscrapers.relevant_scrapers())
    index = 0
    for scraper_links in links_scraper():
        if dialog is not None and dialog.iscanceled():
            return
        if dialog is not None:
            index = index + 1
            percent = int((index * 100) / num_scrapers)
            dialog.update(percent)
        if scraper_links is not None:
            random.shuffle(scraper_links)
            for scraper_link in scraper_links:
                if dialog is not None and dialog.iscanceled():
                    return
                if Sources().__check_skip_pairing(scraper_link):
                    continue
                quality = Sources.__determine_quality(
                    scraper_link["quality"])
                preset = preset.lower()
                if preset == 'searchsd':
                    if quality == "HD":
                        continue
                elif preset == "search":
                    if quality == "SD":
                        sd_links.append(scraper_link)
                result = output_function(scraper_link["url"], showbusy=False,
                                         ignore_dp=True, item=listitem)
                if result:
                    return result
    # Retry pass over the SD links collected above.
    for scraper_link in sd_links:
        if dialog is not None and dialog.iscanceled():
            return
        result = output_function(scraper_link["url"], showbusy=False,
                                 ignore_dp=True, item=listitem)
        if result:
            return result
    return False
def sources(self, url, hostDict, hostprDict):
    """
    Resolve playable sources for the url-encoded media query via this
    provider's NaN scraper.

    Args:
        url: urlencoded query (title/year/imdb/tvdb/season/episode/...).
        hostDict: unused here, kept for the provider interface.
        hostprDict: unused here, kept for the provider interface.

    Returns:
        List of source dicts (possibly empty); errors are logged.
    """
    sources = []
    try:
        # Probe the debrid services in order until one is enabled and valid.
        # fix: the last check previously tested `if not debrid:` -- the
        # module object, always truthy -- so RapidPremium was never probed.
        debridHas = False
        for service in (debrid.Premiumize, debrid.OffCloud, debrid.RealDebrid,
                        debrid.AllDebrid, debrid.RapidPremium):
            if debridHas:
                break
            account = service()
            debridHas = account.accountEnabled() and account.accountValid()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        movie = 'tvshowtitle' not in data
        title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
        year = str(data['year']) if 'year' in data and data['year'] is not None else ''
        season = str(data['season']) if 'season' in data and data['season'] is not None else ''
        episode = str(data['episode']) if 'episode' in data and data['episode'] is not None else ''
        imdb = data['imdb'] if 'imdb' in data else ''
        tvdb = data['tvdb'] if 'tvdb' in data else ''

        scraper = nanscrapers.relevant_scrapers(
            names_list=self.name.lower(), include_disabled=True,
            exclude=None)[0]()
        if self.base_link and not self.base_link == '':
            scraper.base_link = self.base_link

        if movie:
            result = scraper.scrape_movie(title=title, year=year, imdb=imdb,
                                          debrid=debridHas)
        else:
            # Derive the show's premiere year; the 'premiered' date layout
            # varies between providers, so try several formats.
            showYear = year
            try:
                if 'premiered' in data and data['premiered'] is not None and not data['premiered'] == '':
                    for fmt in ['%Y-%m-%d', '%Y-%d-%m', '%d-%m-%Y', '%m-%d-%Y']:
                        try:
                            showYear = str(int(convert.ConverterTime(
                                value=data['premiered'],
                                format=fmt).string(format='%Y')))
                            if len(showYear) == 4:
                                break
                        except Exception:
                            pass
            except Exception:
                pass
            result = scraper.scrape_episode(title=title, year=year,
                                            show_year=showYear, season=season,
                                            episode=episode, imdb=imdb,
                                            tvdb=tvdb, debrid=debridHas)

        if result:
            for item in result:
                item['external'] = True
                item['language'] = self.language[0]
                item['debridonly'] = False
                # Some of the links start with a double http prefix.
                item['url'] = item['url'].replace('http:http:', 'http:').replace('https:https:', 'https:').replace('http:https:', 'https:').replace('https:http:', 'http:')
                # External providers (eg: "Get Out") sometimes have weird
                # characters in the URL. Ignore links that have
                # non-printable ASCII or UTF8 characters.
                try:
                    item['url'].decode('utf-8')
                except Exception:
                    continue
                source = item['source'].lower().replace(' ', '')
                if source == 'direct' or source == 'directlink':
                    # Direct links: name the source after the host, or mark
                    # bare-IP hosts as anonymous.
                    source = urlparse.urlsplit(item['url'])[1].split(':')[0]
                    if network.Networker.ipIs(source):
                        source = 'Anonymous'
                else:
                    # Keep the first real domain label, skipping www/ftp.
                    split = source.split('.')
                    for i in split:
                        i = i.lower()
                        if i in ['www', 'ftp']:
                            continue
                        source = i
                        break
                item['source'] = source
                sources.append(item)
        return sources
    except Exception:
        tools.Logger.error()
        return sources
import nanscrapers import xbmcgui import os import xbmc import xbmcaddon import random import sys import urlparse import xbmcvfs from nanscrapers.common import clean_title from BeautifulSoup import BeautifulStoneSoup params = dict(urlparse.parse_qsl(sys.argv[2].replace('?', ''))) mode = params.get('mode') if mode == "DisableAll": scrapers = sorted(nanscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower()) for scraper in scrapers: key = "%s_enabled" % scraper.name xbmcaddon.Addon('script.module.nanscrapers').setSetting(key, "false") sys.exit() elif mode == "EnableAll": scrapers = sorted(nanscrapers.relevant_scrapers(include_disabled=True), key=lambda x: x.name.lower()) for scraper in scrapers: key = "%s_enabled" % scraper.name xbmcaddon.Addon('script.module.nanscrapers').setSetting(key, "true") sys.exit() try: from sqlite3 import dbapi2 as database
def get_sources(title, year, imdb, tvdb, season, episode, tvshowtitle,
                premiered, timeout=30, preset="search", dialog=None,
                exclude=None, scraper_title=False, listitem=None,
                output_function=koding.Play_Video, skip_selector=False,
                player=None):
    """
    Scrape for video sources using the NaN scraper library and try to play one.

    Args:
        title: movie or episode title
        year: year movie/episode came out
        imdb: imdb identifier
        tvdb: tvdb identifier
        season: season number
        episode: episode number
        tvshowtitle: title of tv show
        premiered: year tv show premiered
        timeout: timeout for scraping link
        preset: preferred quality of stream
        dialog: dialog to use for displaying messages
        exclude: list of scrapers to exclude
        scraper_title: extra movie/tv show title to search first. required if
            scrapers use an alternate spelling

    Returns:
        Boolean indicating playback success
    """
    # NOTE(review): recovered from a collapsed source line; statement nesting
    # below is reconstructed and should be verified against the original file.
    year = str(year)
    content = 'movie' if tvshowtitle is None else 'episode'
    allow_debrid = ADDON.getSetting('allow_debrid') == "true"
    if ADDON.getSetting('use_link_dialog') == 'true' and not skip_selector:
        # use link selector
        if content == 'movie':
            scraper = nanscrapers.scrape_movie_with_dialog
            link, rest = scraper(
                title, year, imdb, timeout=timeout, exclude=exclude,
                extended=True, sort_function=Sources.sort_function,
                enable_debrid=allow_debrid)
        elif content == "episode":
            scraper = nanscrapers.scrape_episode_with_dialog
            link, rest = scraper(
                tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                timeout=timeout, exclude=exclude, extended=True,
                sort_function=Sources.sort_function,
                enable_debrid=allow_debrid)
        else:
            return
        # The dialog may hand back the link itself or a {"path": ...} wrapper.
        if type(link) == dict and "path" in link:
            link = link["path"]
        if link is None:
            return False
        url = link['url']
        if ADDON.getSetting('link_fallthrough') == 'true':
            # Keep trying the links that follow the selected one until
            # playback succeeds or the list is exhausted.
            played = False
            index = 0
            links = []
            for item in rest:
                if type(item) == dict and "path" in item:
                    links.extend(item["path"][1])
                else:
                    links.extend(item[1])
            index = links.index(link)
            links = links[index + 1:]
            num_results = len(rest) + 1
            while not played:
                try:
                    if dialog is not None and dialog.iscanceled():
                        return False
                    if dialog is not None:
                        index = index + 1
                        percent = int((index * 100) / num_results)
                        line = "%s - %s (%s)" % (link['scraper'],
                                                 link['source'],
                                                 link['quality'])
                        dialog.update(percent, line)
                except:
                    pass
                try:
                    played = output_function(
                        link["url"], showbusy=False, ignore_dp=True,
                        item=listitem, player=player)
                    link = links[0]
                    links = links[1:]
                except:
                    # Ran out of fallback links (or playback raised).
                    return False
            return played
        else:
            return output_function(
                url, showbusy=False, ignore_dp=True, item=listitem,
                player=player)
    else:
        if content == 'movie':
            title = title
            scraper = nanscrapers.scrape_movie
            links_scraper = scraper(
                title, year, imdb, timeout=timeout, exclude=exclude,
                enable_debrid=allow_debrid)
        elif content == 'episode':
            if scraper_title:
                tvshowtitle = title
            tvshowtitle = tvshowtitle
            scraper = nanscrapers.scrape_episode
            links_scraper = scraper(
                tvshowtitle, year, premiered, season, episode, imdb, tvdb,
                timeout=timeout, exclude=exclude, enable_debrid=allow_debrid)
        else:
            return
        sd_links = []
        non_direct_links = []
        non_direct_sd_links = []
        num_scrapers = len(nanscrapers.relevant_scrapers())
        index = 0
        try:
            for scraper_links in links_scraper():
                if dialog is not None and dialog.iscanceled():
                    return
                if dialog is not None:
                    index = index + 1
                    percent = int((index * 100) / num_scrapers)
                    dialog.update(percent)
                if scraper_links is not None:
                    random.shuffle(scraper_links)
                    for scraper_link in scraper_links:
                        if dialog is not None and dialog.iscanceled():
                            return False
                        if Sources().__check_skip_pairing(scraper_link):
                            continue
                        quality = Sources.__determine_quality(
                            scraper_link["quality"])
                        preset = preset.lower()
                        if preset == 'searchsd':
                            if quality == "HD":
                                continue
                        elif preset == "search":
                            if quality == "SD":
                                # NOTE(review): SD links are remembered for
                                # the retry pass below but still fall through
                                # to an immediate attempt -- possibly a
                                # missing `continue`; confirm intent.
                                sd_links.append(scraper_link)
                        if scraper_link["direct"]:
                            result = output_function(
                                scraper_link["url"], showbusy=False,
                                ignore_dp=True, item=listitem, player=player)
                            if result:
                                return result
                        else:
                            non_direct_links.append(scraper_link)
            # Retry passes: non-direct links first, then SD, then
            # non-direct SD.
            for scraper_link in non_direct_links:
                if dialog is not None and dialog.iscanceled():
                    return False
                result = output_function(
                    scraper_link["url"], showbusy=False, ignore_dp=True,
                    item=listitem, player=player)
                if result:
                    return result
            for scraper_link in sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                if scraper_link['direct']:
                    result = output_function(
                        scraper_link["url"], showbusy=False, ignore_dp=True,
                        item=listitem, player=player)
                    if result:
                        return result
                else:
                    non_direct_sd_links.append(scraper_link)
            for scraper_link in non_direct_sd_links:
                if dialog is not None and dialog.iscanceled():
                    return
                result = output_function(
                    scraper_link["url"], showbusy=False, ignore_dp=True,
                    item=listitem, player=player)
                if result:
                    return result
            return False
        except:
            return False
def get_music_sources(title, artist, timeout=30, preset="search", dialog=None,
                      exclude=None, listitem=None,
                      output_function=koding.Play_Video, skip_selector=False,
                      player=None):
    """
    Scrape for music sources using the NaN scraper library and try to play one.

    Args:
        title: song title
        artist: song artist
        timeout: timeout for scraping link
        preset: preferred quality of stream
        dialog: dialog to use for displaying messages
        exclude: list of scrapers to exclude

    Returns:
        Boolean indicating playback success
    """
    # NOTE(review): recovered from a collapsed source line; statement nesting
    # below is reconstructed -- verify against the original file.
    title = title
    allow_debrid = ADDON.getSetting('allow_debrid') == "true"
    if ADDON.getSetting('use_link_dialog') == 'true' and not skip_selector:
        link, rest = nanscrapers.scrape_song_with_dialog(
            title, artist, timeout=timeout, exclude=exclude,
            enable_debrid=allow_debrid, extended=True)
        # The dialog may hand back the link itself or a {"path": ...} wrapper.
        if type(link) == dict and "path" in link:
            link = link["path"]
        if link is None:
            return False
        url = link['url']
        if ADDON.getSetting('link_fallthrough') == 'true':
            played = False
            index = 0
            links = []
            for item in rest:
                if type(item) == dict and "path" in item:
                    links.extend(item["path"][1])
                else:
                    links.extend(item[1])
            index = links.index(link)
            links = links[index + 1:]
            num_results = len(rest) + 1
            while not played:
                try:
                    if dialog is not None and dialog.iscanceled():
                        return
                    if dialog is not None:
                        index = index + 1
                        percent = int((index * 100) / num_results)
                        line = "%s - %s (%s)" % (link['scraper'],
                                                 link['source'],
                                                 link['quality'])
                        dialog.update(percent, line)
                except:
                    pass
                try:
                    # NOTE(review): replays `url` (the originally selected
                    # link) while `link` advances -- the video variant plays
                    # link["url"] here; possibly a bug, confirm.
                    played = output_function(
                        url, showbusy=False, ignore_dp=True, item=listitem,
                        player=player)
                    link = links[0]
                    links = links[1:]
                except:
                    return False
            return played
        else:
            return output_function(
                url, showbusy=False, ignore_dp=True, item=listitem,
                player=player)
    links_scraper = nanscrapers.scrape_song(
        title, artist, timeout=timeout, exclude=exclude,
        enable_debrid=allow_debrid)
    sd_links = []
    num_scrapers = len(nanscrapers.relevant_scrapers())
    index = 0
    try:
        for scraper_links in links_scraper():
            if dialog is not None and dialog.iscanceled():
                return
            if dialog is not None:
                index = index + 1
                percent = int((index * 100) / num_scrapers)
                dialog.update(percent)
            if scraper_links is not None:
                random.shuffle(scraper_links)
                for scraper_link in scraper_links:
                    if dialog is not None and dialog.iscanceled():
                        return
                    if Sources().__check_skip_pairing(scraper_link):
                        continue
                    quality = Sources.__determine_quality(
                        scraper_link["quality"])
                    preset = preset.lower()
                    if preset == 'searchsd':
                        if quality == "HD":
                            continue
                    elif preset == "search":
                        if quality == "SD":
                            sd_links.append(scraper_link)
                    result = output_function(
                        scraper_link["url"], showbusy=False, ignore_dp=True,
                        item=listitem, player=player)
                    if result:
                        return result
        # Retry pass over the SD links collected above.
        for scraper_link in sd_links:
            if dialog is not None and dialog.iscanceled():
                return
            result = output_function(
                scraper_link["url"], showbusy=False, ignore_dp=True,
                item=listitem, player=player)
            if result:
                return result
    except:
        pass
    return False
def get_music_sources(title, artist, timeout=30, preset="search", dialog=None,
                      exclude=None, listitem=None,
                      output_function=koding.Play_Video):
    """
    Scrape for music sources using the NaN scraper library and try to play one.

    Args:
        title: song title
        artist: song artist
        timeout: timeout for scraping link
        preset: preferred quality of stream
        dialog: dialog to use for displaying messages
        exclude: list of scrapers to exclude

    Returns:
        Boolean indicating playback success
    """
    # NOTE(review): recovered from a collapsed source line; statement nesting
    # below is reconstructed -- verify against the original file.
    title = title
    allow_debrid = ADDON.getSetting('allow_debrid') == "true"
    links_scraper = nanscrapers.scrape_song(
        title, artist, timeout=timeout, exclude=exclude,
        enable_debrid=allow_debrid)
    sd_links = []
    num_scrapers = len(nanscrapers.relevant_scrapers())
    index = 0
    for scraper_links in links_scraper():
        if dialog is not None and dialog.iscanceled():
            return
        if dialog is not None:
            index = index + 1
            percent = int((index * 100) / num_scrapers)
            dialog.update(percent)
        if scraper_links is not None:
            random.shuffle(scraper_links)
            for scraper_link in scraper_links:
                if dialog is not None and dialog.iscanceled():
                    return
                if Sources().__check_skip_pairing(scraper_link):
                    continue
                quality = Sources.__determine_quality(
                    scraper_link["quality"])
                preset = preset.lower()
                if preset == 'searchsd':
                    if quality == "HD":
                        continue
                elif preset == "search":
                    if quality == "SD":
                        sd_links.append(scraper_link)
                result = output_function(scraper_link["url"], showbusy=False,
                                         ignore_dp=True, item=listitem)
                if result:
                    return result
    # Retry pass over the SD links collected above.
    for scraper_link in sd_links:
        if dialog is not None and dialog.iscanceled():
            return
        result = output_function(scraper_link["url"], showbusy=False,
                                 ignore_dp=True, item=listitem)
        if result:
            return result
    return False