Code Example #1
def __information(self, url):
    try:
        _shogi = _req.get(url, headers=fis_hed).text
        _bes = bs_(_shogi, 'html.parser')
        _data = _bes.find('div', class_='sbox')
        # Labels sit in <span class="dark_text">; the value is the node right after.
        for data in _data.find_all('span', class_='dark_text'):
            self._info.append(data.text)
            self._tempo.append(data.next_sibling)
        try:
            _sinon = ''.join(ko.text for ko in bs_(
                re.search(r'/></p><p>(.*?)<br/>', _bes.decode()).group(),
                'html.parser').find_all('p'))
        except AttributeError:
            exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} Can Not Find Anime Synopsis')
        _Auth('clear')
        _sprint(__banner__)
        _sprint(f" {b}[{h}≋{b}].{p} Title: {_data.find('h1').text}\n"
                f" {b}[{h}≋{b}].{p} Upload: {_data.find_all('p')[0].text}\n"
                f" {b}[{h}≋{b}].{p} Synopsis: {_sinon}")
        for _pis, pis in enumerate(self._info):
            _sprint(f' {b}[{h}≋{b}].{p}{pis}{self._tempo[_pis]}')
        input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Enter To Continue{m}!')
        self.__downloadPage(_bes)
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    except (EOFError, KeyboardInterrupt):
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} Passing{m}!')
Code Example #2
File: irr_parser_new.py  Project: austiuzhin/paper
def urls_for_items(url, proxie):
    data = requests.get(url, proxies=proxie)
    s_data = bs_(data.text, 'lxml')
    return [
        item.get('href') for item in s_data.find_all(
            "a", {"class": "listing__itemTitle js-productListingProductName"})
    ]
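A possible invocation, assuming `requests` and `bs_` (a BeautifulSoup alias) are imported as in the other irr_parser_new.py helpers; the URL and proxy values below are placeholders:

# Hypothetical usage; the proxy dict follows requests' `proxies=` format.
listing_url = 'https://irr.ru/real-estate/apartments-sale/'  # placeholder
proxie = {'http': 'http://127.0.0.1:8080'}                   # placeholder
for href in urls_for_items(listing_url, proxie):
    print(href)  # one product-page link per listing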
Code Example #3
def _show_info(self, url):
    try:
        _shogi = _req.get(url, headers=fis_hed).text
        _bes = bs_(_shogi, 'html.parser')
        _Auth('clear')
        _sprint(__banner__)
        _sprint(
            f" {b}[{h}≋{b}]{p} Title {b}:{p} {_bes.find('div',class_='data').find('h1').text}\n"
            f" {b}[{h}≋{b}]{p} Type {b}:{p} {_bes.find('span',class_='calidad2').text}\n"
            f" {b}[{h}≋{b}]{p} Genre {b}:{p} {', '.join(c.text for c in _bes.find_all('a',rel='category'))}\n"
            f" {b}[{h}≋{b}]{p} Rating {b}:{p} {_bes.find('span',itemprop='ratingValue').text}\n"
            f" {b}[{h}≋{b}]{p} Published {b}:{p} {_bes.find('i',itemprop='datePublished').text}\n"
            f" {b}[{h}≋{b}]{p} Synopsis {b}:{p} {''.join(par.text for par in _bes.find('div',itemprop='description').find_all('p'))}"
        )
        # Label/value pairs live in <span class="dark_text">Label</span>Value.
        for _lose in _bes.find('div', itemprop='description').find_all(
                'span', class_='dark_text'):
            self.sin.append(_lose.text)
            self.sin_.append(_lose.next_sibling)  # was `.nextSiblin`, a typo
        for cek_, _ in enumerate(self.sin):
            _sprint(f' {b}[{h}≋{b}]{p} {self.sin[cek_]} {self.sin_[cek_]}')
        input(
            f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Press Enter To Continue{m}!{p} '
        )
        self._show_download(_bes)
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    except (EOFError, KeyboardInterrupt):
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} Passing{m}!')
Code Example #4
File: irr_parser_new.py  Project: austiuzhin/paper
def item_parser(url, estate_type, proxie):
    data = requests.get(url, proxies=proxie)
    s_data = bs_(data.text, 'lxml')
    metro_stations = out_of_csv('metro_.csv')
    name_node = s_data.find("h1", {"itemprop": "name"})
    try:
        additional_information_about_object_in_item = retrieving_additional_information_about_object_from_description(
            s_data, estate_type)
    except IOError:
        additional_information_about_object_in_item = None

    # Pick the first known metro station mentioned in the listing, if any.
    metro_description = s_data.find("div", class_=re.compile('_metro-'))
    metro_station_near_object = None
    if metro_description:
        for item in metro_stations:
            if item[0] in metro_description.text.strip():
                metro_station_near_object = item[0]

    address_description = s_data.find(
        "div", {"class": "productPage__infoTextBold js-scrollToMap"})

    # Concatenate all digit groups of the price text, dropping separators.
    num_searcher = re.compile(r"[+-]?\d+(?:\.\d+)?")
    price_from_description = s_data.find("div", class_=re.compile('_price'))
    list_of_numbers_from_string = (
        num_searcher.findall(price_from_description.text.strip())
        if price_from_description else None)
    price = (float(''.join(list_of_numbers_from_string))
             if list_of_numbers_from_string else None)

    date = date_retrieved_from_object_info(s_data)

    info = additional_information_about_object_in_item
    return {
        'type': estate_type,
        'obj_address': address_description.text.strip() if address_description else None,
        'metro_station': metro_station_near_object,
        'name': name_node.text.strip() if name_node else None,
        'area': info.get('total_space') if info else None,
        'rooms': info.get('number_of_rooms') if info else None,
        'floor': info.get('floor_number') if info else None,
        'price': price,
        'href': url,
        'source': 'irr',
        'date': date,
    }
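The price extraction above concatenates every digit group `findall` returns, which conveniently drops thousands separators. A standalone check of that behavior, with a made-up price string:

import re

num_searcher = re.compile(r"[+-]?\d+(?:\.\d+)?")
sample = "2 500 000 rub."              # hypothetical price text
groups = num_searcher.findall(sample)  # ['2', '500', '000']
print(float(''.join(groups)))          # 2500000.0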
Code Example #5
def _ongoingEps(url):
    metadata = []
    tempo = []
    episode = []
    judul = []
    release = []
    sinopsis = []
    try:
        _shogi = _req.get(url, headers=fis_hed).text
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    _bes = bs_(_shogi, 'html.parser')
    _kintel = _bes.find('div', class_='spaceit_pad').find('span').next_sibling  # collected but unused below
    meta__ = _bes.find_all('div', class_='metadatac')
    for _meta in meta__:
        metadata.append(_meta.find('b').text)
        tempo.append(_meta.find('span').text)
    _desc = _bes.find('div', itemprop='description')
    for _des in _desc.find_all('p'):
        sinopsis.append(str(_des.text))
    _Auth('clear')
    _sprint(__banner__)
    for ii, ii_ in enumerate(metadata):
        _sprint(f' {b}[{p}≆{b}].{p} {ii_} {b}: {p}{tempo[ii]}')
    _sprint(f" {b}[{p}≆{b}].{p} Synopsis {b}:{p} {''.join(sinopsis)}")
    for _sinon in _desc.find_all('div', class_='spaceit_pad'):
        _sprint(f' {b}[{p}≆{b}].{p} {_sinon.text}')
    input(
        f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Press Enter To View Eps List {m}!{p} ')
    for _tamper in _bes.find_all('div', class_='episodiotitle'):
        judul.append(_tamper.find('a').get_text())
        episode.append(_tamper.find('a')["href"])
        release.append(_tamper.find('span', class_='date').get_text())
    first_ = _bes.find('span', class_='title').text
    _Auth('clear')
    _sprint(__banner__)
    _sprint(f' {m}⇵ {b}[{p} List Eps Of Anime {h}{first_} {b}] {m}⇵{p}\n')
    for var, var_ in enumerate(judul):
        _sprint(
            f' {b}[{p}{var+1}{b}].{p} {var_}\n   {h}▪ {p}Release At {m}:{b} {release[var]}{p}'
        )
    _chus = input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Choice {b}:{p} ')
    if _chus == '':
        print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
        exit()
    elif _chus in _ascii:
        if _chus.lower() == 'n':
            pass  # next page: not implemented for the episode list
        elif _chus.lower() == 'p':
            pass  # previous page: not implemented for the episode list
    else:
        # Numeric choice: open the selected episode (menu is 1-based).
        try:
            if int(_chus) - 1 < len(judul):
                _find_url(episode[int(_chus) - 1])
            else:
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Index Out Of Range{m}!')
                exit()
        except ValueError:
            print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Invalid Choice{m}!')
            exit()
Code Example #6
File: irr_complex.py  Project: austiuzhin/paper
def move_deep(url_):
	data = requests.get(url_)
	s_data = bs_(data.text, 'lxml')
	# find() returns None when no such link exists, so guard before .get()
	link_node = s_data.find(href="/real-estate/")
	if link_node is not None:
		return link_node.get('href')
	return 'Link not found'
Code Example #7
File: irr_parser_new.py  Project: austiuzhin/paper
def retrieving_last_possible_page(url, proxie):
    data = requests.get(url, proxies=proxie)
    s_data = bs_(data.text, 'lxml')
    list_of_pages_from_pagination = s_data.find_all(
        "a", class_=re.compile('esLink'))
    # The last pagination link holds the highest page number, if any exist.
    last_page_from_pagination = (list_of_pages_from_pagination[-1].text
                                 if list_of_pages_from_pagination else None)
    return int(last_page_from_pagination) if last_page_from_pagination else None
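A sketch of how this helper might drive a crawl loop; the URL is a placeholder, and `proxie=None` simply disables proxying in requests:

last_page = retrieving_last_possible_page(
    'https://irr.ru/real-estate/apartments-sale/', proxie=None)  # placeholder URL
if last_page:
    for page in range(1, last_page + 1):
        ...  # fetch page `page` of the listing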
Code Example #8
def _SearchTitle(url):
    judul = []
    link = []
    if 'https://' not in url:
        # Bare keywords become a Neonime search query.
        url = f"https://neonime.vip/?s={url.replace(' ', '+')}"
        fis_random.append(url)
    try:
        _shogi = _req.get(url, headers=fis_hed).text
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    _bes = bs_(_shogi, 'html.parser')
    box__ = _bes.find_all('div', class_='boxinfo episode')
    if len(box__) != 0:
        for _stmp in box__:
            judul.append(_stmp.find('span', class_='tt').get_text())
            link.append(_stmp.find('a')["href"])
        _Auth('clear')
        _sprint(__banner__)
        for va, va_ in enumerate(judul):
            _sprint(f' {b}[{p}{va+1}{b}].{p} {va_}')
        __prev = _bes.find('div', class_='pag_a').find('a')
        __next = _bes.find('div', class_='pag_b').find('a')
        if __next is not None:
            _sprint(
                f'\n\t  {b}[ {p}Type {b}[{p}N{b}]{p} For Next Type {b}[{p}P{b}]{p} For Prev {b}]{p} '
            )
        _chos = input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Choice {b}:{p} ')
        if _chos == '':
            print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
            exit()
        elif _chos in _ascii:
            if _chos.lower() == 'n':
                _SearchTitle(__next["href"])
            elif _chos.lower() == 'p':
                if __prev is not None:
                    _SearchTitle(__prev["href"])
                else:
                    exit(
                        f'{b}[{m}»{p}Neo{m}«{b}]{p} Can Not Previous First Page Lol{m}!{p}'
                    )
        else:
            # Numeric choice: open the selected result (menu is 1-based).
            try:
                if int(_chos) - 1 < len(judul):
                    _find_url(link[int(_chos) - 1])
                else:
                    print(
                        f' {b}[{m}»{p}Neo{m}«{b}]{p} Your Choice Out Of Range{m}!')
                    exit()
            except ValueError:
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Invalid Choice{m}!')
                exit()
    else:
        print(
            f" {b}[{m}»{p}Neo{m}«{b}]{p} Tittle ``{' '.join(url.split('=')[-1].split('+'))}`` Not Found In Neonime{m}!"
        )
        exit()
Code Example #9
def _MoviesPage(url):
    fis_movies.append(url)
    _Judul = []
    Haref_ = []
    try:
        _shogi = _req.get(url, headers=fis_hed).text
        _bes = bs_(_shogi, 'html.parser')
        for _sao in _bes.find_all('div', class_='boxinfo'):
            _Judul.append(_sao.find('span', class_='tt').text)
            Haref_.append(_sao.find('a')["href"])
        Next_ = _bes.find('div', class_='pag_b').find('a')
        Prev_ = _bes.find('div', class_='pag_a').find('a')
        _Auth('clear')
        _sprint(__banner__)
        _sprint(
            f'  \t{m}≈ {b}[{p} List Of Anime Movies In Neonime.vip {b}] {m}≈\n'
        )
        for var, var_ in enumerate(_Judul):
            _sprint(f' {b}[{p}{var+1}{b}].{p} {var_}')
        if Next_ is not None:
            _sprint(
                f'\n\t  {m}≈ {b}[{p} Type {m}[{p}N{m}]{p} For Next Type {m}[{p}P{m}]{p} For Prev {b}]{m} ≈'
            )
        _cus = input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Choice {b}:{p} ')
        if _cus == '':
            print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
            exit()
        elif _cus in _ascii:
            if _cus.lower() == 'n':
                _MoviesPage(Next_["href"])
            elif _cus.lower() == 'p':
                if Prev_ is not None:
                    _MoviesPage(Prev_["href"])
                else:
                    exit(
                        f' {b}[{m}»{p}Neo{m}«{b}]{p} Can Not Previous First Page Lol{m}!{p}'
                    )
        else:
            # Numeric choice: open the selected movie (menu is 1-based).
            try:
                if int(_cus) - 1 < len(_Judul):
                    _MoviesDownload(Haref_[int(_cus) - 1])
                else:
                    print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Index Out Of Range{m}!')
                    exit()
            except ValueError:
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Invalid Choice{m}!')
                exit()
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    except (EOFError, KeyboardInterrupt):
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} Passing{m}!')
Code Example #10
def _BatchPages(url):
    fis_batch.append(url)
    judul = []
    link = []
    try:
        _shogi = _req.get(url, headers=fis_hed).text
        _bes = bs_(_shogi, 'html.parser')
        for _Ape in _bes.find_all('div', class_='item'):
            judul.append(_Ape.find('span', class_='title').text)
            link.append(_Ape.find('a')["href"])
        Next_ = _bes.find('div', class_='pag_b').find('a')
        Prev_ = _bes.find('div', class_='pag_a').find('a')
        _Auth('clear')
        _sprint(__banner__)
        _sprint(
            f'  \t{m}≈ {b}[{p} List Of Batch Anime In Neonime.vip {b}] {m}≈\n')
        for _cek, cek in enumerate(judul):
            _sprint(f' {b}[{p}{_cek+1}{b}].{p} {cek}')
        if Next_ is not None:
            _sprint(
                f'\n\t  {b}[ {p}Type {b}[{h}N{b}]{p} For Next Type {b}[{h}P{b}]{p} For Prev {b}]{p} '
            )
        cos_ = input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Choice {b}:{p} ')
        if cos_ == '':
            print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
            exit()
        elif cos_ in _ascii:
            if cos_.lower() == 'n':
                _BatchPages(Next_["href"])
            elif cos_.lower() == 'p':
                if Prev_ is not None:
                    _BatchPages(Prev_["href"])
                else:
                    exit(
                        f'{b}[{m}»{p}Neo{m}«{b}]{p} Can Not Previous First Page Lol{m}!{p}'
                    )
        else:
            # Numeric choice: open the selected batch (menu is 1-based).
            try:
                if int(cos_) - 1 < len(judul):
                    _BatchDownload(link[int(cos_) - 1])
                else:
                    print(
                        f' {b}[{m}»{p}Neo{m}«{b}]{p} Your Choice Out Of Range{m}!')
                    exit()
            except ValueError:
                exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} Invalid Choice{m}!')
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    except (EOFError, KeyboardInterrupt):
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} Passing{m}!')
Code Example #11
def _ongoingPage(url):
    fis_ongoing.append(url)
    judul = []
    link = []
    try:
        _shogi = _req.get(url, headers=fis_hed).text
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    _bes = bs_(_shogi, 'html.parser')
    _xbox = _bes.find_all('div', class_='boxinfo')
    for __oyasan in _xbox:
        judul.append(__oyasan.find('span', class_='tt').get_text())
        link.append(__oyasan.find('a')["href"])
    _Auth('clear')
    _sprint(__banner__)
    _sprint(f'  \t{m}≈ {b}[{p} List Of OnGoing Anime In Year 2020 {b}] {m}≈\n')
    for var, var_ in enumerate(judul):
        _sprint(f' {b}[{p}{var+1}{b}].{p} {var_}')
    Prev_ = _bes.find('div', class_='pag_a').find('a')
    Next_ = _bes.find('div', class_='pag_b').find('a')
    if Next_ is not None:
        _sprint(
            f'\n\t  {b}[{p} Type {b}[{p}N{b}]{p} For Next Type {b}[{p}P{b}]{p} For Prev {b}]{p} '
        )
    _chos = input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Choice {b}:{p} ')
    if _chos == '':
        print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
        exit()
    elif _chos in _ascii:
        if _chos.lower() == 'n':
            _ongoingPage(Next_["href"])
        elif _chos.lower() == 'p':
            if Prev_ is not None:
                _ongoingPage(Prev_["href"])
            else:
                print(
                    f' {b}[{m}»{p}Neo{m}«{b}]{p} Can Not Previous First Page Lol{m}!{p}'
                )
                exit()
    else:
        # Numeric choice: open the selected series (menu is 1-based).
        try:
            if int(_chos) - 1 < len(judul):
                _ongoingEps(link[int(_chos) - 1])
            else:
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Index Out Of Range{m}!')
                exit()
        except ValueError:
            print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Invalid Choice{m}!')
            exit()
Code Example #12
File: irr_parcer.py  Project: austiuzhin/paper
def data_retr(url_):  # Retrieve listing data from an HTML page
    data = requests.get(url_)
    s_data = bs_(data.text, 'html.parser')
    names = [
        item.text.strip() for item in s_data.find_all(
            "a", {"class": "listing__itemTitle js-productListingProductName"})
    ]
    sq_meters = [
        item.text.strip() for item in s_data.find_all(
            "div", {"class": "listing__itemColumn_param1"})
    ]
    floor = [
        item.text.strip() for item in s_data.find_all(
            "div", {"class": "listing__itemColumn_param2"})
    ]
    price = [
        float(re.search('[0-9]+', item.text.strip()).group(0))
        for item in s_data.find_all("div", {"class": "listing__itemPrice"})
    ]
    return names, sq_meters, floor, price
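Because the four lists come from parallel `find_all` calls, they stay positionally aligned as long as every listing renders all four fields; assuming it does, a caller can zip them back into per-listing rows (URL is a placeholder):

names, sq_meters, floor, price = data_retr(
    'https://irr.ru/real-estate/apartments-sale/')  # placeholder URL
for row in zip(names, sq_meters, floor, price):
    print(row)  # (name, area, floor, price) for one listing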
Code Example #13
File: irr_complex.py  Project: austiuzhin/paper
def item_parser(url_):
	data = requests.get(url_)
	s_data = bs_(data.text, 'lxml')
	try:
		name = s_data.find("h1", {"itemprop": "name"}).text.strip()
	except AttributeError:
		name = 'Name is not received'

	try:
		characteristics = list()
		char_Block_list = s_data.find_all("div", {"class": "productPage__characteristicsBlock"})[0]
		for item in range(len(char_Block_list.contents)):
			if char_Block_list.contents[item] != '\n':
				characteristics.append(re.sub(r'\W+', '', char_Block_list.contents[item].span.text))
		about_flat_dict = {id_: value for (id_, value) in enumerate(characteristics)}
	except (IndexError, AttributeError):
		about_flat_dict = {"0": "Flat characteristics not found. Probably this is not a flat."}

	# First try: living_rooms, sq_meters_whole, sq_meters_for_living, floor = characteristics
	# When the page is not a flat (for example a room for rent or for sale) that
	# unpacking gives a mistake, so instead a dictionary with all possible data
	# is created and returned.

	try:
		about_flat_tags_list = s_data.find_all("div", {"class": "productPage__infoColumnBlock"})[0]
		about_building_tags_list = s_data.find_all("div", {"class": "productPage__infoColumnBlock"})[1]

		more_data_about_flat_dict = {id_: value for (id_, value) in enumerate([item.text.strip() for item in about_flat_tags_list.find_all("li", {"class": "productPage__infoColumnBlockText"})])}
		more_data_about_building_dict = {id_: value for (id_, value) in enumerate([item.text.strip() for item in about_building_tags_list.find_all("li", {"class": "productPage__infoColumnBlockText"})])}
	except (IndexError, AttributeError):
		more_data_about_flat_dict, more_data_about_building_dict = {'0': 'No more data about apartment.'}, {'0': 'No more information about building.'}
	try:
		address = s_data.find("div", {"class": "productPage__infoTextBold js-scrollToMap"}).text.strip()
	except AttributeError:
		address = 'Address is not received'
	
	'''
	Mod_2
	try:
		if s_data.find("div",{"class":"productPage__price js-contentPrice"}) == None:
			price = (lambda x: float(re.search('[0-9]+',x.text.strip()).group(0)))(s_data.find("div",{"class":"productPage__price"}))
		else:
			price = (lambda x: float(re.search('[0-9]+',x.text.strip()).group(0)))(s_data.find("div",{"class":"productPage__price js-contentPrice"}))
	except:
		price = float(0)
	'''

	'''
	Mod_1
	try:
		#price = (lambda x: float(re.search('[0-9]+',x.text.strip()).group(0)))(s_data.find("div",{"class":"productPage__price js-contentPrice"}))
		price = float((re.compile(r"[+-]?\d+(?:\.\d+)?")).search(re.sub('\W+','',s_data.find("div", class_=re.compile('_price')).text)).group(0))
	except:
		price = float(0)
	'''

	# Both price extractors above are kept disabled, as in the source; default
	# the value here so the return statement does not raise a NameError.
	price = float(0)

	try:
		date = s_data.find("div", {"class": "updateProduct"}).text.strip()
		#date_updated, date_created = re.findall('([0-9]\s+\S+)\W+\S+\s+([0-9]+\s+\S+)',date)[0]
		date_updated = re.sub(r'\W+', '', date)
	except AttributeError:
		date_updated = 'Date is not received'

	return name, address, about_flat_dict, more_data_about_flat_dict, more_data_about_building_dict, price, date_updated  #, date_created
Code Example #14
File: irr_complex.py  Project: austiuzhin/paper
def apartments_sale(url_):
	data = requests.get(url_)
	s_data = bs_(data.text, 'lxml')
	# find() may return None when a link is missing, so guard before .get()
	apartments_node = s_data.find(href=url_[:-1] + "/apartments-sale/")
	rooms_node = s_data.find(href=url_[:-1] + "/rooms-sale/")
	url_apartments = apartments_node.get('href') if apartments_node else None
	url_rooms = rooms_node.get('href') if rooms_node else None
	return url_apartments, url_rooms
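A possible call; the slicing `url_[:-1]` implies `url_` carries a trailing slash, so the placeholder below keeps one:

base = 'https://irr.ru/real-estate/'  # placeholder; note the trailing slash
apartments_url, rooms_url = apartments_sale(base)
print(apartments_url, rooms_url)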
Code Example #15
def _find_url(url):
    resolusi = []
    _data = []
    server = []
    link = []
    try:
        _shogi = _req.get(url, headers=fis_hed).text
    except _req.exceptions.ConnectionError:
        exit(f' {b}[{m}»{p}Neo{m}«{b}]{p} No Internet Connection{m}!')
    _bes = bs_(_shogi, 'html.parser')
    _box = _bes.find('div', class_='sbox')
    _tmp = _box.find_all('li')
    if len(_tmp) != 0:
        # Keep only real resolution rows. The original removed 'MP4'/'MKV'
        # entries while iterating over the same list, which skips elements
        # and still appended the removed ones to _data.
        _data = [li for li in _tmp if li.get_text() not in ('MP4', 'MKV')]
        for _label in _box.find_all('label'):
            resolusi.append(_label.get_text())
        _Auth('clear')
        _sprint(__banner__)
        for var, var_ in enumerate(resolusi):
            _sprint(f' {b}[{p}{var+1}{b}].{p}{var_}')
        try:
            _chos = input(f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Resolusi {b}≽{p} ')
            if _chos == '':
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
                exit()
            # The menu above is 1-based, hence the - 1 when indexing.
            if int(_chos) - 1 < len(_data):
                for pico_ in _data[int(_chos) - 1].find_all('a'):
                    server.append(pico_.get_text())
                    link.append(pico_["href"])
            else:
                print(
                    f' {b}[{m}»{p}Neo{m}«{b}]{p} Your Choice Out Of Range{m}!')
                exit()
            for srv, srv_ in enumerate(server):
                _sprint(f' {b}[{p}{srv+1}{b}].{p} {srv_} {b}≽{p} {link[srv]}')
            _set = input(
                f'\n {b}[{h}»{p}Neo{h}«{b}]{p} Open With Browser {b}:{p} ')
            if _set == '':
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Nothing Choice{m}!')
                exit()
            _set = int(_set)
            if (_set - 1) < len(link):
                _Auth(f'termux-open {link[_set-1]}')
                if len(fis_batch) != 0:
                    _BatchPages(fis_batch[0])
                    del fis_batch[:]
                elif len(fis_ongoing) != 0:
                    _ongoingPage(fis_ongoing[0])
                    del fis_ongoing[:]
                elif len(fis_movies) != 0:
                    _MoviesPage(fis_movies[0])
                    del fis_movies[:]
                else:
                    pass
            else:
                print(f' {b}[{m}»{p}Neo{m}«{b}]{p} Index Out Of Range{m}!')
                exit()
        except ValueError:
            print(
                f' {b}[{m}»{p}Neo{m}«{b}]{p} Your Choice Must Only Number{m}!')
            exit()
    smokeurl = _bes.find_all('p', class_='smokeurl')
    if len(smokeurl) == 0:
        _MoviesDownload(url)
    else:
        _BatchDownload(url)