Example #1
	def parse_page(self,url,cookie=None):
		if cookie is None:
			content = ccurl(url)
		else:
			content = ccurl(url+'#-b#'+cookie)
		soup = BeautifulSoup(content,'lxml')
		arr = []
		m = soup.findAll('div',{'class':'item'})
		for i in m:
			k = i.find('a')['href']
			try:
				l = i.find('img')['alt']
				if l.startswith('.'):
					l = l[1:]
				if '/' in l:
					l = l.replace('/','-')
			except Exception as e:
				print(e)
				l = ''
			n = l+'	'+k
			arr.append(n)
		if not arr:
			if 'make sure your browser supports cookie' in content:
				arr.append('get_cookie')
		return arr
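
Note: across these examples, ccurl is the project's curl wrapper and takes a single '#'-delimited string: a '-b' segment names a cookie file, '-d' a POST body, '-o' an output path, and '-I'/'-Ie' ask for headers only (the latter apparently with a referer). That convention is inferred from the call sites collected here; below is a minimal, hypothetical sketch of how such a composed argument splits apart, not part of the real API.

# Hypothetical helper illustrating the '#'-delimited argument convention
# seen with ccurl in these examples; not part of the actual project.
def split_ccurl_arg(arg):
    parts = arg.split('#')
    url = parts[0]
    opt = parts[1] if len(parts) > 1 else ''    # e.g. '-b', '-d', '-o', '-I'
    value = parts[2] if len(parts) > 2 else ''  # cookie file, POST body, output path, ...
    return url, opt, value

print(split_ccurl_arg('http://example.com/page#-b#/tmp/cookie.txt'))
# ('http://example.com/page', '-b', '/tmp/cookie.txt')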
Example #2
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        name_id = (re.search('id=[^\n]*', extra_info).group()).split('=')[1]
        #nm = name.rsplit('-',1)
        #name = nm[0]
        #name_id = nm[1]
        #name = nm[0]
        file_arr = []
        id_station = int(name_id)
        station_url = ''
        if opt == "TV" or '-TV' in name:
            url = "http://thugie.nl/streams.php?tunein=" + str(id_station)
            #content = ccurl(url,'',1)
            content = ccurl(url)
            final = re.findall('http://[^\n]*', content)
            station_url = final[0].rstrip()
            if 'stream.nsv' not in station_url:
                #print "Hello" + station_url
                station_url = str(station_url.rstrip() + ";stream.nsv")

        else:
            url = "https://www.shoutcast.com/Player/GetStreamUrl"
            #content = ccurl(url,id_station,2)
            post = 'station=' + str(id_station)
            content = ccurl(url + '#-d#' + post)
            m = re.findall('http://[^"]*', content)
            station_url = str(m[0])
        file_arr.append(name + '	' + station_url + '	' + 'NONE')
        #file_arr.append('No.jpg')
        #file_arr.append('Summary Not Available')
        record_history = True
        return (file_arr, 'Summary Not Available', 'No.jpg', record_history,
                depth_list)
Example #3
 def getCompleteList(self, opt, genre_num):
     m = []
     url = None
     if opt == "<----":
         m = ["Genre", "History", 0]
     elif opt == "Genre":
         if self.genre:
             m = self.genre.copy()
         else:
             url = "http://www.shoutcast.com/"
             content = ccurl(url)
             m = re.findall('Genre[^"]name[^"]*', content)
             j = 0
             for i in m:
                 m[j] = re.sub('Genre[^"]name=', "", i)
                 m[j] = re.sub("[+]|%20", " ", m[j])
                 j = j + 1
             m.sort()
             print(m)
             self.genre = m.copy()
         m.append("<----")
         m.append(0)
     elif opt in ["History", "TV"]:
         pass
     else:
         url = "https://www.shoutcast.com/Home/BrowseByGenre"
         post = "genrename=" + opt
         content = ccurl(url + "#" + "-d" + "#" + post)
         m = self.process_page(content)
         m.append(1)
     print(opt, url)
     return m
Example #5
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        if extra_info == '-1':
            arr = []
            return (arr, 'Instructions', 'No.jpg', False, depth_list)
        else:
            print(extra_info)
            name_id = (re.search('id=[^|]*', extra_info).group()).split('=')[1]
            url = "https://nyaa.si/download/" + name_id + '.torrent'
            print(url)
            summary = ""

            torrent_dest = os.path.join(siteName, name + '.torrent')

            if not os.path.exists(torrent_dest):
                ccurl(url + '#' + '-o' + '#' + torrent_dest, self.cookie_file)

            info = lt.torrent_info(torrent_dest)
            file_arr = []
            for f in info.files():
                file_path = f.path
                file_path = os.path.basename(file_path)
                file_arr.append(file_path)
            record_history = True
            return (file_arr, 'Summary Not Available', 'No.jpg',
                    record_history, depth_list)
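
Example #5 depends on the libtorrent Python bindings (imported as lt). Iterating info.files() for entries with a .path attribute matches the old 1.0-era bindings; in libtorrent 1.1+ files() returns a file_storage instead, so a version-tolerant listing could look like this sketch (assuming libtorrent is installed and the .torrent path exists):

import libtorrent as lt

def list_torrent_files(torrent_path):
    # Sketch for libtorrent >= 1.1, where files() returns a file_storage.
    info = lt.torrent_info(torrent_path)
    fs = info.files()
    return [fs.file_path(i) for i in range(fs.num_files())]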
Example #7
 def ccurl_head(self, url, rfr_url):
     print("------------ccurlHead------------")
     if rfr_url:
         content = ccurl(url + '#' + '-Ie' + '#' + rfr_url)
     else:
         content = ccurl(url + '#' + '-I')
     return content
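
For comparison, a header-only request like the ccurl '-I'/'-Ie' calls above can be made with just the standard library; this is a standalone sketch, not the project's wrapper:

import urllib.request

def fetch_head(url, referer=None):
    # Issue a HEAD request and return the response headers as a plain dict.
    req = urllib.request.Request(url, method='HEAD')
    if referer:
        req.add_header('Referer', referer)
    with urllib.request.urlopen(req) as resp:
        return dict(resp.headers)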
Example #9
	def ccurlN(self,url):
		content = ccurl(url+'#-b#'+self.cookie_file)
		if 'checking_browser' in content:
			if os.path.exists(self.cookie_file):
				os.remove(self.cookie_file)
			cloudfare(url,'',self.cookie_file)
			content = ccurl(url+'#-b#'+self.cookie_file)
		return content
Example #11
    def find_info(self, mode, row, thread=None, local_arr=None):
        name = ui.get_title_name(row)
        nam = self.name_adjust(name)
        nam1 = nam
        logger.info(nam)
        index = ""
        final_link_found = False
        final_link_arr = []
        if mode == 0:
            link = "http://thetvdb.com/index.php?seriesname=" + nam + "&fieldlocation=1&language=7&genre=Animation&year=&network=&zap2it_id=&tvcom_id=&imdb_id=&order=translation&addedBy=&searching=Search&tab=advancedsearch"
            logger.info(link)
            content = ccurl(link)
            m = re.findall('/index.php[^"]tab=[^"]*', content)
            if not m:
                link = "http://thetvdb.com/index.php?seriesname=" + nam + "&fieldlocation=2&language=7&genre=Animation&year=&network=&zap2it_id=&tvcom_id=&imdb_id=&order=translation&addedBy=&searching=Search&tab=advancedsearch"
                content = ccurl(link)
                m = re.findall('/index.php[^"]tab=[^"]*', content)
                if not m:
                    link = "http://thetvdb.com/?string=" + nam + "&searchseriesid=&tab=listseries&function=Search"
                    content = ccurl(link)
                    m = re.findall('/[^"]tab=series[^"]*lid=7', content)
                    if not m:
                        final_link_found = False
                    else:
                        final_link_found = True
                        final_link_arr = m
                else:
                    final_link_found = True
                    final_link_arr = m
            else:
                final_link_found = True
                final_link_arr = m
        elif mode == 1:
            final_link_arr = self.get_ddglinks(nam, src='tvdb')
            if final_link_arr:
                final_link_found = True
        elif mode == 2:
            final_link_arr = self.get_glinks(nam, src='tvdb')
            if final_link_arr:
                final_link_found = True

        logger.info(final_link_arr)
        if final_link_found and final_link_arr:
            n = re.sub('amp;', '', final_link_arr[0])
            elist = re.sub('tab=series', 'tab=seasonall', n)
            if not n.startswith('http'):
                url = "http://thetvdb.com" + n
                elist_url = "http://thetvdb.com" + elist
            else:
                url = n
                elist_url = elist
            site = ui.get_parameters_value(s='site')['site']
            self.getTvdbEpnInfo(elist_url,
                                epn_arr=local_arr,
                                site=site,
                                name=name,
                                row=row,
                                thread=thread)
Example #12
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        if extra_info == '-1':
            arr = []
            return (arr, 'Instructions', 'No.jpg', False, depth_list)
        else:
            base_url = 'https://9anime.to'
            url = extra_info
            print(url, '--74--')
            content = ccurl(url)
            soup = BeautifulSoup(content, 'lxml')
            arr = []

            m = soup.findAll('div', {'class': 'server row'})
            p = 0
            index = 0
            print(m, len(m))
            for i in m:
                index = 0
                j = i.findAll('li')
                if p == 0:
                    for k in j:
                        l = k.find('a')
                        n = l.text + '	' + os.path.join(base_url, l['href'])
                        arr.append(n)
                else:
                    for k in j:
                        l = k.find('a')
                        try:
                            n = os.path.join(l['href'].split('/')[-1])
                        except Exception as e:
                            print(e, '--84--')
                            n = 'NONE'
                        try:
                            arr[index] = arr[index] + '::' + n
                        except Exception as e:
                            print(e, '--121---')
                        index = index + 1
                p = p + 1
            record_history = True
            display_list = True
            summary = 'Not Available'
            picn = 'No.jpg'

            try:
                m = soup.find('h1', {'class': 'title'})
                pic_url = m.findNext('img')['src']
                l = m.findNext('div', {'id': 'info'})
                summary = m.text.strip() + '\n' + l.text.strip()
                picn = os.path.join(self.tmp_dir, name + '.jpg')
                if not os.path.exists(picn):
                    ccurl(pic_url + '#' + '-o' + '#' + picn)
                print(picn, '--98--')
            except Exception as e:
                print(e)
            return (arr, summary, picn, record_history, depth_list)
Example #13
 def run(self):
     ccurl(self.url)
     try:
         self.picn = self.url.split('#')[2]
         ui.image_fit_option(self.picn,
                             self.picn,
                             fit_size=6,
                             widget=ui.label)
     except Exception as e:
         print(e)
Example #15
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        url = "http://anime-joy.tv/watch/" + name
        print(url)
        summary = ""
        content = ccurl(url)
        soup = BeautifulSoup(content, 'lxml')
        link = soup.findAll('div', {"class": 'ozet'})
        link1 = soup.findAll('img')
        img = ""
        for i in link:
            summary = i.text
            #summary = re.sub("\n","",summary)
        if not summary:
            summary = "Summary Not Available"
        else:
            m = re.findall(r'\\n', summary)
            print(m)
            n = re.findall(r'\\t', summary)
            for i in m:
                summary = summary.replace(i, '')
            for i in n:
                summary = summary.replace(i, '')
            print(summary)

        for i in link1:
            if 'src' in str(i):
                j = i['src']
                if j and '.jpg' in j:
                    img = j
                    img = img.replace('animejoy.tv', 'anime-joy.tv')
                    print(img)
        #picn = "/tmp/" + name + ".jpg"
        picn = os.path.join(self.tmp_dir, name + '.jpg')
        try:
            if not os.path.isfile(picn) and img:
                #subprocess.call(["curl","-o",picn,img])
                ccurl(img + '#' + '-o' + '#' + picn)
        except:
            print("No Cover")
        m = re.findall('http://anime-joy.tv/watch/' + name + '/[^"]*', content)
        j = 0
        for i in m:
            i = re.sub('http://anime-joy.tv/watch/' + name + '/', "", i)
            m[j] = i
            j = j + 1
        m = naturallysorted(m)
        #m.append(picn)
        #m.append(summary)
        record_history = True
        return (m, summary, picn, record_history, depth_list)
Example #17
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        url = "http://www.animebam.net/series/" + name
        img = []
        summary = ""
        content = ccurl(url)
        soup = BeautifulSoup(content, 'lxml')
        link = soup.find('p', {'class': 'ptext'})
        if link:
            summary = link.text
        link = soup.findAll('img')
        for i in link:
            if 'src' in str(i):
                j = i['src']
                if 'jpg' in j or 'jpeg' in j:
                    img_src = j
                    if 'http' not in img_src:
                        img_src = 'http:' + img_src
                    img.append(img_src)

        print(img)

        #picn = "/tmp/AnimeWatch/" + name + ".jpg"
        picn = os.path.join(self.tmp_dir, name + '.jpg')
        try:
            if not os.path.isfile(picn):
                #subprocess.call(["curl",'-L','-A',self.hdr,'-o',picn,img[0]])
                ccurl(img[0] + '#' + '-o' + '#' + picn)
        except:
            print("No Cover")

        if not summary:
            summary = "No Summary Available"
        n = re.findall(name + '-[^"]*', content)
        n = naturallysorted(n)
        m = []
        sub = soup.findAll('i', {'class': 'btn-xs btn-subbed'})
        if sub:
            for i in n:
                m.append(i + '-subbed')
        dub = soup.findAll('i', {'class': 'btn-xs btn-dubbed'})
        if dub:
            for i in n:
                m.append(i + '-dubbed')
        #m.append(picn)
        #m.append(summary)
        record_history = True
        return (m, summary, picn, record_history, depth_list)
Example #18
 def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                category):
     summary = "None"
     picn = "No.jpg"
     record_history = False
     print(self.site, self.opt, opt)
     if self.site:
         if self.site.lower() == "playlists":
             opt_val = name
             name_val = ""
         else:
             opt_val = self.opt.lower()
             name_val = name
         if self.site.lower() == "video" or self.site.lower() == "music":
             name_val = extra_info + ".hash"
         url_new = "site={0}&opt={1}&s={2}&exact.m3u".format(
             self.site.lower(), opt_val, name_val)
         url_new = urllib.parse.quote(url_new)
         url = self.url + url_new
         content = ccurl(url + "#" + "-b" + "#" + self.cookie_file,
                         verify_peer=False)
         m = self.get_playlist(content)
         record_history = True
     elif self.opt == "Discover":
         self.handle_login(server_name=name)
         m = []
         record_history = False
     else:
         m = []
     return (m, summary, picn, record_history, depth_list)
Example #19
    def urlHeaders(self, url):
        m = []
        o = []
        content = ccurl(url + '#' + '-I')
        n = content.split('\n')
        #print(n)
        k = 0
        for i in n:
            i = re.sub('\r', '', i)
            if i and ':' in i:
                p = i.split(': ', 1)
                if p:
                    t = (p[0], p[1])
                else:
                    t = (i, "None")

                m.append(t)
                #print(m,'-----')
                k = k + 1
                #print(k)
            else:
                t = (i, '')
                m.append(t)
        d = dict(m)
        print(d)
        #result = int(int(d['Content-Length'])/(1024*1024))
        return d
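
urlHeaders splits and re-assembles the raw header text by hand; the standard library can parse the same block, and also copes with folded header lines. A sketch over the kind of text ccurl returns here:

from email.parser import Parser

def parse_raw_headers(raw):
    # Sketch: drop the status line ('HTTP/1.1 200 OK'), normalise line
    # endings, then let the stdlib parse the RFC 822 header block.
    raw = raw.replace('\r', '')
    _, _, header_block = raw.partition('\n')
    msg = Parser().parsestr(header_block, headersonly=True)
    return dict(msg.items())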
Example #20
 def get_ddglinks(self, nam, src=None):
     m = []
     if src == 'tvdb' or src == 'tvdb+ddg':
         new_url = 'https://duckduckgo.com/html/?q=' + nam + '+tvdb'
     elif src == 'tmdb' or src == 'tmdb+ddg':
         new_url = 'https://duckduckgo.com/html/?q=' + nam + '+themoviedb'
     content = ccurl(new_url)
     soup = BeautifulSoup(content, 'lxml')
     div_val = soup.findAll('h2', {'class': 'result__title'})
     logger.info(div_val)
     for div_l in div_val:
         new_url = div_l.find('a')
         if 'href' in str(new_url):
             new_link = new_url['href']
             final_link = re.search('http[^"]*', new_link).group()
             if src == 'tvdb' or src == 'tvdb+ddg':
                 if ('tvdb.com' in final_link
                         and 'tab=episode' not in final_link
                         and 'tab=seasonall' not in final_link):
                     m.append(final_link)
             elif src == 'tmdb' or src == 'tmdb+ddg':
                 if 'themoviedb.org' in final_link:
                     m.append(final_link)
             if m:
                 break
     return m
Example #22
 def urlHeaders(self, url):
     m = []
     o = []
     content = ccurl(url, curl_opt='-I')
     n = content.split('\n')
     #print(n)
     k = 0
     for i in n:
         i = re.sub('\r', '', i)
         if i and ':' in i:
             p = i.split(': ', 1)
             if p:
                 t = (p[0], p[1])
             else:
                 t = (i, "None")
             m.append(t)
             #print(m, '-----')
             k = k+1
             #print(k)
         else:
             t = (i, '')
             m.append(t)
     d = dict(m)
     print(d)
     return d
Example #24
 def handle_login(self, server_name=None):
     if os.path.isfile(self.server_list) and not server_name:
         with open(self.server_list, 'r') as f:
             self.server_name = f.read()
     elif server_name:
         self.server_name = server_name
     if not self.url:
         self.login_widget = LoginWidget(server=self)
         self.login_widget.show()
         #self.login_widget.setWindowModality(QtCore.Qt.WindowModal)
     else:
         content = ccurl(
             '{0}get_all_category.htm#-c#{1}'.format(self.url, self.cookie_file),
             user_auth=self.passwd, verify_peer=False
             )
         print(content, '>>>>>')
         if ('Access Not Allowed, Authentication Failed' in content 
                 or 'You are not authorized to access the content' in content):
             self.login_success = False
             self.login_widget = LoginWidget(server=self)
             self.login_widget.show()
         elif not content:
             send_notification('Curl failure: may be server is not running or misconfigured')
         else:
             self.login_success = True
Example #25
 def handleLogin(self):
     self.hide()
     text_val = self.text_name.text()
     pass_val = self.text_pass.text()
     self.auth_info = text_val+':'+pass_val
     url = self.server_ip.text()
     if url:
         if not url.endswith('/'):
             url = url+'/'
         if not url.startswith('http'):
             send_notification('Enter full IP address starting with http/https properly')
         else:
             content = ccurl(
                     '{0}get_all_category.htm#-c#{1}'.format(url, self.server.cookie_file),
                     user_auth=self.auth_info, verify_peer=False
                     )
             print(content, '>>>>>')
             if ('Access Not Allowed, Authentication Failed' in content or
                     'You are not authorized to access the content' in content):
                 self.server.login_success = False
                 send_notification('Authentication Failed. Either Username or Password is incorrect')
             elif not content:
                 send_notification('Curl failure: may be server is not running or misconfigured')
             else:
                 self.server.passwd = self.auth_info
                 self.server.url = url
                 self.server.login_success = True
                 send_notification('Login Success. Now click on Login Again')
                 with open(self.server.server_list, 'w') as f:
                     f.write(self.server.url) 
                     self.server.server_name = url
     else:
         send_notification('Server IP Address Needed')
Example #26
 def get_glinks(self, nam, src=None):
     if src == 'tmdb+g':
         url = "https://www.google.co.in/search?q=" + nam + "+themoviedb"
     else:
         url = "https://www.google.co.in/search?q=" + nam + "+tvdb"
     content = ccurl(url)
     soup = BeautifulSoup(content, 'lxml')
     m = soup.findAll('a')
     links = []
     for i in m:
         if 'href' in str(i):
             x = urllib.parse.unquote(i['href'])
             y = ''
             src = 'tvdb'
             if 'thetvdb.com' in x:
                 y = re.search('thetvdb.com[^"]*tab=series[^"]*', x)
                 src = 'tvdb'
             elif 'themoviedb.org' in x:
                 y = re.search('www.themoviedb.org[^"]*', x)
                 src = 'tmdb'
             if y:
                 y = y.group()
                 if src == 'tvdb':
                     y = 'http://' + y
                 else:
                     y = 'https://' + y
                 y = urllib.parse.unquote(y)
                 y = y.replace(' ', '%20')
                 y = re.sub('\&sa[^"]*', '', y)
                 links.append(y)
     return links
Example #27
 def ddg_search(self, nam, src, direct_search=None):
     m = []
     final_link = ''
     if direct_search:
         if src == 'tmdb':
             new_url = 'https://www.themoviedb.org/search?query=' + nam
             content = ccurl(new_url)
             soup = BeautifulSoup(content, 'lxml')
             div_link = soup.find('div', {'class': 'item poster card'})
             if div_link:
                 alink = div_link.find('a')
                 if 'href' in str(alink):
                     link = alink['href']
                     if link.startswith('/'):
                         final_link = 'https://www.themoviedb.org' + link
                     elif link.startswith('http'):
                         final_link = link
                     else:
                         final_link = 'https://www.themoviedb.org/' + link
                     m.append(final_link)
         elif src == 'tvdb+g' or src == 'tmdb+g':
             m = self.get_glinks(nam, src)
         elif src == 'tvdb+ddg' or src == 'tmdb+ddg':
             m = self.get_ddglinks(nam, src)
     else:
         m = self.get_ddglinks(nam, src)
     if m:
         final_link = m[0]
     logger.info('\n{0}---{1}\n'.format(final_link, m))
     return (final_link, m)
Example #29
 def run(self):
     logger.info(self.path)
     if not os.path.exists(self.picn) and self.path:
         try:
             if (self.path.startswith('http') and
                 (self.path.endswith('.jpg') or self.path.endswith('.png')
                  or self.path.endswith('.image'))):
                 ccurl(self.path + '#' + '-o' + '#' + self.picn)
                 ui.image_fit_option(self.picn,
                                     self.picn,
                                     fit_size=6,
                                     widget=ui.label)
             else:
                 ui.generate_thumbnail_method(self.picn, self.inter,
                                              self.path)
         except Exception as e:
             logger.info("Thumbnail Generation Exception: {0}".format(e))
             print(e, '--548--')
Example #30
    def getCompleteList(self, opt, ui, progress, tmp_dir, hist_folder):
        m = ["Not Able To Open"]
        if opt == "Open":
            MainWindow = QtWidgets.QWidget()
            item, ok = QtWidgets.QInputDialog.getText(
                ui,
                "Input Dialog",
                "Enter Torrent Url or Magnet Link or local torrent file path",
            )
            if ok and item:
                if item.startswith("http") or (os.path.isfile(item)
                                               and item.endswith(".torrent")):
                    home = hist_folder
                    name1 = os.path.basename(item).replace(".torrent", "")
                    torrent_dest1 = os.path.join(tmp_dir, name1 + ".torrent")
                    if not os.path.exists(torrent_dest1):
                        if item.startswith("http"):
                            ccurl(item + "#" + "-o" + "#" + torrent_dest1)
                        else:
                            shutil.copy(item, torrent_dest1)
                    if os.path.exists(torrent_dest1):
                        info = lt.torrent_info(torrent_dest1)
                        name = info.name()
                        torrent_dest = os.path.join(home, name + ".torrent")
                        shutil.copy(torrent_dest1, torrent_dest)
                    m = [name]
                elif item.startswith("magnet:"):

                    torrent_handle, stream_session, info = get_torrent_info_magnet(
                        item, tmp_dir, ui, progress, tmp_dir)
                    torrent_file = lt.create_torrent(info)

                    home = hist_folder
                    name = info.name()
                    torrent_dest = os.path.join(home, name + ".torrent")

                    with open(torrent_dest, "wb") as f:
                        f.write(lt.bencode(torrent_file.generate()))

                    torrent_handle.pause()
                    stream_session.pause()
                    m = [name]
        m.append(1)
        return m
Example #31
 def search(self, name):
     url = "http://www.animebam.net/search?search=" + name
     content = ccurl(url)
     m = re.findall('/series/[^"]*', content)
     #print m
     j = 0
     for i in m:
         m[j] = re.sub("/series/", "", i)
         j = j + 1
     return m
Example #32
 def getFinalUrl(self, name, epn, mirror, quality):
     if self.url:
         url = self.url + "quality=" + quality
         content = ccurl("{0}#-b#{1}".format(url, self.cookie_file),
                         verify_peer=False)
         print(content)
     final = epn
     if "\t" in epn:
         final = epn.split("\t")[1]
     return final
Example #36
 def search(self, name):
     strname = str(name)
     print(strname)
     if name.lower() == "tv":
         m = self.getCompleteList(name.upper(), 1)
     else:
         url = "https://www.shoutcast.com/Home/BrowseByGenre"
         post = "genrename=" + name
         content = ccurl(url + "#" + "-d" + "#" + post)
         m = self.process_page(content)
     return m
Example #37
    def getFinalUrl(self, name, epn, mirror, quality):
        url = "http://anime-joy.tv/watch/" + name + "/" + epn
        print(url)
        content = ccurl(url)
        try:
            m = re.findall('http:[^"]*.mp4', content)
            url = m[0]
            #print("in try url:" + url
        except:
            m = re.findall('http://anime-joy.tv/embed[^"]*', content)
            #m = list(set(m))

            content = ccurl(m[0])

            m = re.findall('http[^"]*.mp4', content)
            #m = list(set(m))
            #print(m
            url = m[0]
        print(url)

        return url
Example #40
 def get_new_server(self, nurl, quality):
     final = ''
     if 'mycloud' in nurl:
         content = ccurl(nurl)
         tlink = re.search('"file":"[^"]*', content).group()
         link = tlink.replace('"file":"', '', 1)
         if 'http' not in link:
             link = 'https://' + re.search('[a-zA-Z0-9][^"]*', link).group()
         pre_link = link.rsplit('/', 1)[0]
         print(link, pre_link, '--310--')
         content = ccurl(link)
         arr = content.split('\n')
         if '#EXTM3U' in arr[0]:
             arr = arr[1:]
         j = 0
         quality_tuple = []
         for i in arr:
             i = i.strip()
             if i.startswith('#'):
                 link_new = arr[j + 1]
                 if i.endswith('x360'):
                     val = ('360p', link_new)
                 elif i.endswith('x480'):
                     val = ('480p', link_new)
                 elif i.endswith('x720'):
                     val = ('720p', link_new)
                 elif i.endswith('x1080'):
                     val = ('1080p', link_new)
                 quality_tuple.append(val)
             j = j + 1
         if quality_tuple:
             quality_dict = dict(quality_tuple)
             tfinal = self.get_quality_dict(quality_dict, quality)
             if tfinal:
                 if tfinal.startswith('/'):
                     tfinal = tfinal[1:]
                 final = pre_link + '/' + tfinal
             print(pre_link, tfinal)
     return final
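
get_new_server pairs each '#EXT-X-STREAM-INF' line of an HLS master playlist with the URI on the following line, keying on the height in the 'WxH' resolution suffix. The same pairing as a standalone sketch over sample playlist text:

SAMPLE_M3U8 = '''#EXTM3U
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=800000,RESOLUTION=640x360
360/index.m3u8
#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=2000000,RESOLUTION=1280x720
720/index.m3u8'''

def quality_map(m3u8_text):
    # Pair every stream-info line with the URI that follows it,
    # keyed by the height parsed from the 'WxH' resolution suffix.
    lines = [l.strip() for l in m3u8_text.split('\n')]
    result = {}
    for j, line in enumerate(lines):
        if line.startswith('#EXT-X-STREAM-INF') and j + 1 < len(lines):
            height = line.rsplit('x', 1)[-1]
            result[height + 'p'] = lines[j + 1]
    return result

print(quality_map(SAMPLE_M3U8))  # {'360p': '360/index.m3u8', '720p': '720/index.m3u8'}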
Example #42
	def getCompleteList(self,opt,genre_num):
		url = "http://www.animebam.net/series"
		content = ccurl(url)
		#print(content)
		m = re.findall('/series/[^"]*',(content))
		#print m
		j=0
		for i in m:
			m[j]=re.sub("/series/","",i)
			j = j+1
		if opt == "Random":
			m = random.sample(m, len(m))
		return m
Example #43
	def getCompleteList(self,opt,ui,progress,tmp_dir,hist_folder):
		m = ['Not Able To Open']
		if opt == 'Open':
			MainWindow = QtWidgets.QWidget()
			item, ok = QtWidgets.QInputDialog.getText(MainWindow, 'Input Dialog', 'Enter Torrent Url or Magnet Link or local torrent file path')
			if ok and item:
				if (item.startswith('http') or item.startswith('/')) and item.endswith('.torrent'):
					home = hist_folder
					name1 = os.path.basename(item).replace('.torrent','')
					torrent_dest1 = os.path.join(tmp_dir,name1+'.torrent')
					if not os.path.exists(torrent_dest1):
						if item.startswith('http'):
							ccurl(item+'#'+'-o'+'#'+torrent_dest1)
						else:
							shutil.copy(item,torrent_dest1)
					if os.path.exists(torrent_dest1):
						info = lt.torrent_info(torrent_dest1)
						name = info.name()
						torrent_dest = os.path.join(home,name+'.torrent')
						shutil.copy(torrent_dest1,torrent_dest)
					m = [name]
				elif item.startswith('magnet:'):
					
					torrent_handle,stream_session,info = get_torrent_info_magnet(item,tmp_dir,ui,progress,tmp_dir)
					torrent_file = lt.create_torrent(info)
					
					home = hist_folder
					name = info.name()
					torrent_dest = os.path.join(home,name+'.torrent')
					
					with open(torrent_dest, "wb") as f:
						f.write(lt.bencode(torrent_file.generate()))
						
					torrent_handle.pause()
					stream_session.pause()
					m = [name]
		return m
Example #44
	def getFinalUrl(self,name,epn,mirror,quality):
		epn1 = epn.rsplit('-',1)[0]
		optcode = epn.rsplit('-',1)[1]
		url = "http://www.animebam.net/" + epn1
		print(url)
		content = ccurl(url)
		m = re.findall('/embed/[^"]*',content)
		print(m)

		rfr = 'http://www.animebam.net/jwplayer/jwplayer.flash.swf'
		if m:
			url = 'http://www.animebam.net' + m[0]
		else:
			url = ''
		print(url)
		if url:
			content = ccurl(url)
		else:
			content = ''
		try:
			pre = re.search('file: "/play[^"]*',content).group()
			pre = pre.replace('file: "','')
			print(pre)
			url = 'http://www.animebam.net' + pre
			content = ccurl(url+'#'+'-Ie'+'#'+rfr)
			final = ''
			#print(content)
			if "Location:" in content:
				m = re.findall('Location: [^\n]*',content)
				print(m)
				final = re.sub('Location: |\r','',m[-1])
		except Exception as e:
			print(e,'--error--in--resolving--url--')
			final= ''
		return final
		"""
Example #45
	def getCompleteList(self,opt,genre_num):
		url = "http://anime-joy.tv/animelist"
		content = ccurl(url)
		#print(content)
		m = re.findall('watch/[^"]*',content)
		j=0
		search = []
		for i in m:
			i = re.sub('watch/',"",i)
			m[j] = i
			j = j + 1
		m = list(set(m))
		m.sort()
		if opt == "Random":
			m = random.sample(m, len(m))
		return m
Example #46
	def get_epn_url(self,name,epn,mirror,quality):
		final = ''
		new_epn = epn.split('/')[-1]
		if '::' in new_epn:
			id_arr = new_epn.split('::')
			print(id_arr,mirror)
			if mirror <= len(id_arr):
				epn_id = id_arr[mirror-1]
			else:
				epn_id = id_arr[0]
		else:
			epn_id = new_epn
		if not os.path.isfile(self.cookie_file):
			new_url = 'https://9anime.to'+epn.split('::')[0]
			print(new_url)
			cloudfare(new_url,quality,self.cookie_file,'watching',True,'9anime.to')
		url = 'https://9anime.to/ajax/episode/info?id='+epn_id+'&update=0'
		content = ccurl(url+'#-b#'+self.cookie_file)
		l = json.loads(content)
		_target_found = False
		_api = _id = _token = _opt = _target = None  # guard: some keys may be absent in the response
		for i in l:
			print(i,l[i])
			if i == 'grabber':
				_api = l[i]
			if i == 'params':
				try:
					_id = l[i]['id']
					_token = l[i]['token']
					_opt = l[i]['options']
				except Exception as e:
					print(e,'--172--')
			if i == 'target':
				_target = l[i]
				if 'mycloud' in _target or 'openload' in _target:
					_target_found = True
		if _target_found:
			nurl = _target
			if not nurl.startswith('http'):
				_target = re.search('[a-zA-Z0-9][^"]*',_target).group()
				nurl = 'https://'+_target+'&autostart=true'
			_api = None
			print(nurl)
		else:
			nurl = '?id={0}&token={1}&options={2}&mobile=0'.format(_id,_token,_opt)
		return (_api,nurl)
Example #47
	def getCompleteList(self,opt,genre_num):
		m = []
		instr = "Press . or > for next page	-1"
		opt_arr = [
			'genre','mostpopular','newest','latestupdate',
			'history','series','movies','ongoing'
			]
		if opt == 'Genre' and genre_num == 0:
			url = 'https://9anime.to'
			content = ccurl(url)
			m = re.findall('/genre/[^"]*', content)
			m = list(set(m))
			m.sort()
			del m[9]
			m.pop()
			j = 0
			for i in m:
				i = re.sub('/genre/', '', m[j])
				m[j] = i
				j = j + 1
		if opt == 'History':
			print('History')
		elif (opt == 'MostPopular' or opt == 'Newest' or opt == 'LatestUpdate' 
				or opt == 'Series' or opt == 'Ongoing' or opt == 'Movies'):
			new_opt = 'newest'
			if opt.lower() == 'mostpopular':
				new_opt = 'most-watched'
			elif opt.lower() == 'newest':
				new_opt = 'newest'
			elif opt.lower() == 'latestupdate':
				new_opt = 'updated'
			elif opt.lower() == 'series':
				new_opt = 'tv-series'
			elif opt.lower() == 'movies':
				new_opt = 'movies'
			elif opt.lower() == 'ongoing':
				new_opt = 'ongoing'
			url = 'https://9anime.to/'+new_opt
			m = self.parse_page(url)
			m.append(instr)
		if genre_num == 1 or opt.lower() not in opt_arr:
			url = 'https://9anime.to/genre/' + opt
			m = self.parse_page(url)
			m.append(instr)
		return m
Example #48
	def search(self,name):
		strname = str(name)
		print(strname)
		url = "http://anime-joy.tv/animelist"
		content = ccurl(url)
		m = re.findall('watch/[^"]*',content)
		j=0
		search = []
		for i in m:
			i = re.sub('watch/',"",i)
			m[j] = i
			j = j + 1
		m = list(set(m))
		m.sort()
		s = []
		for i in m:
			match = re.search('[^"]*'+strname+'[^"]*',i)  # renamed: avoid rebinding the list being iterated
			if match:
				s.append(match.group(0))
			
		return s
Example #49
	def get_direct_grabber(self,url):
		_ts = '0'
		_val = '0'
		link_split = url.split('?')[-1]
		link_arr = link_split.split('&')
		for i in link_arr:
			if i.startswith('ts='):
				_ts = i
			elif i.startswith('_='):
				_val = i
		content = ccurl(url+'#-b#'+self.cookie_file)
		l = json.loads(content)
		_target_found = False
		_api = _id = _token = _opt = _target = None  # guard: some keys may be absent in the response
		for i in l:
			print(i,l[i])
			if i == 'grabber':
				_api = l[i]
			if i == 'params':
				try:
					_id = l[i]['id']
					_token = l[i]['token']
					_opt = l[i]['options']
				except Exception as e:
					print(e,'--172--')
			if i == 'target':
				_target = l[i]
				if 'mycloud' in _target or 'openload' in _target:
					_target_found = True
		if _target_found:
			nurl = _target
			if not nurl.startswith('http'):
				_target = re.search('[a-zA-Z0-9][^"]*',_target).group()
				nurl = 'https://'+_target+'&autostart=true'
			_api = None
			print(nurl)
		else:
			nurl = '?{0}&{1}id={2}&token={3}&options={4}&mobile=0'.format(_ts,_val,_id,_token,_opt)
		return (_api,nurl)
Example #50
	def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
		epn_num = ''
		if extra_info:
			name,epn_num = name.rsplit('--',1) 
			
		url = 'http://kissanime.ru/Anime/' + name
		print(url)
		content = self.ccurlN(url)
		epl = re.findall('/Anime/' + name + '[^"]*["?"]id[^"]*', content)
		try:
			img = re.findall('https://kissanime.ru/Uploads/Etc/[^"]*.jpg', content)
			if not img:
				img = re.findall('http://cdn.myanimelist.net/[^"]*.jpg', content)	
			print(img)
			picn = os.path.join(self.tmp_dir,name+'.jpg')
			print(picn)
			if img:
				print(img[0])
			if not os.path.isfile(picn):
				ccurl(img[0]+'#'+'-o'+'#'+picn,self.cookie_file)
		except:
			picn = os.path.join(self.tmp_dir,name+'.jpg')
		j = 0
		for i in epl:
			i = re.sub('/Anime/' + name + '/', '', i)
			epl[j] = i
			j = j + 1

		
		soup = BeautifulSoup(content,'lxml')
		summary = ""
		summary1 = ""
		try:
			link = soup.findAll('span',{'class':'info'})
			for i in link:
				l = (i.text).lower()
				if "genres" in l or "other name" in l or "country" in l or "date aired" in l or 'status' in l:
					
					k = i.findPrevious('p')
					if 'status' in l:
						t = k.text
						t = re.sub('"','',t)
						t = re.sub('Views:[^"]*','',t)
						summary = summary + t
					else: 
						summary = summary + k.text
				if "summary" in l:
					j = i.findNext('p')
					if j:
						summary1 = j.text
				
			summary = summary + summary1
			summary = re.sub('\r','',summary)
			summary = re.sub('\n\n','\n',summary)
		except:
			summary = 'Summary Not Available'
			
		epl=naturallysorted(epl)  
		if extra_info and epn_num:
			epl[:] = []
			epl.append(epn_num)
			
		record_history = True
		display_list = True
		return (epl,summary,picn,record_history,depth_list)
	def Browse(self,url,quality):
		content = ccurl(url+'#-b#'+self.cookie_file)
		if 'Are You Human' in content:
			self.captcha = True
		if 'checking_browser' in content:
			self.get_cookie = True
			
		if self.get_cookie or self.get_epn or self.captcha or self.get_link:
			if os.path.exists(self.cookie_file) and not self.get_link:
				os.remove(self.cookie_file)
			if not os.path.exists(self.cookie_file):
				
				self.cookie = QtNetwork.QNetworkCookieJar()
				self.nam = NetWorkManager(self.get_link)
				self.nam.setCookieJar(self.cookie)
			else:
				cookie_arr = QtNetwork.QNetworkCookieJar()
				c = []
				f = open(self.cookie_file,'r')
				lines = f.readlines()
				f.close()
				for i in lines:
					k = re.sub('\n','',i)
					l = k.split('	')
					d = QtNetwork.QNetworkCookie()
					d.setDomain(l[0])
					if l[1]== 'TRUE':
						l1= True
					else:
						l1= False
					d.setHttpOnly(l1)
					d.setPath(l[2])
					if l[3]== 'TRUE':
						l3= True
					else:
						l3= False
					d.setSecure(l3)
					l4 = int(l[4])
					print(l4)
					d.setExpirationDate(QtCore.QDateTime.fromTime_t(l4))
					d.setName(bytes(l[5],'utf-8'))
					d.setValue(bytes(l[6],'utf-8'))
					c.append(d)
					#cookie_arr.append(d)
				cookie_arr.setAllCookies(c)
				self.nam = NetWorkManager(self.get_link)
				self.nam.setCookieJar(cookie_arr)
			print('---364----')
			self.web = Browser(
				url,quality,self.cookie_file,self.end_point,self.get_cookie,
				self.domain_name,self.captcha,self.get_epn,self.get_link)
			self.tab_2 = QtWidgets.QWidget()
			if self.captcha:
				self.tab_2.setMaximumSize(700,700)
			else:
				self.tab_2.setMaximumSize(300,50)
			self.tab_2.setWindowTitle('Wait!')
			self.horizontalLayout_5 = QtWidgets.QVBoxLayout(self.tab_2)
			print('Browse: '+url)
			
			self.horizontalLayout_5.addWidget(self.web)
			self.tab_2.show()
			#self.tab_2.hide()
			#self.web.show()
			self.web.page().setNetworkAccessManager(self.nam)
			self.web.load(QUrl(url))
			cnt = 0
			
			lnk_file = os.path.join(self.tmp_dir,'lnk.txt')
			if os.path.exists(lnk_file):
				os.remove(lnk_file)
			
			while((not os.path.exists(self.cookie_file) or (self.get_link and not os.path.exists(lnk_file))) and cnt < 30):
				#print()
				print('wait Cloudflare')
				time.sleep(1)
				QtWidgets.QApplication.processEvents()
				cnt = cnt+1
				if self.get_link:
					self.tab_2.setWindowTitle('Link Resolving '+str(cnt)+'s')
				else:
					self.tab_2.setWindowTitle('Wait! Cloudflare '+str(cnt)+'s')
				
			if cnt >= 30 and not os.path.exists(self.cookie_file):
				f = open(self.cookie_file,'w')
				f.close()
			
			cnt = 0
			cnt_limit = 30
			if self.captcha:
				cnt_limit = 60
			if ('kimcartoon' in url or 'kissasian' in url or 'kissanime' in url or self.get_link) and quality:
				while(not os.path.exists(lnk_file) and cnt < cnt_limit):
					print('wait Finding Link ')
					time.sleep(1)
					QtWidgets.QApplication.processEvents()
					cnt = cnt+1
					self.tab_2.setWindowTitle('Link Resolving '+str(cnt)+'s')
				
				if os.path.exists(lnk_file):
					self.web.setHtml('<html>Link Obtained</html>')
					link = open(lnk_file).read()
					print(link)
				else:
					self.web.setHtml('<html>No Link Found</html>')
					print('No Link Available or Clear The Cache')
			else:
				self.web.setHtml('<html>cookie Obtained</html>')
			if self.captcha:
				self.tab_2.show()
			else:
				self.tab_2.hide()
		else:
			f = open(self.cookie_file,'w')
			f.close()
	def _loadProgress(self):
		
		if (not self.cnt and os.path.exists(self.cookie_file) and self.get_epn):
			frame = self.mainFrame()
			html = frame.toHtml()
			txt = parse_file(html,self.url,self.quality)
			print(txt)
			if txt:
				urlLnk = ''
				if 'kissanime' in self.url:
					url_val = frame.evaluateJavaScript('ovelWrap("{0}");'.format(txt))
				else:
					url_val = frame.evaluateJavaScript('$kissenc.decrypt("{0}");'.format(txt))
				print(url_val)
				
				if url_val.startswith('http'):
					y1 = re.findall("http[^']*",url_val)
					print(y1)
					for y in y1:
						content = ccurl(y+'#'+'-I')
						if "Location:" in content:
							m = re.findall('Location: [^\n]*',content)
							urlLnk = re.sub('Location: |\r','',m[-1])
						else:
							urlLnk = y
					print(urlLnk)
					f = open(os.path.join(TMP_DIR,'lnk.txt'),'w')
					f.write(urlLnk)
					f.close()
					self.cnt = 1
				"""
				doc = frame.documentElement()
				if 'kissanime' in self.url:
					bt = doc.findFirst("select[id=slcQualix]")
				else:
					bt = doc.findFirst("select[id=selectQuality]")
				#txt = arr[-1]
				bt.evaluateJavaScript('this.value="'+txt+'"')
				self.cnt = 1
				"""
		
		if self.get_cookie:
			listCookies = self.networkAccessManager().cookieJar().allCookies()
			#print(listCookies)
			n = []
			m = ''
			o = ''
			for cookie in  listCookies:
				k=cookie.toRawForm()
				#k = getContentUnicode(k)
				k = re.sub("b'","'",str(k))
				#print(k)
				j = re.findall("'[^']*",k)
				for i in j:
					i = re.sub("'",'',i)
					if 'kissanime.ru' in i or 'kissasian.com' in i or 'kimcartoon.me' in i or 'masterani.me' in i or 'animeget.io' in i or 'animeplace.co' in i or 'moetube.net' in i or 'nyaa.se' in i or self.domain_name:
						j = re.findall('expires=[^;]*',i)
						if j:
							l = re.sub('expires=','',j[0])
							d = datetime.strptime(l,"%a, %d-%b-%Y %H:%M:%S %Z")
							t = calendar.timegm(d.timetuple())
							i = i+'; expiry='+str(int(t))
						else:
							i = i+'; expiry='+str(0)
						n.append(i)
			#print(n)
			cfc=''
			cfd =''
			asp = ''
			idt = ''
			test_idt = ''
			utmc = ''
			clr = False
			reqkey = ''
			new_arr = []
			for i in n:
				if self.end_point in i:
					clr = True
					print(n)
			if clr:
				for i in n:
					if 'cf_clearance' in i:
						cfc = self.cookie_split(i)
					elif '__cfduid' in i:
						cfd = self.cookie_split(i)
					elif 'ASP.NET_SessionId' in i:
						asp = self.cookie_split(i)
					elif 'idtz' in i:
						idt = self.cookie_split(i)
					elif '__utmc' in i:
						utmc = self.cookie_split(i)
					elif self.domain_name:
						reqkey = self.cookie_split(i)
						try:
							if reqkey['domain']:
								if self.domain_name in reqkey['domain']:
									dm = True
								try:
									reqkey['expiry']
								except:
									reqkey.update({'expiry':'0'})
								try:
									reqkey['HttpOnly']
								except:
									reqkey.update({'HttpOnly':'False'})
								if reqkey:
									str3 = reqkey['domain']+'	'+'FALSE'+'	'+reqkey['path']+'	'+'FALSE'+'	'+reqkey['expiry']+'	'+reqkey['name_id']+'	'+reqkey[reqkey['name_id']]
									new_arr.append(str3)
						except Exception as e:
							print(e,'--240--')
							
			if new_arr:
				with open(self.cookie_file, 'w') as f:
					for i in new_arr:
						f.write(i+'\n')
			elif (cfc and cfd):
				#print(cfc)
				#print(cfd)
				#print(asp)
				str3 = ''
				str1 = cfc['domain']+'	'+cfc['HttpOnly']+'	'+cfc['path']+'	'+'FALSE'+'	'+cfc['expiry']+'	'+'cf_clearance'+'	'+cfc['cf_clearance']
				str2 = cfd['domain']+'	'+cfd['HttpOnly']+'	'+cfd['path']+'	'+'FALSE'+'	'+cfd['expiry']+'	'+'__cfduid'+'	'+cfd['__cfduid']
				if asp:
					str3 = asp['domain']+'	'+'FALSE'+'	'+asp['path']+'	'+'FALSE'+'	'+asp['expiry']+'	'+'ASP.NET_SessionId'+'	'+asp['ASP.NET_SessionId']
					
				if idt:
					str3 = idt['domain']+'	'+'FALSE'+'	'+idt['path']+'	'+'FALSE'+'	'+idt['expiry']+'	'+'idtz'+'	'+idt['idtz']
					
				if 'kissasian' in self.url:
					str3 = 'kissasian.com	FALSE	/	FALSE	0		__test'
				
				if utmc:
					str3 = utmc['domain']+'	'+'FALSE'+'	'+utmc['path']+'	'+'FALSE'+'	'+str(0)+'	'+'__utmc'+'	'+utmc['__utmc']
				
				if reqkey:
					str3 = reqkey['domain']+'	'+'FALSE'+'	'+reqkey['path']+'	'+'FALSE'+'	'+reqkey['expiry']+'	'+reqkey['name_id']+'	'+reqkey[reqkey['name_id']]
				
				with open(self.cookie_file, 'w') as f:
					if str3:
						f.write(str2+'\n'+str1+'\n'+str3)
					else:
						f.write(str2+'\n'+str1)
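Note: the tab-separated strings assembled above follow the Netscape cookies.txt layout that curl-style tools read: domain, include-subdomains flag, path, secure flag, expiry (unix time), name, value. A minimal sketch of that line builder, assuming a parsed-cookie dict shaped like what cookie_split() appears to return (its exact keys are an assumption, not confirmed by the snippet):

	def netscape_line(ck, name):
		# field order of a Netscape cookie-file entry, tab separated
		fields = [ck['domain'], 'FALSE', ck['path'], 'FALSE',
			ck.get('expiry', '0'), name, ck[name]]
		return '\t'.join(fields)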
Example #53
	def Browse(self,url):
		hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
		html = ''
		home1 = os.path.expanduser("~")
		
		BASEDIR,BASEFILE = os.path.split(os.path.abspath(__file__))
		
		if 'AnimeWatch' in BASEDIR:
			enginePath = os.path.join(home1,'.config','AnimeWatch','src','Plugins','headlessEngine.py')
		elif 'kawaii-player' in BASEDIR:
			enginePath = os.path.join(home1,'.config','kawaii-player','src','Plugins','headlessEngine.py')
		tmp_dir,new_c = os.path.split(self.cookie_file)
		print(self.get_link,'--get_link___--')
		content = ccurl(url+'#-b#'+self.cookie_file)
		if 'checking_browser' in content or self.get_cookie == 'true' or self.get_link == 'true':
			if os.path.exists(self.cookie_file) and self.get_link == 'None':
				os.remove(self.cookie_file)
			if os.name == 'posix':
				print('--checking__browser-----57--',self.py,sys.executable)
				print(enginePath,url,self.quality,self.cookie_file)
				p = subprocess.Popen([self.py,enginePath,url,self.quality,
						self.cookie_file,self.end_pt,self.get_cookie,self.domain_name,self.path_val,self.get_link])
			else:
				p = subprocess.Popen([self.py,enginePath,url,self.quality,
						self.cookie_file,self.end_pt,self.get_cookie,self.domain_name,self.path_val,self.get_link],shell=True)
			
			cnt = 0
			
			lnk_file = os.path.join(tmp_dir,'lnk.txt')
			if os.path.exists(lnk_file):
				os.remove(lnk_file)
			print(lnk_file,'--lnk--file--')
			while((not os.path.exists(self.cookie_file) or (self.get_link=='true' and not os.path.exists(lnk_file))) and cnt < 20):
				print(cnt)
				print('waiting for Cloudflare')
				time.sleep(1)
				cnt = cnt+1
			p.kill()
			if 'kissasian' in url or 'kimcartoon' in url:
				if 'kissasian' in url:
					str3 = '\nkissasian.com	FALSE	/	FALSE	0		__test'
				else:
					str3 = '\nkimcartoon.me	FALSE	/	FALSE	0		__test'
				with open(self.cookie_file, 'a') as f:
					f.write(str3)
			if ('id=' in url) and os.path.exists(self.cookie_file) and ('kimcartoon' in url or 'kissasian' in url or 'kissanime' in url):
				if os.name == 'posix':
					p = subprocess.Popen([self.py,enginePath,url,self.quality,
							self.cookie_file,self.end_pt,self.get_cookie,self.domain_name,self.path_val,self.get_link])
				else:
					p = subprocess.Popen([self.py,enginePath,url,self.quality,
							self.cookie_file,self.end_pt,self.get_cookie,self.domain_name,self.path_val,self.get_link],shell=True)
				cnt = 0
				while(not os.path.exists(lnk_file) and cnt < 60):
					print(cnt)
					print('waiting for Cloudflare')
					time.sleep(1)
					cnt = cnt+1
			p.kill()
		else:
			if ('id=' in url) and os.path.exists(self.cookie_file) and ('kimcartoon' in url or 'kissasian' in url or 'kissanime' in url):
				lnk_file = os.path.join(tmp_dir,'lnk.txt')
				if os.path.exists(lnk_file):
					os.remove(lnk_file)
				print(lnk_file,'--lnk--file--')
				if os.name == 'posix':
					p = subprocess.Popen([self.py,enginePath,url,self.quality,
							self.cookie_file,self.end_pt,self.get_cookie,self.domain_name,self.path_val,self.get_link])
				else:
					p = subprocess.Popen([self.py,enginePath,url,self.quality,
							self.cookie_file,self.end_pt,self.get_cookie,self.domain_name,self.path_val,self.get_link],shell=True)
				cnt = 0
				file_path = os.path.join(tmp_dir,'tmp_cookie')
				while(not os.path.exists(lnk_file) and cnt < 60):
					print(cnt)
					print('waiting for Cloudflare')
					time.sleep(1)
					cnt = cnt+1
				p.kill()
			else:
			open(self.cookie_file, 'w').close()  # create an empty cookie file
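Note: throughout these examples ccurl() takes a single string in which curl-style options are fused onto the URL with '#' separators: url+'#-b#'+cookie_file to send a cookie file, url+'#-d#'+post for POST data, url+'#-I' for a HEAD request, and url+'#-o#'+path to save output. The real parsing is not shown in these snippets; a hedged sketch of the apparent convention:

	def split_ccurl_arg(arg):
		# 'http://host/page#-d#a=b' -> ('http://host/page', ['-d', 'a=b'])
		parts = arg.split('#')
		return parts[0], parts[1:]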
Example #54
	def getCompleteList(self,opt,genre_num):
		m = []
		url = None
		if opt == 'Genre' and genre_num == 0:
			url = "http://www.shoutcast.com/"
			#content = ccurl(url,"",1)
			content = ccurl(url)
			m = re.findall('Genre[^"]name[^"]*',content)
			#print m
			j = 0
			for i in m:
				m[j] = re.sub('Genre[^"]name=','',i)
				m[j] = re.sub("[+]|%20",' ',m[j])
				j = j+1
			m.sort()
			print(m)
			#n = ["History","Genre","TV"]
			n = ["History","Genre"]
			m = n + m
		elif opt == 'History':
			pass
		elif opt == 'TV':
			name = []
			track = []
			aformat = []
			listeners = []
			bitrate = []
			idr = []
			url = "http://thugie.nl/streams.php"
			#content = ccurl(url,"",4)
			content = ccurl(url)
			soup = BeautifulSoup(content,'lxml')
			tmp = soup.prettify()
			#m = soup.findAll('div',{'class':'boxcenterdir fontstyle'})
			#soup = BeautifulSoup(tmp,'lxml')
			m = []
			links = soup.findAll('div',{'class':'dirOuterDiv1 clearFix'})
			for i in links:
				j = i.findAll('a')
				q = i.find_next('h2')
				g = i.find_next('h4')
				z = g.find_next('h4')
				for k in j:
					idr.append(k['href'].split('=')[-1][:-1])
				l = i.text
				n = re.findall('Station:[^"]*',l)
				p = re.sub('Playing','\nPlaying',n[0])
				p=p.rstrip()
				a = p.split('\n')
				name.append(a[0].split(":")[1])
				track.append(a[1].split(':')[1])
				aformat.append(q.text)
				listeners.append(g.text)
				bitrate.append(z.text)
			for i in range(len(idr)):
				m.append(name[i].strip().replace('/','-')+'-TV	id='+str(idr[i]).replace('\\','')+'\nBitrate='+str(bitrate[i])+'\nListeners='+str(listeners[i])+'\n')
		else:
			url = "https://www.shoutcast.com/Home/BrowseByGenre"
			#content = ccurl(url,opt,1)
			post = 'genrename='+opt
			content = ccurl(url+'#'+'-d'+'#'+post)
			m = self.process_page(content)
		print(opt,url)
		return m
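A standalone run of the genre-extraction regexes used above, against a made-up HTML fragment, shows what the cleanup yields:

	import re

	content = 'href="/Genre?name=Pop%20Rock" href="/Genre?name=Jazz"'
	genres = [re.sub('[+]|%20', ' ', re.sub('Genre[^"]name=', '', g))
		for g in re.findall('Genre[^"]name[^"]*', content)]
	print(sorted(genres))   # ['Jazz', 'Pop Rock']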
Example #55
	def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
		
		
		epn_num = ''
		if extra_info:
			name,epn_num = name.rsplit('--',1) 
		
		url = 'http://kissasian.com/Drama/' + name
		print(url)
		content = self.ccurlN(url)
		#f = open('/tmp/AnimeWatch/1.txt','w')
		#f.write(content)
		#f.close()
		epl = re.findall('/Drama/' + name +'/' +'[^"]*["?"]id[^"]*', content)
		#if not epl:
		#	epl = re.findall('[^"]*?id=[^"]*', content)
		try:
			img = re.findall('http://kissasian.com/Uploads/Etc/[^"]*.jpg', content)
			if not img:
				img = re.findall('http://cdn.myanimelist.net/[^"]*.jpg', content)	
			print(img)
			#jpgn = img[0].split('/')[-1]
			#print('Pic Name=' + jpgn
			#picn = '/tmp/AnimeWatch/' + name + '.jpg'
			picn = os.path.join(self.tmp_dir,name+'.jpg')
			print(picn)
			if img:
				#img[0]=img[0].replace('kissanime.com','kissanime.to')
				print(img[0])
			if not os.path.isfile(picn):
				#subprocess.call(['curl','-L','-b','/tmp/AnimeWatch/kcookieD.txt','-A',self.hdr,'-o',picn,img[0]])
				ccurl(img[0]+'#'+'-o'+'#'+picn,self.cookie_file)
		except Exception:
			#picn = '/tmp/AnimeWatch/' + name + '.jpg'
			picn = os.path.join(self.tmp_dir,name+'.jpg')
		j = 0
		for i in epl:
			i = re.sub('/Drama/' + name + '/', '', i)
			epl[j] = i
			j = j + 1

		#try:
		soup = BeautifulSoup(content,'lxml')
		
		summary = ""
		summary1 = ""
		try:
			link = soup.findAll('span',{'class':'info'})
			#link = soup.findAll('div',{'class':'barContent'})
			for i in link:
				l = (i.text).lower()
				if "genres" in l or "other name" in l or "country" in l or "date aired" in l or 'status' in l:
					
					k = i.findPrevious('p')
					if 'status' in l:
						t = k.text
						t = re.sub('"','',t)
						t = re.sub('Views:[^"]*','',t)
						summary = summary + t
					else: 
						summary = summary + k.text
				if "summary" in l:
					j = i.findNext('p')
					if j:
						summary1 = j.text
				
			summary = summary + summary1
			summary = re.sub('\r','',summary)
			summary = re.sub('\n\n','\n',summary)
		except Exception:
			summary = 'Summary Not Available'
		#print(summary)
		#print(picn)
		epl = naturallysorted(epl)
		#epl.append(picn)
		#epl.append(summary)
		if extra_info and epn_num:
			epl[:] = []
			epl.append(epn_num)
		record_history = True
		return (epl,summary,picn,record_history,depth_list)
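naturallysorted() is used above but not defined in this snippet; a common stand-in (an assumption, not necessarily the project's implementation) sorts digit runs numerically so that episode 2 precedes episode 10:

	import re

	def naturallysorted(seq):
		def key(s):
			# split into digit/non-digit runs; compare digits as ints
			return [int(t) if t.isdigit() else t.lower()
				for t in re.split(r'(\d+)', s)]
		return sorted(seq, key=key)

	print(naturallysorted(['Episode-10', 'Episode-2', 'Episode-1']))
	# ['Episode-1', 'Episode-2', 'Episode-10']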
Example #56
	def Browse(self,url):
		hdr = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'
		html = ''
		home1 = os.path.expanduser("~")
		
		BASEDIR,BASEFILE = os.path.split(os.path.abspath(__file__))
		
		if 'AnimeWatch' in BASEDIR:
			enginePath = os.path.join(home1,'.config','AnimeWatch','src','Plugins','headlessEngine.py')
		elif 'kawaii-player' in BASEDIR:
			enginePath = os.path.join(home1,'.config','kawaii-player','src','Plugins','headlessEngine.py')
		tmp_dir,new_c = os.path.split(self.cookie_file)
		
		if 'animeget' in url or 'masterani' in url or 'animeplace' in url or 'moetube' in url or 'nyaa' in url:
			content = ccurl(url)
		else:
			content = 'checking_browser'
		#web = BrowseUrl(url,quality)
		if 'checking_browser' in content:
			if os.name == 'posix':
				p = subprocess.Popen(['python3','-B',enginePath,url,self.quality,self.cookie_file])
			else:
				p = subprocess.Popen(['python','-B',enginePath,url,self.quality,self.cookie_file],shell=True)
			
			cnt = 0
			
			lnk_file = os.path.join(tmp_dir,'lnk.txt')
			if os.path.exists(lnk_file):
				os.remove(lnk_file)
			while(not os.path.exists(self.cookie_file) and cnt < 20):
				print(cnt)
				print('waiting for Cloudflare')
				time.sleep(1)
				cnt = cnt+1
			if 'kissasian' in url or 'kisscartoon' in url:
				if 'kissasian' in url:
					str3 = '\nkissasian.com	FALSE	/	FALSE	0		__test'
				else:
					str3 = '\nkisscartoon.se	FALSE	/	FALSE	0		__test'
				with open(self.cookie_file, 'a') as f:
					f.write(str3)
			if ('id=' in url) and os.path.exists(self.cookie_file) and ('kisscartoon' in url or 'kissasian' in url or 'kissanime' in url):
				cnt = 0
				file_path = os.path.join(tmp_dir,'tmp_cookie')
				while(not os.path.exists(lnk_file) and cnt < 30):
					if os.path.exists(file_path):
						os.remove(file_path)
						if os.name == 'posix':
							p = subprocess.Popen(['python3','-B',enginePath,url,self.quality,self.cookie_file])
						else:
							p = subprocess.Popen(['python','-B',enginePath,url,self.quality,self.cookie_file],shell=True)
					print(cnt)
					print('waiting for Cloudflare')
					time.sleep(1)
					cnt = cnt+1
					
				
			p.kill()
		else:
			open(self.cookie_file, 'w').close()  # create an empty cookie file
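Each Browse() variant above polls for a file that the spawned headless engine is expected to write, with a fixed timeout. The same wait loop as a small generic helper (names illustrative, not from the project):

	import os, time

	def wait_for_file(path, timeout=20):
		waited = 0
		while not os.path.exists(path) and waited < timeout:
			time.sleep(1)
			waited += 1
		return os.path.exists(path)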
Example #57
	def run(self):
		content = ccurl(self.url)
		soup = BeautifulSoup(content,'lxml')
		title = soup.title.text.strip().replace('/','-')
		self.ui.gotHtmlSignal.emit(title,self.url,self.file_path)
Example #58
	def download(self, url,option):
		if option.lower() == 'play with animewatch':
			final_url = ''
			self.ui.epn_name_in_list = self.title_page
			print(self.ui.epn_name_in_list)
			if self.ui.mpvplayer_val.processId() > 0:
				self.ui.mpvplayer_val.kill()
			final_url = get_yt_url(url.toString(),self.ui.quality_val,self.ui.ytdl_path,self.ui.logger)
			if final_url:
				self.ui.watchDirectly(final_url,self.ui.epn_name_in_list,'no')
				self.ui.tab_5.show()
				self.ui.frame1.show()
				self.ui.tab_2.setMaximumWidth(self.ui.width_allowed+50)
		elif option.lower() == 'add as local playlist':
			self.get_playlist = True
			if self.playlist_dict:
				print(self.get_playlist,'=get_playlist')
				self.add_playlist(self.playlist_name)
		elif option.lower() == 'download':
			if self.ui.quality_val == 'sd480p':
				txt = "Video can't be saved in 480p, Saving in either HD or SD"
				send_notification(txt)
				quality = 'hd'
			else:
				quality = self.ui.quality_val
			finalUrl = get_yt_url(url.toString(),quality,self.ui.ytdl_path,self.ui.logger)
			finalUrl = finalUrl.replace('\n','')
			title = self.title_page+'.mp4'
			title = title.replace('"','')
			title = title.replace('/','-')
			if os.path.exists(self.ui.default_download_location):
				title = os.path.join(self.ui.default_download_location,title)
			else:
				title = os.path.join(self.ui.tmp_download_folder,title)
			command = wget_string(finalUrl,title,self.ui.get_fetch_library)
			print(command)
			self.ui.infoWget(command,0)
			
		elif option.lower() == 'get subtitle (if available)':
			self.ui.epn_name_in_list = self.title_page
			print(self.ui.epn_name_in_list)
			get_yt_sub(url.toString(),self.ui.epn_name_in_list,
						self.yt_sub_folder,self.ui.tmp_download_folder,
						self.ui.ytdl_path,self.ui.logger)
			
		elif option.lower() == 'queue item':
			file_path = os.path.join(self.home,'Playlists','Queue')
			if not os.path.exists(file_path):
				f = open(file_path,'w')
				f.close()
			if not self.ui.queue_url_list:
				self.ui.list6.clear()
			title = self.title_page.replace('/','-')
			if title.startswith('.'):
				title = title[1:]
			r = title + '	'+url.toString()+'	'+'NONE'
			self.ui.queue_url_list.append(r)
			self.ui.list6.addItem(title)
			print(self.ui.queue_url_list)
			write_files(file_path,r,line_by_line=True)
		elif option.lower() == 'season episode link':
			if self.site != "Music" and self.site != "PlayLists":
				self.ui.getTvdbEpnInfo(url.toString())
		elif option.lower() == 'artist link' or option.lower() == 'series link':
			url = url.toString()
			r = self.ui.list1.currentRow()
			nm = self.ui.get_title_name(r)
			self.ui.posterfound_new(
				name=nm,site=self.site,url=url,direct_url=True,
				copy_summary=True,copy_poster=True,copy_fanart=True)
		else:
			url = url.toString()
			if url:
				t_content = ccurl(url+'#'+'-I')
				if 'image/jpeg' in t_content and 'Location:' not in t_content:
					pass
				elif 'image/jpeg' in t_content and 'Location:' in t_content:
					m = re.findall('Location: [^\n]*',t_content)
					found = re.sub('Location: |\r','',m[0])
					url = found
				elif not self.img_url.isEmpty():
					url = self.img_url.toString()
				else:
					return 0
					
				if option.lower() == "download as fanart":
					r = self.ui.list1.currentRow()
					nm = self.ui.get_title_name(r)
					print(option,'----')
					self.ui.posterfound_new(
						name=nm,site=self.site,url=url,direct_url=True,
						copy_summary=False,copy_poster=False,copy_fanart=True)
				elif option.lower() == "download as cover":
					r = self.ui.list1.currentRow()
					nm = self.ui.get_title_name(r)
					self.ui.posterfound_new(
						name=nm,site=self.site,url=url,direct_url=True,
						copy_summary=False,copy_poster=True,copy_fanart=False)
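Both the download() branch above and get_old_server() further down resolve redirects by issuing a HEAD request (the '#-I' ccurl form) and scraping the last Location: header from the raw response text. The same parse in isolation:

	import re

	def final_location(head_text, fallback):
		# return the last redirect target, or the original URL if none
		m = re.findall('Location: [^\n]*', head_text)
		return re.sub('Location: |\r', '', m[-1]) if m else fallback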
Example #59
	def ccurlT(self,url,rfr):
		content = ccurl(url)
		return content
Example #60
	def get_old_server(self,_api,nurl,quality):
		final = ''
		#url = os.path.join(_api,nurl)
		if nurl.startswith('?'):
			nurl = nurl[1:]
		url = _api + '&' + nurl
		print(url)

		content = ccurl(url)

		arr = []
		try:
			l = json.loads(content)
		except Exception as e:
			print(e)
			content = content.replace('}{',',')
			#print(content)
			try:
				l = json.loads(content)
			except Exception as e:
				print(e)
				l = []
		if l:
			for i in l:
				#print(i)
				if i == 'data':
					for k in l[i]:
						try:
							j = (k['label'],k['file'])
						except Exception as e:
							print(e)
							j = ('no-label','no-file')
						arr.append(j)
		else:
			m = re.findall('"file":"http[^"]*',content)
			print(m)
			for i in m:
				i = i.replace('"file":"','')
				k = ('no-label','no-file')
				if 'itag=18' in i:
					k = ('360p',i)
				elif 'itag=22' in i:
					k = ('720p',i)
				elif 'itag=59' in i:
					k = ('480p',i)
				elif 'itag=37' in i:
					k = ('1080p',i)
				arr.append(k)
		if arr:
			d = dict(arr)
			print(d)
			if quality == 'sd' and '360p' in d:
				final = d['360p']
			elif quality == 'sd480p':
				if '480p' in d:
					final = d['480p']
				else:
					final = d['360p']
			elif quality == 'hd':
				if '720p' in d:
					final = d['720p']
				elif '480p' in d:
					final = d['480p']
				else:
					final = d['360p']
			elif quality == 'best':
				if '1080p' in d:
					final = d['1080p']
				elif '720p' in d:
					final = d['720p']
				elif '480p' in d:
					final = d['480p']
				else:
					final = d['360p']
		if final:
			content = ccurl(final+'#'+'-I')
			m = re.findall('Location: [^\n]*',content)
			if m:
				#print(m)
				final = re.sub('Location: |\r','',m[-1])
		
		return final
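The quality ladder at the end of get_old_server() walks down from the requested quality to whatever stream label is available. A compact equivalent of that fallback chain, assuming the same label-to-URL dict d (and, like the original, treating '360p' as the floor):

	def pick_quality(d, quality):
		# preference order per requested quality
		ladders = {
			'sd': ['360p'],
			'sd480p': ['480p', '360p'],
			'hd': ['720p', '480p', '360p'],
			'best': ['1080p', '720p', '480p', '360p'],
		}
		for label in ladders.get(quality, []):
			if label in d:
				return d[label]
		return ''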