Example #1
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        if extra_info == '-1':
            arr = []
            return (arr, 'Instructions', 'No.jpg', False, depth_list)
        else:
            epn_num = ''
            if extra_info:
                name, epn_num = name.rsplit('--', 1)

            url = 'http://kimcartoon.me/Cartoon/' + name
            print(url)
            content = self.ccurlN(url)
            soup = BeautifulSoup(content, 'lxml')
            epl = re.findall('/Cartoon/' + name + '[^"]*["?"]id[^"]*', content)
            try:
                img = re.findall('http://kimcartoon.me/Uploads/Etc/[^"]*.jpg',
                                 content)
                img_src = ''
                img_link = ''
                if not img:
                    img_src = soup.find('link', {'rel': 'image_src'})
                    if img_src and img_src.get('href'):
                        img_link = img_src['href']
                        if not img_link.startswith('http'):
                            if img_link.startswith('/'):
                                img_link = 'http://kimcartoon.me' + img_link
                            else:
                                img_link = 'http://kimcartoon.me/' + img_link
                else:
                    img_link = img[0]
                print(img, img_src, img_link)
                picn = os.path.join(self.tmp_dir, name + '.jpg')
                print(picn)
                if not os.path.isfile(picn):
                    if img:
                        # Site-hosted images need the stored cookie file.
                        ccurl(img_link + '#' + '-o' + '#' + picn,
                              self.cookie_file)
                    elif img_link:
                        ccurl(img_link + '#' + '-o' + '#' + picn)
            except Exception:
                picn = os.path.join(self.tmp_dir, name + '.jpg')
            # Strip the common URL prefix so only the episode part remains.
            epl = [re.sub('/Cartoon/' + name + '/', '', i) for i in epl]

            summary = ""
            summary1 = ""
            try:
                link = soup.findAll('span', {'class': 'info'})
                for i in link:
                    l = i.text.lower()
                    if any(key in l for key in ('genres', 'other name',
                                                'country', 'date aired',
                                                'status')):
                        k = i.findPrevious('p')
                        if 'status' in l:
                            t = k.text
                            t = re.sub('"', '', t)
                            t = re.sub('Views:[^"]*', '', t)
                            summary = summary + t
                        else:
                            summary = summary + k.text
                    if "summary" in l:
                        j = i.findNext('p')
                        if j:
                            summary1 = j.text

                summary = summary + summary1
                summary = re.sub('\r', '', summary)
                summary = re.sub('\n\n', '\n', summary)
            except Exception:
                summary = 'Summary Not Available'
            epl = naturallysorted(epl)
            if extra_info and epn_num:
                epl[:] = []
                epl.append(epn_num)
            record_history = True
            return (epl, summary, picn, record_history, depth_list)
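
Note: ccurl is AnimeWatch's internal download helper, not a public library. The calls above pack curl-style options into one '#'-delimited string ('<url>#-o#<output-file>'), with an optional cookie file as the second argument. A minimal sketch of that convention, assuming the delimiter format seen above and substituting urllib for the real curl-based transport (cookie handling omitted):

    import urllib.request

    def ccurl_sketch(command):
        # Assumed convention from the calls above: '<url>#-o#<output-file>'.
        parts = command.split('#')
        url = parts[0]
        out_file = parts[parts.index('-o') + 1] if '-o' in parts else None
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        data = urllib.request.urlopen(req).read()
        if out_file:
            with open(out_file, 'wb') as f:
                f.write(data)  # save the fetched poster/page to disk
            return out_file
        return data.decode('utf-8', 'replace')

For example, ccurl_sketch('http://example.com/poster.jpg#-o#/tmp/poster.jpg') would save the poster, while ccurl_sketch(url) would return the page body as text.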
Example #2
	def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
		epn_num = ''
		if extra_info:
			name,epn_num = name.rsplit('--',1) 
			
		url = 'http://kissanime.ru/Anime/' + name
		print(url)
		content = self.ccurlN(url)
		epl = re.findall('/Anime/' + name + '[^"]*["?"]id[^"]*', content)
		try:
			img = re.findall('https://kissanime.ru/Uploads/Etc/[^"]*.jpg', content)
			if not img:
				img = re.findall('http://cdn.myanimelist.net/[^"]*.jpg', content)	
			print(img)
			picn = os.path.join(self.tmp_dir,name+'.jpg')
			print(picn)
			if img:
				print(img[0])
			if img and not os.path.isfile(picn):
				ccurl(img[0]+'#'+'-o'+'#'+picn,self.cookie_file)
		except Exception:
			picn = os.path.join(self.tmp_dir,name+'.jpg')
		# Strip the common URL prefix so only the episode id remains.
		epl = [re.sub('/Anime/' + name + '/', '', i) for i in epl]

		soup = BeautifulSoup(content,'lxml')
		summary = ""
		summary1 = ""
		try:
			link = soup.findAll('span',{'class':'info'})
			for i in link:
				l = i.text.lower()
				if any(key in l for key in ('genres', 'other name',
						'country', 'date aired', 'status')):
					k = i.findPrevious('p')
					if 'status' in l:
						t = k.text
						t = re.sub('"','',t)
						t = re.sub('Views:[^"]*','',t)
						summary = summary + t
					else: 
						summary = summary + k.text
				if "summary" in l:
					j = i.findNext('p')
					if j:
						summary1 = j.text
				
			summary = summary + summary1
			summary = re.sub('\r','',summary)
			summary = re.sub('\n\n','\n',summary)
		except Exception:
			summary = 'Summary Not Available'
			
		epl = naturallysorted(epl)
		if extra_info and epn_num:
			epl[:] = []
			epl.append(epn_num)
			
		record_history = True
		return (epl,summary,picn,record_history,depth_list)
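
The extra_info branch above relies on a caller-side convention: when a single episode is requested, the series name arrives with the episode id appended after '--'. rsplit('--', 1) splits only at the last separator, so series names that themselves contain '--' still parse correctly. A small illustration (the values are hypothetical):

    name = 'Some-Show--Episode-05'   # hypothetical '<series>--<episode>' value
    title, epn_num = name.rsplit('--', 1)
    print(title)    # Some-Show
    print(epn_num)  # Episode-05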
Example #3
	def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
		epn_num = ''
		if extra_info:
			name,epn_num = name.rsplit('--',1) 
		
		url = 'http://kissasian.com/Drama/' + name
		print(url)
		content = self.ccurlN(url)
		epl = re.findall('/Drama/' + name +'/' +'[^"]*["?"]id[^"]*', content)
		try:
			img = re.findall('http://kissasian.com/Uploads/Etc/[^"]*.jpg', content)
			if not img:
				img = re.findall('http://cdn.myanimelist.net/[^"]*.jpg', content)
			print(img)
			picn = os.path.join(self.tmp_dir,name+'.jpg')
			print(picn)
			if img:
				print(img[0])
			if img and not os.path.isfile(picn):
				ccurl(img[0]+'#'+'-o'+'#'+picn,self.cookie_file)
		except Exception:
			picn = os.path.join(self.tmp_dir,name+'.jpg')
		# Strip the common URL prefix so only the episode id remains.
		epl = [re.sub('/Drama/' + name + '/', '', i) for i in epl]

		soup = BeautifulSoup(content,'lxml')
		
		summary = ""
		summary1 = ""
		try:
			link = soup.findAll('span',{'class':'info'})
			for i in link:
				l = i.text.lower()
				if any(key in l for key in ('genres', 'other name',
						'country', 'date aired', 'status')):
					k = i.findPrevious('p')
					if 'status' in l:
						t = k.text
						t = re.sub('"','',t)
						t = re.sub('Views:[^"]*','',t)
						summary = summary + t
					else: 
						summary = summary + k.text
				if "summary" in l:
					j = i.findNext('p')
					if j:
						summary1 = j.text
				
			summary = summary + summary1
			summary = re.sub('\r','',summary)
			summary = re.sub('\n\n','\n',summary)
		except Exception:
			summary = 'Summary Not Available'
		epl = naturallysorted(epl)
		if extra_info and epn_num:
			epl[:] = []
			epl.append(epn_num)
		record_history = True
		return (epl,summary,picn,record_history,depth_list)
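
naturallysorted is another AnimeWatch helper, used by every example to order the scraped episode slugs; its implementation is not shown here. A minimal sketch of the assumed behavior (numeric runs compare as numbers, so 'Episode-2' sorts before 'Episode-10'):

    import re

    def naturallysorted_sketch(items):
        def key(s):
            # Split into digit and non-digit runs; digit runs compare numerically.
            return [int(p) if p.isdigit() else p.lower()
                    for p in re.split(r'(\d+)', s)]
        return sorted(items, key=key)

    print(naturallysorted_sketch(['Episode-10', 'Episode-2', 'Episode-1']))
    # -> ['Episode-1', 'Episode-2', 'Episode-10']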
Example #4
    def getEpnList(self, name, opt, depth_list, extra_info, siteName,
                   category):
        epn_num = ''
        if extra_info:
            name, epn_num = name.rsplit('--', 1)

        url = 'http://kissanime.ru/Anime/' + name
        print(url)
        content = self.ccurlN(url)
        epl = re.findall('/Anime/' + name + '[^"]*["?"]id[^"]*', content)
        try:
            img = re.findall('https://kissanime.ru/Uploads/Etc/[^"]*.jpg',
                             content)
            if not img:
                img = re.findall('http://cdn.myanimelist.net/[^"]*.jpg',
                                 content)
            print(img)
            picn = os.path.join(self.tmp_dir, name + '.jpg')
            print(picn)
            if img:
                print(img[0])
            if img and not os.path.isfile(picn):
                ccurl(img[0] + '#' + '-o' + '#' + picn, self.cookie_file)
        except Exception:
            picn = os.path.join(self.tmp_dir, name + '.jpg')
        # Strip the common URL prefix so only the episode id remains.
        epl = [re.sub('/Anime/' + name + '/', '', i) for i in epl]

        soup = BeautifulSoup(content, 'lxml')
        summary = ""
        summary1 = ""
        try:
            link = soup.findAll('span', {'class': 'info'})
            for i in link:
                l = i.text.lower()
                if any(key in l for key in ('genres', 'other name',
                                            'country', 'date aired',
                                            'status')):
                    k = i.findPrevious('p')
                    if 'status' in l:
                        t = k.text
                        t = re.sub('"', '', t)
                        t = re.sub('Views:[^"]*', '', t)
                        summary = summary + t
                    else:
                        summary = summary + k.text
                if "summary" in l:
                    j = i.findNext('p')
                    if j:
                        summary1 = j.text

            summary = summary + summary1
            summary = re.sub('\r', '', summary)
            summary = re.sub('\n\n', '\n', summary)
        except Exception:
            summary = 'Summary Not Available'

        epl = naturallysorted(epl)
        if extra_info and epn_num:
            epl[:] = []
            epl.append(epn_num)

        record_history = True
        return (epl, summary, picn, record_history, depth_list)
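
In examples #1-#4 the episode list comes from a single re.findall over the raw HTML. The odd-looking character class ["?"] just matches a literal '"' or '?', so the pattern captures hrefs such as /Anime/<name>/<episode>?id=<n> (or the /Cartoon/ and /Drama/ equivalents) up to the closing quote. A self-contained demonstration against a hypothetical scrap of markup:

    import re

    name = 'Some-Show'  # hypothetical series slug
    content = '<a href="/Anime/Some-Show/Episode-001?id=12345">Episode 1</a>'
    epl = re.findall('/Anime/' + name + '[^"]*["?"]id[^"]*', content)
    print(epl)  # ['/Anime/Some-Show/Episode-001?id=12345']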
Example #5
	def getEpnList(self,name,opt,depth_list,extra_info,siteName,category):
		m = []
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
			base = "http://allmyanime.io/"
			url = base+ "watch/" + name+"/"
		elif siteName == "Dubcrazy":
			#base = "http://www.dubbedanimeonline.us/"
			base = "http://www.dubbedanimeon.me/"
			url = base+ "view/" + name+"/" 
		elif siteName == "Animetycoon":
			base = "http://www.animetycoon.org/"
			url = base+ "watch/" + name+"/"
		elif siteName == "AniDub":
			base = "https://www.watchcartoononline.io/"
			if category == "Movie":
					url = "https://www.watchcartoononline.io/" + name
			else:
					url = "https://www.watchcartoononline.io/anime/" + name
		elif siteName == "AnimeStatic":
			base = "http://www.animestatic.co/"
			if category == "Movies": 
				url = "http://www.animestatic.co/" + name + '/'
			else:
				url = "http://www.animestatic.co/anime/" + name + '/'
		elif siteName == "CartoonMax":
			url = "http://gogocartoon.us/category/" + name 
			base = "http://gogocartoon.us/"
			
		
		print(url)
		# Dubcrazy needs the request wrapper; every other site uses ccurlNew.
		if siteName == 'Dubcrazy':
			content = ccurlRequest(url, method='GET', curl_opt='-L')
		else:
			content = ccurlNew(url + '#' + '-L')
		soup = BeautifulSoup(content,'lxml')
		print(soup.prettify())
		if siteName == "Cartoon-World" or siteName == "Cartoon-World-Cartoon" or siteName == "Cartoon-World-Movies":
				"""
				link1 = soup.findAll('div',{'class':'ani-row'})
				print(link1)
				try:
					img1 = link1[0].find('img',{'class':'anime'})
					print(img1)
					img = img1['src']
					if not "http://" in img:
						img2 = re.findall('/images/[^"]*',img)
						img = "http://allmyanime.io"+img2[0]
						print(img)
					
					#picn = "/tmp/AnimeWatch/"+name+'.jpg'
					picn = os.path.join(self.tmp_dir,name+'.jpg')
					if not os.path.isfile(picn) and img:
						#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
						ccurlNew(img+'#'+'-o'+'#'+picn)
				except:
					picn = "No.jpg"
					img = ""
				try:
					summary=str(link1[1])
				
					summary = re.sub('</table>','</table><div class="desc">',summary)
					summary = re.sub('</div>','</div></div>',summary)
					print(summary)
					soup = BeautifulSoup(summary,'lxml')

					info = soup.findAll('td',{'class':'ani-table-ans'})

					summary = info[0].text+'\nType: '+ info[1].text+ '\nAired: ' + info[2].text + '\nGenre: ' + info[3].text+soup.find('div',{'class':'desc'}).text 
				except:
					summary = "No Summary Available"
				"""
				arr = []
				try:
					summary = ""
					link = soup.find('p',{'class':'margin-top-10'})
					#print(link
					summary = link.text
					
					link_img = soup.findAll('img',{'class':'img-responsive margin-top-10'})
					
					
					#img = "http://www.dubbedanimeon.me/images/" + name+".jpg"
					img = link_img[-1]['src']
					
					print(img)
					#picn = "/tmp/AnimeWatch/" + name + ".jpg"
					picn = os.path.join(self.tmp_dir,name+'.jpg')
					if not os.path.isfile(picn):
						#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
						ccurlNew(img+'#'+'-o'+'#'+picn)
				except:
					summary = "No Summary Available"
					picn = "No"
				
				m = soup.findAll('a',{'class':'subbed'})
				n = soup.findAll('a',{'class':'dubbed'})
				
				arr = []
				
				for i in m:
					if 'href' in str(i):
						arr.append(i['href'].split('/')[-1]+'-subbed'+'	'+i['href'])
						
				for i in n:
					if 'href' in str(i):
						arr.append(i['href'].split('/')[-1]+'-dubbed'+'	'+i['href'])
				
		elif siteName == "AniDub" or siteName == "AnimeStatic":
			m = []
			summary = ''
			if category == "Movies":
				m.append(name)
			else:
				if siteName == "AniDub":
					link = soup.findAll('div',{'id':'catlist-listview'})
				else:
					link = soup.findAll('ul',{ 'class':'eps eps-list'})
				for i in link:
					a = i.findAll('a')
					for j in a:
							k=(j['href']).split('/')
							if siteName == "AniDub":
								m.append(k[-1])
							else:
								m.append(k[-2])
		
			if siteName == "AniDub":
				img = []
				link = soup.findAll('div',{'class':'iltext'})		 	
				for i in link:
					summary = re.sub('\n','',i.text)
				img = re.findall('http[^"]*.jpg',content)
				
			elif siteName == "AnimeStatic":
				link = soup.find("div",{ "class":"deskripsi"})
				summary = ""
				img = []
				if link:
					
					sumr= link.find('p')
					summary = sumr.text
					#summary = re.sub('Genres[^\n]*\n','Genres : ',summary)
					#summary = re.sub('Title[^\n]*\n','Title : ',summary)
					#summary = re.sub('Rating[^\n]*\n','Rating : ',summary)
					#summary = re.sub('[)]','',summary)
					#summary = re.sub('[,][^"]\n','\n',summary)
				link = soup.find('div',{'class':'imganime'})
				if link:
					img1 = link.find('img')
					if img1:
						img.append(img1['src'])
					
			#picn = "/tmp/AnimeWatch/" + name + ".jpg"
			picn = os.path.join(self.tmp_dir,name+'.jpg')
			if not os.path.isfile(picn) and img:
				#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img[0]])
				ccurlNew(img[0]+'#'+'-o'+'#'+picn)
		elif siteName == "Animetycoon":
			img =[]
			#text = str(text)
			#print(text
			try:
				text = soup.find('article')
				text1 = text.find('p')
				summary = text1.text
				try:
					img1 = text.find('img')['src']
					if 'http' not in img1:
						img1 = 'http:' + img1
					img.append(img1)
				except:
					img = re.findall('//[^"]*posters/[^"]*.jpg',content)
					img[0] = "http:" + img[0]
				#picn = "/tmp/AnimeWatch/" + name + ".jpg"
				picn = os.path.join(self.tmp_dir,name+'.jpg')
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-L","-o",picn,img[0]])
					ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				
			except: 
				summary = "No Summary Available"
				picn = "No"
		elif siteName == "CartoonMax":
				m = []
				link = soup.find('div',{'class':'list-chapter mCustomScrollbar'})
				if link:
					j = link.findAll('a')
					for k in j:
						tmp = k['href'].split('/')[-1]
						m.append(tmp)
					
				else:
					link = soup.find('div',{'class':'anime_info_episodes'})
				
					link1 = link.findAll('a')
					for i in link1:
						k = i['href'].split('/')[-1]
						m.append(k)
				summary = ""
				link = soup.find('div',{ 'class':'description'})
				img = []
				summary = link.text
					
				link = soup.find('div',{ 'class':'box-content'})
				img1_src = link.find('div',{ 'class':'img'})
				img_src = link.find('img')['src'] 
				if ' ' in img_src:
					img_src = re.sub(" ","%20",img_src)
				print(img_src)
				if img_src:
					img.append(img_src)
					
				print(img)
				
				#picn = "/tmp/AnimeWatch/" + name + ".jpg"
				picn = os.path.join(self.tmp_dir,name+'.jpg')
				try:
					if not os.path.isfile(picn):
						ccurlNew(img[0]+'#'+'-o'+'#'+picn)
				except:
					pass
		elif siteName == "Dubcrazy":
			
			
			try:
				summary = ""
				link = soup.find('div',{'class':'well well-sm'})
				#print(link
				summary = link.text
				
				link_img = soup.findAll('img',{'class':'img-responsive'})
				
				
				#img = "http://www.dubbedanimeon.me/images/" + name+".jpg"
				img = link_img[-1]['src']
				
				print(img)
				#picn = "/tmp/AnimeWatch/" + name + ".jpg"
				picn = os.path.join(self.tmp_dir,name+'.jpg')
				if not os.path.isfile(picn):
					#subprocess.call(["curl","-A",self.hdr,"-L","-o",picn,img])
					ccurlNew(img+'#'+'-o'+'#'+picn)
			except:
				summary = "No Summary Available"
				picn = "No"
			#print(img
		sort_arr = True
		if siteName != "AniDub" and siteName != "CartoonMax":
			if siteName in ("Cartoon-World", "Cartoon-World-Cartoon", "Cartoon-World-Movies"):
				m = arr
				sort_arr = False
			else:
				fi = base + name + '[^"]*/'
				m = re.findall(fi, content)
				if m:
					# Strip the base URL and the trailing slash from each match.
					m = [re.sub(base, "", i)[:-1] for i in m]
				else:
					m = re.findall(name + '-dubbed-[^"]*', content)
		if sort_arr:
			m = naturallysorted(m)
		record_history = True
		return (m, summary, picn, record_history, depth_list)
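
Unlike the other sites, which return bare episode slugs, the Cartoon-World branch packs each entry in arr as a display slug and the target href joined by a literal tab. A hypothetical round-trip of that packing:

    entry = 'episode-1-subbed\thttp://allmyanime.io/watch/some-show/episode-1'
    slug, href = entry.split('\t', 1)
    print(slug)  # episode-1-subbed
    print(href)  # http://allmyanime.io/watch/some-show/episode-1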