Example #1
	def process_page(self, url):
		request = self._s.get(real_url(url))
		self.soup = BeautifulSoup(clean_html(request.text), 'html.parser')
		debug(url)

		tbl = self.soup.find('table', class_='tablesorter')
		if tbl:
			tbody = tbl.find('tbody')
			if tbody:
				for tr in tbody.find_all('tr'):
					item = {}
					cat_a = tr.find('a', class_='gen')
					if cat_a:
						item['category'] = cat_a['href']
					topic_a = tr.find('a', class_='topictitle')
					if topic_a:
						item['a'] = topic_a
					dl_a = tr.find('a', attrs={'rel': "nofollow"})
					if dl_a:
						item['dl_link'] = dl_a['href']

					seeds_td = tr.find('td', attrs={'title': "Seeders"})
					if seeds_td:
						item['seeds'] = seeds_td.get_text()

					self._items.append(item.copy())
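
A minimal, self-contained sketch of the row-scraping pattern above (repeated almost verbatim as Example #4): every lookup is guarded before its attributes are read, so a missing cell simply leaves a key out of the item dict. The table markup here is invented for illustration:

from bs4 import BeautifulSoup

html = """
<table class="tablesorter"><tbody>
  <tr>
    <td><a class="gen" href="/cat/5">Movies</a></td>
    <td><a class="topictitle" href="/topic/1">Some title</a></td>
    <td><a rel="nofollow" href="/download/1.torrent">DL</a></td>
    <td title="Seeders">42</td>
  </tr>
</tbody></table>
"""

items = []
soup = BeautifulSoup(html, 'html.parser')
tbody = soup.find('table', class_='tablesorter').find('tbody')
for tr in tbody.find_all('tr'):
    item = {}
    cat_a = tr.find('a', class_='gen')
    if cat_a:  # guard: the cell may be absent in some rows
        item['category'] = cat_a['href']
    seeds_td = tr.find('td', attrs={'title': 'Seeders'})
    if seeds_td:
        item['seeds'] = seeds_td.get_text()
    items.append(item)

print(items)  # [{'category': '/cat/5', 'seeds': '42'}]
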
Example #2
    def process_page(self, url):

        try:
            request = self._s.get(real_url(url, self.settings))
        except requests.exceptions.ConnectionError:
            return

        self.soup = BeautifulSoup(clean_html(request.text), 'html.parser')
        debug(url)

        indx = self.soup.find('div', attrs={'id': 'index'})
        if indx:
            bgnd = indx.find('tr', class_='backgr')
            if bgnd:
                for row in bgnd.next_siblings:
                    td2 = row.contents[1]
                    td5 = row.contents[-1]

                    item = {}
                    topic_a = td2.find_all('a')
                    if topic_a:
                        topic_a = topic_a[-1]
                        item['a'] = topic_a

                    dl_a = td2.find('a', class_='downgif')
                    if dl_a:
                        item['dl_link'] = dl_a['href']

                    span_green = td5.find('span', class_='green')
                    if span_green:
                        item['seeds'] = span_green.get_text().strip()

                    self._items.append(item.copy())
Example #3
    def process_page(self, url):
        request = self._s.get(url)
        self.soup = BeautifulSoup(clean_html(request.text), 'html.parser')
        debug(url)

        for tbl in self.soup.select('table.pline'):
            self._items.append(tbl)
Example #4
    def process_page(self, url):
        request = self._s.get(url)
        self.soup = BeautifulSoup(clean_html(request.text), 'html.parser')
        debug(url)

        tbl = self.soup.find('table', class_='tablesorter')
        if tbl:
            tbody = tbl.find('tbody')
            if tbody:
                for tr in tbody.find_all('tr'):
                    item = {}
                    cat_a = tr.find('a', class_='gen')
                    if cat_a:
                        item['category'] = cat_a['href']
                    topic_a = tr.find('a', class_='topictitle')
                    if topic_a:
                        item['a'] = topic_a
                    dl_a = tr.find('a', attrs={'rel': "nofollow"})
                    if dl_a:
                        item['dl_link'] = dl_a['href']

                    seeds_td = tr.find('td', attrs={'title': "Seeders"})
                    if seeds_td:
                        item['seeds'] = seeds_td.get_text()

                    self._items.append(item.copy())
Example #5
	def process_page(self, url):
		request = self._s.get(real_url(url))
		self.soup = BeautifulSoup(clean_html(request.text), 'html.parser')
		debug(url)

		for tbl in self.soup.select('table.pline'):
			self._items.append(tbl)
Example #6
def download_torrent(url, path, settings):
    from base import save_hashes
    save_hashes(path)
    import shutil
    url = urllib2.unquote(url)
    debug('download_torrent:' + url)

    href = None
    link = None  # find_direct_link(url, settings)
    if link is None:
        s = create_session(settings)
        page = s.get(url)
        # debug(page.text.encode('cp1251'))

        soup = BeautifulSoup(clean_html(page.text), 'html.parser')
        a = soup.select('td.gensmall > span.genmed > b > a')
        if len(a) > 0:
            href = 'http://nnm-club.me/forum/' + a[0]['href']
    else:
        href = link
        response = urllib2.urlopen(real_url(link, settings))
        #CHUNK = 256 * 1024
        with filesystem.fopen(path, 'wb') as f:
            shutil.copyfileobj(response, f)
        save_hashes(path)
        return True

        #r = requests.head(link)
        #debug(r.headers)
        #return False

    if href:

        def make_req():
            if link:
                return requests.get(real_url(link, settings), verify=False)
            else:
                return s.get(href,
                             headers={'Referer': real_url(url, settings)})

        try:
            r = make_req()
            if not r.ok and r.status_code == 502:
                import time
                time.sleep(1)
                r = make_req()

            if 'Content-Type' in r.headers:
                if not 'torrent' in r.headers['Content-Type']:
                    return False

            with filesystem.fopen(path, 'wb') as torr:
                for chunk in r.iter_content(100000):
                    torr.write(chunk)
            save_hashes(path)
            return True
        except:
            pass

    return False
Example #7
	def Actors(self):
		if self.actors is not None:
			return self.actors

		self.actors = []

		if self.kinopoisk_url:
			cast_url = self.kinopoisk_url + 'cast/'
			r = self._http_get(cast_url)
			if r.status_code == requests.codes.ok:
				soup = BeautifulSoup(base.clean_html(r.text), 'html.parser')
				for a in soup.select('a[name="actor"]'):
					for sibling in a.next_siblings:
						if not hasattr(sibling, 'select'):
							# skip NavigableString nodes between tags
							continue
						if sibling.name == 'a':
							return self.actors
						for actorInfo in sibling.select('.actorInfo'):
							photo 		= actorInfo.select('div.photo a')[0]['href']
							#http://st.kp.yandex.net/images/actor_iphone/iphone360_30098.jpg
							#/name/7627/
							photo 		= photo.replace('/', '').replace('name', '')
							photo 		= 'http://st.kp.yandex.net/images/actor_iphone/iphone360_' + photo + '.jpg'
							ru_name		= actorInfo.select('div.info .name a')[0].get_text()
							en_name		= actorInfo.select('div.info .name span')[0].get_text()
							role		= actorInfo.select('div.info .role')[0].get_text().replace('... ', '')
							role 		= role.split(',')[0]
							self.actors.append({'photo': photo,'ru_name': ru_name,'en_name': en_name,'role': role})
		return self.actors
Example #8
    def get_google_cache(self, url):
        import urllib
        search_url = "http://www.google.com/search?q=" + urllib.quote_plus(url)
        headers = {'user-agent': user_agent}

        r = self.session.get(search_url, headers=headers, timeout=2.0)

        try:
            soup = BeautifulSoup(base.clean_html(r.text), 'html.parser')
            a = soup.find('a', class_='fl')
            if a:
                cache_url = a['href']

                import urlparse
                res = urlparse.urlparse(cache_url)
                res = urlparse.ParseResult(
                    res.scheme if res.scheme else 'https', res.netloc
                    if res.netloc else 'webcache.googleusercontent.com',
                    res.path, res.params, res.query, res.fragment)
                cache_url = urlparse.urlunparse(res)

                #print cache_url
                r = self.session.get(cache_url, headers=headers, timeout=2.0)

                indx = r.text.find('<html')

                resp = Object()
                resp.status_code = r.status_code
                resp.text = r.text[indx:]

                return resp
        except BaseException as e:
            debug(str(e))

        return requests.Response()
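
The interesting step above is rebuilding a cache link that may lack a scheme or host, by substituting defaults into a ParseResult. That step in isolation (the relative URL is a made-up sample; the imports cover both the Python 2 urlparse module used above and its Python 3 location):

try:
    from urlparse import urlparse, urlunparse, ParseResult  # Python 2
except ImportError:
    from urllib.parse import urlparse, urlunparse, ParseResult  # Python 3

cache_url = '/search?q=cache:example.com/page'  # hypothetical relative link

res = urlparse(cache_url)
res = ParseResult(
    res.scheme if res.scheme else 'https',
    res.netloc if res.netloc else 'webcache.googleusercontent.com',
    res.path, res.params, res.query, res.fragment)

print(urlunparse(res))
# https://webcache.googleusercontent.com/search?q=cache:example.com/page
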
Example #9
def download_torrent(url, path, settings):
	url = urllib2.unquote(url)
	debug('download_torrent:' + url)

	page = requests.get(real_url(url, settings))
	# debug(page.text.encode('cp1251'))

	soup = BeautifulSoup(clean_html(page.text), 'html.parser')
	a = soup.select('#download > a')
	if len(a) > 1:
		link = a[1]['href']
	else:
		link = None

	if link:
		r = requests.get(real_url(link, settings))

		debug(r.headers)

		if 'Content-Type' in r.headers:
			if not 'torrent' in r.headers['Content-Type']:
				return False

		try:
			with filesystem.fopen(path, 'wb') as torr:
				for chunk in r.iter_content(100000):
					torr.write(chunk)
			return True
		except:
			pass

	return False
Example #10
def get_magnet_link(url):
	r = requests.get(real_url(url))
	if r.status_code == requests.codes.ok:
		soup = BeautifulSoup(clean_html(r.text), 'html.parser')
		for a in soup.select('a[href*="magnet:"]'):
			debug(a['href'])
			return a['href']
	return None
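
a[href*="magnet:"] is a CSS attribute-substring selector: it matches every <a> whose href contains the string "magnet:". A tiny sketch against invented markup:

from bs4 import BeautifulSoup

html = ('<p><a href="/faq">FAQ</a>'
        '<a href="magnet:?xt=urn:btih:abc123">magnet</a></p>')
soup = BeautifulSoup(html, 'html.parser')

for a in soup.select('a[href*="magnet:"]'):
    print(a['href'])  # magnet:?xt=urn:btih:abc123
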
Example #11
def get_magnet_link(url):
    r = requests.get(real_url(url, settings))
    if r.status_code == requests.codes.ok:
        soup = BeautifulSoup(clean_html(r.text), 'html.parser')
        for a in soup.select('a[href*="magnet:"]'):
            debug(a['href'])
            return a['href']
    return None
Example #12
    def __init__(self, imdb_id):
        headers = {'Accept-Language': 'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7'}

        resp = requests.get('http://www.imdb.com/title/' + imdb_id + '/',
                            headers=headers)
        if resp.status_code == requests.codes.ok:
            text = base.clean_html(resp.content)
            self.page = BeautifulSoup(text, 'html.parser')
Example #13
	def parse_description(self, html_text):
		self.soup = BeautifulSoup(clean_html(html_text), 'html.parser')

		tag = u''
		self._dict['gold'] = False
		for a in self.soup.select('img[src="images/gold.gif"]'):
			self._dict['gold'] = True
			debug('gold')

		for span in self.soup.select('span.postbody span'):
			try:
				text = span.get_text()
				tag = self.get_tag(text)
				if tag != '':
					if tag != u'plot':
						self._dict[tag] = base.striphtml(unicode(span.next_sibling).strip())
					else:
						self._dict[tag] = base.striphtml(unicode(span.next_sibling.next_sibling).strip())
					debug('%s (%s): %s' % (text.encode('utf-8'), tag.encode('utf-8'), self._dict[tag].encode('utf-8')))
			except:
				pass
		if 'genre' in self._dict:
			self._dict['genre'] = self._dict['genre'].lower().replace('.', '')

		count_id = 0
		for a in self.soup.select('a[href*="www.imdb.com/title/"]'):
			try:
				href = a['href']

				components = href.split('/')
				if components[2] == u'www.imdb.com' and components[3] == u'title':
					self._dict['imdb_id'] = components[4]
					count_id += 1
			except:
				pass

		if count_id > 1:
			return False

		for img in self.soup.select('var.postImg'):  # ('img.postImg'):
			try:
				self._dict['thumbnail'] = img['title']
				debug('!!!!!!!!!!!!!!thumbnail: ' + self._dict['thumbnail'])
				break
			except:
				pass

		self.parse_country_studio()

		if self.settings:
			if self.settings.use_kinopoisk:
				for kp_id in self.soup.select('#kp_id'):
					self._dict['kp_id'] = kp_id['href']

		self.make_movie_api(self.get_value('imdb_id'), self.get_value('kp_id'))

		return True
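
The span.next_sibling trick above relies on the value being the text node that directly follows the label tag (and, for the plot, one node further along, past a line break). The basic case in isolation, with invented markup:

from bs4 import BeautifulSoup

html = '<span class="postbody"><span>Genre: </span>Action.Comedy<br/></span>'
soup = BeautifulSoup(html, 'html.parser')

for span in soup.select('span.postbody span'):
    label = span.get_text()            # 'Genre: '
    value = span.next_sibling.strip()  # text node right after the label
    print(label + value)               # Genre: Action.Comedy
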
Example #14
	def Trailer(self):
		if self.kinopoisk_url:
			trailer_page = self.kinopoisk_url + 'video/type/1/'
			r = self._http_get(trailer_page)
			if r.status_code == requests.codes.ok:
				soup = BeautifulSoup(base.clean_html(r.text), 'html.parser')
				for div in soup.select('tr td div div.flag2'):
					trailer = self.__trailer(div)
					if trailer:
						return trailer
				for a in soup.select('a.all'):
					return self.__trailer(a)
		return None
Example #15
	def getTitle(self):
		title = None
		if self.kinopoisk_url and self.soup is None:
			r = self._http_get(self.kinopoisk_url)
			if r.status_code == requests.codes.ok:
				self.soup = BeautifulSoup(base.clean_html(r.text), 'html.parser')

		if self.soup:
			h = self.soup.find('h1', class_ = 'moviename-big')
			if h:
				title = h.contents[0].strip()

		return title
Example #16
def download_torrent(url, path, settings):
	import shutil
	url = urllib2.unquote(url)
	debug('download_torrent:' + url)

	href = None
	link = find_direct_link(url, settings)
	if link is None:
		s = create_session(settings)
		page = s.get(real_url(url))
		# debug(page.text.encode('cp1251'))

		soup = BeautifulSoup(clean_html(page.text), 'html.parser')
		a = soup.select('td.gensmall > span.genmed > b > a')
		if len(a) > 0:
			href = 'http://nnm-club.me/forum/' + a[0]['href']
		debug(s.headers)
	else:
		href = link
		response = urllib2.urlopen(real_url(link))
		#CHUNK = 256 * 1024
		with filesystem.fopen(path, 'wb') as f:
			shutil.copyfileobj(response, f)
			return True

		#r = requests.head(link)
		#debug(r.headers)
		#return False


	if href:
		if link:
			r = requests.get(real_url(link))
		else:
			r = s.get(real_url(href), headers={'Referer': real_url(url)}, verify=False)
		debug(r.headers)

		# 'Content-Type': 'application/x-bittorrent'
		if 'Content-Type' in r.headers:
			if not 'torrent' in r.headers['Content-Type']:
				return False

		try:
			with filesystem.fopen(path, 'wb') as torr:
				for chunk in r.iter_content(100000):
					torr.write(chunk)
			return True
		except:
			pass

	return False
Example #17
def create_session(settings):
	s = requests.Session()

	r = s.get(real_url("http://nnm-club.me/forum/login.php"))

	soup = BeautifulSoup(clean_html(r.text), 'html.parser')

	code = ''
	for inp in soup.select('input[name="code"]'):
		code = inp['value']
	# debug(code)

	data = {"username": settings.nnmclub_login, "password": settings.nnmclub_password,
			"autologin": "******", "code": code, "redirect": "", "login": ""}
	login = s.post(real_url("http://nnm-club.me/forum/login.php"), data=data,
				   headers={'Referer': real_url("http://nnm-club.me/forum/login.php")})
	debug('Login status: %d' % login.status_code)

	return s
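
create_session first loads the login form to pick up the hidden code field (presumably a one-time form token), then posts it back along with the credentials. Extracting a hidden input's value in isolation, with invented form markup:

from bs4 import BeautifulSoup

html = ('<form action="login.php" method="post">'
        '<input type="hidden" name="code" value="1a2b3c"/>'
        '</form>')
soup = BeautifulSoup(html, 'html.parser')

code = ''  # default in case the input is missing
for inp in soup.select('input[name="code"]'):
    code = inp['value']
print(code)  # 1a2b3c
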
Example #18
    def trailer(self):
        if self.kinopoisk_url:
            trailer_page = self.kinopoisk_url + 'video/type/1/'
            r = self._http_get(trailer_page)
            if r.status_code == requests.codes.ok:
                text = base.clean_html(r.text)
                soup = BeautifulSoup(text, 'html.parser')

                if not soup:
                    return None

                for div in soup.select('tr td div div.flag2'):
                    trailer = self.__trailer(div)
                    if trailer:
                        return trailer
                for a in soup.select('a.all'):
                    return self.__trailer(a)
        return None
Example #19
def get_passkey(settings=None, session=None):
	if session is None and settings is None:
		return None

	if session is None:
		session = create_session(settings)

	page = session.get(real_url('http://nnm-club.me/forum/profile.php?mode=editprofile'))

	soup = BeautifulSoup(clean_html(page.text), 'html.parser')

	found_label = False
	for span in soup.select('span.gen'):
		if found_label:
			return span.get_text()
		if span.get_text() == u'Текущий passkey:':
			found_label = True

	return None
Example #20
def get_passkey(settings=None, session=None):
    if session is None and settings is None:
        return None

    if session is None:
        session = create_session(settings)

    page = session.get('http://nnm-club.me/forum/profile.php?mode=editprofile')

    soup = BeautifulSoup(clean_html(page.text), 'html.parser')

    found_label = False
    for span in soup.select('span.gen'):
        if found_label:
            return span.get_text()
        if span.get_text() == u'Текущий passkey:':
            found_label = True

    return None
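
Both versions of get_passkey scan the span.gen elements in document order and return the one that follows the label "Текущий passkey:" ("current passkey"). The same two-step scan in isolation, against invented markup:

# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup

html = (u'<span class="gen">Текущий passkey:</span>'
        u'<span class="gen">deadbeef12345678</span>')
soup = BeautifulSoup(html, 'html.parser')

found_label = False
for span in soup.select('span.gen'):
    if found_label:
        print(span.get_text())  # deadbeef12345678
        break
    if span.get_text() == u'Текущий passkey:':
        found_label = True
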
Example #21
def get_uid(settings, session=None):
	if session is None:
		session = create_session(settings)
	try:
		page = session.get(real_url('http://nnm-club.me/'))
		if page.status_code == requests.codes.ok:
			soup = BeautifulSoup(clean_html(page.text), 'html.parser')
			'''
			a = soup.select_one('a[href*="profile.php"]')
			if a is None:
				return None
			'''
			for a in soup.select('a.mainmenu'):
				m = re.search(r'profile\.php.+?u=(\d+)', a['href'])
				if m:
					return m.group(1)
		else:
			debug('page.status_code: ' + str(page.status_code))
	except BaseException as e:
		log.print_tb(e)
		pass

	return None
Example #22
def get_uid(settings, session=None):
    if session is None:
        session = create_session(settings)
    try:
        page = session.get('http://nnm-club.me/')
        if page.status_code == requests.codes.ok:
            soup = BeautifulSoup(clean_html(page.text), 'html.parser')
            '''
            a = soup.select_one('a[href*="profile.php"]')
            if a is None:
                return None
            '''
            for a in soup.select('a.mainmenu'):
                m = re.search(r'profile\.php.+?u=(\d+)', a['href'])
                if m:
                    return m.group(1)
        else:
            debug('page.status_code: ' + str(page.status_code))
    except BaseException as e:
        log.print_tb(e)
        pass

    return None
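
The regular expression pulls the numeric user id out of a profile link; a raw string keeps the \d escape intact and makes the dot in profile\.php literal. In isolation (profile URL invented):

import re
from bs4 import BeautifulSoup

html = '<a class="mainmenu" href="profile.php?mode=viewprofile&u=12345">Profile</a>'
soup = BeautifulSoup(html, 'html.parser')

for a in soup.select('a.mainmenu'):
    m = re.search(r'profile\.php.+?u=(\d+)', a['href'])
    if m:
        print(m.group(1))  # 12345
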
Example #23
def download_torrent(url, path, settings):
    from base import save_hashes
    save_hashes(path)

    url = urllib2.unquote(url)
    debug('download_torrent:' + url)

    page = requests.get(real_url(url, settings))

    soup = BeautifulSoup(clean_html(page.text), 'html.parser')
    a = soup.select('#download > a')
    if len(a) > 1:
        link = a[1]['href']
    else:
        link = None

    if link:
        r = requests.get(real_url(link, settings))

        debug(r.headers)

        if 'Content-Type' in r.headers:
            if not 'torrent' in r.headers['Content-Type']:
                return False

        try:
            with filesystem.fopen(path, 'wb') as torr:
                for chunk in r.iter_content(100000):
                    torr.write(chunk)

            save_hashes(path)
            return True
        except:
            pass

    return False
Example #24
    def actors(self):
        if self._actors is not None:
            return self._actors

        self._actors = []

        if self.kinopoisk_url:
            cast_url = self.kinopoisk_url + 'cast/'
            r = self._http_get(cast_url)
            if r.status_code == requests.codes.ok:
                text = base.clean_html(r.text)
                soup = BeautifulSoup(text, 'html.parser')

                if not soup:
                    return []

                for actorInfo in soup.find_all('div', class_='actorInfo'):
                    photo = actorInfo.select('div.photo a')[0]['href']
                    #http://st.kp.yandex.net/images/actor_iphone/iphone360_30098.jpg
                    #/name/7627/
                    photo = photo.replace('/', '').replace('name', '')
                    photo = 'http://st.kp.yandex.net/images/actor_iphone/iphone360_' + photo + '.jpg'
                    ru_name = actorInfo.select(
                        'div.info .name a')[0].get_text()
                    en_name = actorInfo.select(
                        'div.info .name span')[0].get_text()
                    role = actorInfo.select(
                        'div.info .role')[0].get_text().replace('... ', '')
                    role = role.split(',')[0]
                    self._actors.append({
                        'photo': photo,
                        'ru_name': ru_name,
                        'en_name': en_name,
                        'role': role
                    })
        return self._actors
Example #25
	def parse_description(self, html_text):
		from HTMLParser import HTMLParseError

		html_text = clean_html(html_text)
		try:
			self.soup = BeautifulSoup(html_text, 'html.parser')
		except HTMLParseError as e:
			log.print_tb(e)
			log.debug(html_text)
			return False

		tag = u''

		for b in self.soup.select('#details b'):
			try:
				text = b.get_text()
				tag = self.get_tag(text)
				if tag == 'plot':
					self._dict[tag] = base.striphtml(unicode(b.next_sibling.next_sibling).strip())
					debug('%s (%s): %s' % (text.encode('utf-8'), tag.encode('utf-8'), self._dict[tag].encode('utf-8')))
				elif tag == 'genre':
					genres = []
					elements = b.findNextSiblings('a')
					for a in elements:
						if '/tag/' in a['href']:
							genres.append(a.get_text())

					self._dict[tag] = u', '.join(genres)

				elif tag != '':
					self._dict[tag] = base.striphtml(unicode(b.next_sibling).strip())
					debug('%s (%s): %s' % (text.encode('utf-8'), tag.encode('utf-8'), self._dict[tag].encode('utf-8')))
			except:
				pass
		if 'genre' in self._dict:
			self._dict['genre'] = self._dict['genre'].lower().replace('.', '')


		for tag in [u'title', u'year', u'genre', u'director', u'actor', u'plot']:
			if tag not in self._dict:
				return False
		
		count_id = 0
		for a in self.soup.select('a[href*="www.imdb.com/title/"]'):
			try:
				href = a['href']

				components = href.split('/')
				if components[2] == u'www.imdb.com' and components[3] == u'title':
					self._dict['imdb_id'] = components[4]
					count_id += 1
			except:
				pass

		if count_id == 0:
			div_index = self.soup.select('#index')
			if div_index:
				for a in div_index[0].findAll('a', recursive=True):
					if '/torrent/' in a['href']:
						parts = a['href'].split('/')
						href = parts[0] + '/' + parts[1] + '/' + parts[2]
						html = urllib2.urlopen(real_url(href, self.settings))
						soup = BeautifulSoup(clean_html(html.read()), 'html.parser')

						for a in soup.select('a[href*="www.imdb.com/title/"]'):
							try:
								href = a['href']

								components = href.split('/')
								if components[2] == u'www.imdb.com' and components[3] == u'title':
									self._dict['imdb_id'] = components[4]
									count_id += 1
							except:
								pass

					if 'imdb_id' in self._dict:
						break

		if count_id > 1:
			return False

		if 'imdb_id' not in self._dict:
			return False

		for det in self.soup.select('#details'):
			tr = det.find('tr', recursive=False)
			if tr:
				tds = tr.findAll('td', recursive=False)
				if len(tds) > 1:
					td = tds[1]
					img = td.find('img')
					try:
						self._dict['thumbnail'] = img['src']
						debug('!!!!!!!!!!!!!!thumbnail: ' + self._dict['thumbnail'])
						break
					except:
						pass

		if self.settings:
			if self.settings.use_kinopoisk:
				for kp_id in self.soup.select('a[href*="www.kinopoisk.ru/"]'):
					self._dict['kp_id'] = kp_id['href']

		self.make_movie_api(self.get_value('imdb_id'), self.get_value('kp_id'))

		return True
Example #26
    def makeSoup(self):
        if self.kinopoisk_url and self.soup is None:
            r = self._http_get(self.kinopoisk_url)
            if r.status_code == requests.codes.ok:
                text = base.clean_html(r.text)
                self.soup = BeautifulSoup(text, 'html.parser')
Example #27
    def parse_description(self, html_text):
        self.soup = BeautifulSoup(clean_html(html_text), 'html.parser')

        tag = u''
        self._dict['gold'] = False
        for a in self.soup.select('img[src="images/gold.gif"]'):
            self._dict['gold'] = True
            debug('gold')

        for span in self.soup.select('.postbody span'):
            try:
                text = span.get_text()
                tag = self.get_tag(text)
                if tag != '':
                    if tag != u'plot':
                        self._dict[tag] = base.striphtml(
                            unicode(span.next_sibling).strip())
                    else:
                        self._dict[tag] = base.striphtml(
                            unicode(span.next_sibling.next_sibling).strip())
                    debug('%s (%s): %s' %
                          (text.encode('utf-8'), tag.encode('utf-8'),
                           self._dict[tag].encode('utf-8')))
            except:
                pass
        if 'genre' in self._dict:
            self._dict['genre'] = self._dict['genre'].replace('.', '')

        count_id = 0
        for a in self.soup.select('a[href*="www.imdb.com/title/"]'):
            try:
                href = a['href']

                components = href.split('/')
                if components[2] == u'www.imdb.com' and components[
                        3] == u'title':
                    self._dict['imdb_id'] = components[4]
                    count_id += 1
            except:
                pass

        if count_id > 1:
            return False

        img = self.soup.find('var', class_='postImg')
        if img:
            try:
                self._dict['thumbnail'] = img['title'].split('?link=')[-1]
                debug('!!!!!!!!!!!!!!thumbnail: ' + self._dict['thumbnail'])
            except:
                pass

        if 'thumbnail' not in self._dict:
            imgs = self.soup.select('span.postbody > img')
            try:
                self._dict['thumbnail'] = imgs[0]['src'].split('?link=')[-1]
                debug('!!!!!!!!!!!!!!thumbnail: ' + self._dict['thumbnail'])
            except BaseException as e:
                pass

        self.parse_country_studio()

        try:
            kp = self.soup.select_one('div.kpi a')
        except TypeError:
            kp = None
        if not kp:
            try:
                kp = self.soup.select_one('#kp_id')
            except TypeError:
                kp = None
        if kp:
            self._dict['kp_id'] = kp['href']

        self.make_movie_api(self.get_value('imdb_id'), self.get_value('kp_id'),
                            self.settings)

        return True
Example #28
    def parse_description(self, html_text):
        from HTMLParser import HTMLParseError

        html_text = clean_html(html_text)
        try:
            self.soup = BeautifulSoup(html_text, 'html.parser')
        except HTMLParseError as e:
            log.print_tb(e)
            log.debug(html_text)
            return False

        tag = u''

        for b in self.soup.select('#details b'):
            try:
                text = b.get_text()
                tag = self.get_tag(text)
                if tag == 'plot':
                    plot = base.striphtml(
                        unicode(b.next_sibling.next_sibling).strip())
                    if plot:
                        self._dict[tag] = plot
                        debug('%s (%s): %s' %
                              (text.encode('utf-8'), tag.encode('utf-8'),
                               self._dict[tag].encode('utf-8')))
                elif tag == 'genre':
                    genres = []
                    elements = b.findNextSiblings('a')
                    for a in elements:
                        if '/tag/' in a['href']:
                            genres.append(a.get_text())

                    self._dict[tag] = u', '.join(genres)

                elif tag != '':
                    self._dict[tag] = base.striphtml(
                        unicode(b.next_sibling).strip())
                    debug('%s (%s): %s' %
                          (text.encode('utf-8'), tag.encode('utf-8'),
                           self._dict[tag].encode('utf-8')))
            except:
                pass

        tags = []
        for tag in [
                u'title', u'year', u'genre', u'director', u'actor', u'plot'
        ]:
            if tag not in self._dict:
                tags.append(tag)

        if tags:
            try:
                details = self.soup.select_one('#details').get_text()
                lines = details.split('\n')
                for l in lines:
                    if ':' in l:
                        key, desc = l.split(':', 1)
                        key = key.strip(u' \r\n\t✦═')
                        desc = desc.strip(u' \r\n\t')

                        tag = self.get_tag(key + ':')
                        if tag and desc and tag not in self._dict:
                            self._dict[tag] = desc
            except BaseException as e:
                debug('No parse #details')
                debug(e)
                pass

        if 'genre' in self._dict:
            self._dict['genre'] = self._dict['genre'].lower().replace('.', '')

        if 'video' in self._dict:
            self._dict['video'] = self._dict['video'].replace('|', ',')

            if self.settings.rutor_nosd:
                video = self._dict['video']
                parts = video.split(',')

                for part in parts:
                    part = part.strip()

                    if 'XviD' in part:
                        return False

                    m = re.search(ur'(\d+)[xXхХ](\d+)', part)
                    if m:
                        w = int(m.group(1))
                        #h = int(m.group(2))
                        if w < 1280:
                            return False
        else:
            pass

        count_id = 0
        for a in self.soup.select('a[href*="www.imdb.com/title/"]'):
            try:
                href = a['href']

                components = href.split('/')
                if components[2] == u'www.imdb.com' and components[
                        3] == u'title':
                    self._dict['imdb_id'] = components[4]
                    count_id += 1
            except:
                pass

        if count_id == 0:
            div_index = self.soup.select('#index')
            if div_index:
                for a in div_index[0].findAll('a', recursive=True):
                    if '/torrent/' in a['href']:
                        parts = a['href'].split('/')
                        href = parts[0] + '/' + parts[1] + '/' + parts[2]
                        html = urllib2.urlopen(real_url(href, self.settings))
                        soup = BeautifulSoup(clean_html(html.read()),
                                             'html.parser')

                        for a in soup.select('a[href*="www.imdb.com/title/"]'):
                            try:
                                href = a['href']

                                components = href.split('/')
                                if components[
                                        2] == u'www.imdb.com' and components[
                                            3] == u'title':
                                    self._dict['imdb_id'] = components[4]
                                    count_id += 1
                            except:
                                pass

                    if 'imdb_id' in self._dict:
                        break

        if count_id > 1:
            return False

        if 'imdb_id' not in self._dict:
            if not hasattr(self.settings, 'no_skip_by_imdb'):
                return False

        for det in self.soup.select('#details'):
            tr = det.find('tr', recursive=False)
            if tr:
                tds = tr.findAll('td', recursive=False)
                if len(tds) > 1:
                    td = tds[1]
                    img = td.find('img')
                    try:
                        self._dict['thumbnail'] = img['src']
                        debug('!!!!!!!!!!!!!!thumbnail: ' +
                              self._dict['thumbnail'])
                        break
                    except:
                        pass

        for kp_id in self.soup.select('a[href*="www.kinopoisk.ru/"]'):
            self._dict['kp_id'] = kp_id['href']

        self.make_movie_api(self.get_value('imdb_id'), self.get_value('kp_id'),
                            self.settings)

        return True
Example #29
def create_session(settings):
    try:
        return settings.session
    except AttributeError:
        s = requests.Session()

        cookies = None
        if settings.nnmclub_use_ssl:
            cookies = dict(ssl='enable_ssl')

        r = s.get(real_url("http://nnm-club.me/forum/login.php", settings),
                  verify=False)

        soup = BeautifulSoup(clean_html(r.text), 'html.parser')

        code = ''
        for inp in soup.select('input[name="code"]'):
            code = inp['value']
        # debug(code)

        data = {
            "username": settings.nnmclub_login,
            "password": settings.nnmclub_password,
            "autologin": "******",
            "code": code,
            "redirect": "",
            "login": ""
        }
        login = s.post(real_url("http://nnm-club.me/forum/login.php",
                                settings),
                       data=data,
                       verify=False,
                       cookies=cookies,
                       headers={
                           'Referer':
                           real_url("http://nnm-club.me/forum/login.php",
                                    settings)
                       })
        debug('Login status: %d' % login.status_code)

        class MySession():
            def __init__(self, session, settings):
                self.session = session
                self.settings = settings

            def _prepare(self, kwargs):
                if settings.nnmclub_use_ssl:
                    kwargs['verify'] = False
                kwargs['cookies'] = cookies

            def get(self, url, **kwargs):
                self._prepare(kwargs)
                return self.session.get(real_url(url, self.settings), **kwargs)

            def post(self, url, **kwargs):
                self._prepare(kwargs)
                return self.session.post(real_url(url, self.settings),
                                         **kwargs)

        s = MySession(s, settings)

        settings.session = s

        return s