def download_torrent(url, path, settings):
    """Download a .torrent file from an nnm-club topic *url* into *path*.

    A cached direct link (disabled here) would be streamed straight to disk;
    otherwise the topic page is scraped for the download anchor, which is
    fetched with the topic as referer. Returns True on success, False
    otherwise. Hashes of the stored torrent are recorded via save_hashes.
    """
    from base import save_hashes
    save_hashes(path)
    import shutil
    url = urllib2.unquote(url)
    debug('download_torrent:' + url)

    href = None
    link = None  # find_direct_link(url, settings)
    if link is None:
        s = create_session(settings)
        page = s.get(url)

        soup = BeautifulSoup(clean_html(page.text), 'html.parser')
        a = soup.select('td.gensmall > span.genmed > b > a')
        if len(a) > 0:
            href = 'http://nnm-club.me/forum/' + a[0]['href']
    else:
        # BUG FIX: was `href = linkd` — a NameError whenever a direct link
        # was found; use the found link instead.
        href = link
        response = urllib2.urlopen(real_url(link, settings))
        with filesystem.fopen(path, 'wb') as f:
            shutil.copyfileobj(response, f)
        response.close()  # urllib2 responses are not context managers in Py2
        save_hashes(path)
        return True

    if href:

        def make_req():
            # Direct links bypass the session; page anchors need the referer.
            if link:
                return requests.get(real_url(link, settings), verify=False)
            else:
                return s.get(href,
                             headers={'Referer': real_url(url, settings)})

        try:
            r = make_req()
            # Retry once on a transient 502 from the tracker's proxy.
            if not r.ok and r.status_code == 502:
                import time
                time.sleep(1)
                r = make_req()

            if 'Content-Type' in r.headers:
                if 'torrent' not in r.headers['Content-Type']:
                    return False

            with filesystem.fopen(path, 'wb') as torr:
                for chunk in r.iter_content(100000):
                    torr.write(chunk)
            save_hashes(path)
            return True
        except BaseException as e:
            # Was a silent bare except; at least record what went wrong.
            debug('download_torrent: ' + str(e))

    return False
def add_media(title, imdb):
	"""Queue *title*/*imdb* for addition to the library and wait for the result.

	Appends the pair to the 'add_media' exchange file (skipping duplicates),
	then polls up to ~300 s for an '<imdb>.ended' marker written by the
	background worker. The marker holds the number of found sources; a Kodi
	notification is shown and, when sources exist, the library add is re-run
	non-recursively through the plugin URL.
	"""
	path = filesystem.join(addon_data_path(), 'add_media')
	log.debug(path)

	# if not filesystem.exists(path):
	# 	with filesystem.fopen(path, 'w'):
	# 		pass

	# Skip if this imdb id is already queued.
	if filesystem.exists(path):
		with filesystem.fopen(path, 'r') as f:
			s = f.read()
			if imdb.encode('utf-8') in s:
				return

	with filesystem.fopen(path, 'a+') as f:
		log.debug('writing...')
		# Two lines per request: title then imdb id (utf-8 bytes, Python 2).
		seq = [title.encode('utf-8') + '\n', imdb.encode('utf-8') + '\n']
		f.writelines(seq)

	# Poll for the completion marker written by the background processor.
	ended_path = filesystem.join(addon_data_path(), imdb + '.ended')
	for cnt in range(300):

		if filesystem.exists(ended_path):
			with filesystem.fopen(ended_path, 'r') as f:
				dlg = xbmcgui.Dialog()

				count = f.read()

				try:
					count = int(count)
				except BaseException:
					count = 0  # unreadable marker -> treat as "no sources"

				if count:
					dlg.notification(u'Media Aggregator', u'"%s" добавлено в библиотеку, найдено %d источников.' % (title, count), time=10000)

					url = 'plugin://script.media.aggregator/?' + urllib.urlencode(
						{'action': 'add_media',
						 'title': title.encode('utf-8'),
						 'imdb': imdb,
						 'norecursive': True})

					xbmc.executebuiltin('RunPlugin("%s")' % url)
				else:
					dlg.notification(u'Media Aggregator',
					                 u'"%s" не добавлено в библиотеку, Источники не найдены.' % title,
					                 time=10000)
			filesystem.remove(ended_path)

			break

		sleep(1)
	def write(self, filename, path, seasonNumber = None, episodeNumber = None, cutname = None, index = None, parser = None, settings = None):
		"""Write a .strm file for *filename* under *path* that plays self.link
		through the aggregator plugin.

		Season/episode/cutname/index become URL query arguments (episodeNumber
		is stored zero-based). With *parser* set, the .alternative file is
		refreshed and the best-ranked variant may replace the link; with
		*settings* set, a library-relative path is appended. The file is only
		rewritten when the stored link actually changed.
		"""
		strmFilename = make_fullpath(filename, u'.strm')
		strmFilename = filesystem.join(path, strmFilename)

		#------------------------------------------

		link = u'plugin://script.media.aggregator/?action=play&torrent='
		link += urllib2.quote(self.link.encode('utf-8'))
		# Idiom fix: use `is not None` instead of `!= None` comparisons.
		if episodeNumber is not None:
			# Episode numbers are zero-based inside the plugin URL.
			link += u'&episodeNumber=' + str(episodeNumber - 1)
		if seasonNumber is not None:
			link += u'&seasonNumber=' + str(seasonNumber)
		if cutname is not None:
			link += u'&cutName=' + urllib2.quote(cutname)
		if index is not None:
			link += u'&index=' + str(index)

		#------------------------------------------
		if parser is not None:
			self.make_alternative(strmFilename, link, parser)

			link_with_min_rank = STRMWriterBase.get_link_with_min_rank(strmFilename, settings)
			if link_with_min_rank is not None:
				link = link_with_min_rank

		#------------------------------------------

		link += u'&nfo=' + urllib2.quote(make_fullpath(filename, '.nfo').encode('utf-8'))

		#------------------------------------------
		if settings is not None:
			path = filesystem.relpath(path, settings.base_path())
			debug(path.encode('utf-8'))
			link += u'&path=' + urllib2.quote(path.encode('utf-8'))

		#------------------------------------------
		# Skip rewriting when the stored link is already up to date.
		if filesystem.exists(strmFilename):
			with filesystem.fopen(strmFilename, 'r') as f:
				old_link = f.read()
				if old_link.decode('utf-8') == link:
					return

		#------------------------------------------
		try:
			with filesystem.fopen(strmFilename, 'w') as f:
				f.write(link.encode('utf-8'))
		except IOError:
			debug('Error write ' + strmFilename.encode('utf-8'))
			return
def download_torrent(url, path, settings):
	"""Download a .torrent for an nnm-club topic *url* into *path*.

	A cached direct link (via find_direct_link) is streamed straight to disk;
	otherwise the topic page is scraped for the download anchor, fetched with
	the topic as referer. Returns True on success, False otherwise.
	"""
	import shutil
	url = urllib2.unquote(url)
	debug('download_torrent:' + url)

	href = None
	link = find_direct_link(url, settings)
	if link is None:
		# No cached link: fetch the topic page and locate the download anchor.
		s = create_session(settings)
		page = s.get(real_url(url))
		# debug(page.text.encode('cp1251'))

		soup = BeautifulSoup(clean_html(page.text), 'html.parser')
		a = soup.select('td.gensmall > span.genmed > b > a')
		if len(a) > 0:
			href = 'http://nnm-club.me/forum/' + a[0]['href']
		debug(s.headers)
	else:
		# Cached direct link: stream it straight to disk and finish.
		href = link
		response = urllib2.urlopen(real_url(link))
		#CHUNK = 256 * 1024
		with filesystem.fopen(path, 'wb') as f:
			shutil.copyfileobj(response, f)
			return True

		#r = requests.head(link)
		#debug(r.headers)
		#return False


	if href:
		if link:
			r = requests.get(real_url(link))
		else:
			r = s.get(real_url(href), headers={'Referer': real_url(url)}, verify=False)
		debug(r.headers)

		# 'Content-Type': 'application/x-bittorrent'
		if 'Content-Type' in r.headers:
			if not 'torrent' in r.headers['Content-Type']:
				return False

		try:
			with filesystem.fopen(path, 'wb') as torr:
				for chunk in r.iter_content(100000):
					torr.write(chunk)
			return True
		except:
			pass

	return False
	def write(self, filename, seasonNumber = None, episodeNumber = None, cutname = None, index = None, parser = None, settings = None):
		"""Write a .strm file (in the current directory) named after *filename*
		that plays self.link through the aggregator plugin.

		Season/episode/cutname/index become URL query arguments (episodeNumber
		is stored zero-based). With *parser* set, the .alternative file is
		refreshed and the best-ranked variant may replace the link; with
		*settings* set, a library-relative path (derived from the current
		directory) is appended. The file is rewritten only when the link
		changed.
		"""
		strmFilename = make_fullpath(filename, u'.strm')
		
		#------------------------------------------

		link = u'plugin://script.media.aggregator/?action=play&torrent='
		link += urllib2.quote(self.link.encode('utf-8'))
		if episodeNumber != None:
			# Episode numbers are zero-based inside the plugin URL.
			link += u'&episodeNumber=' + str(episodeNumber - 1)
		if seasonNumber != None:
			link += u'&seasonNumber=' + str(seasonNumber)
		if cutname != None:
			link += u'&cutName=' + urllib2.quote(cutname)
		if index != None:
			link += u'&index=' + str(index)

		#------------------------------------------
		if parser is not None:
			self.make_alternative(strmFilename, link, parser)
			# rank = get_rank(parser.get('full_title', ''), parser, settings),
			# debug('rank: ' + str(rank))
		
			link_with_min_rank = STRMWriterBase.get_link_with_min_rank(strmFilename, settings)
			if not link_with_min_rank is None:
				link = link_with_min_rank
				
		#------------------------------------------
			
		link += u'&nfo=' + urllib2.quote(make_fullpath(filename, '.nfo').encode('utf-8'))
		
		#------------------------------------------
		if settings != None:
			path = filesystem.relpath(filesystem.getcwd(), settings.base_path())
			debug(path.encode('utf-8'))
			link += u'&path=' + urllib2.quote(path.encode('utf-8'))

		#------------------------------------------
		# Skip rewriting when the stored link is already up to date.
		if filesystem.exists(strmFilename):
			with filesystem.fopen(strmFilename, 'r') as f:
				old_link = f.read()
				if old_link.decode('utf-8') == link:
					return
		
		#------------------------------------------
		try:
			with filesystem.fopen(strmFilename, 'w') as f:
				f.write(link.encode('utf-8'))
		except IOError:
			debug('Error write ' + strmFilename.encode('utf-8'))
			return
Exemple #6
0
    def __init__(self):
        """Read video database settings from Kodi's advancedsettings.xml.

        Starts with use_mysql False and an empty dict; when the file exists
        and parses, the <videodatabase> child fields (type/host/port/user/
        pass/name) are copied into self.dict. Parse/IO errors are logged and
        leave the defaults in place.
        """
        self.use_mysql = False
        self.dict.clear()

        path = xbmc.translatePath(
            'special://profile/advancedsettings.xml').decode('utf-8')
        self.LOG(path)
        if not filesystem.exists(path):
            return

        try:
            with filesystem.fopen(path, 'r') as f:
                content = f.read()
                log.debug(content)
                root = ET.fromstring(content)
        except IOError as e:
            self.LOG("I/O error({0}): {1}".format(e.errno, e.strerror))
            return
        except BaseException as e:
            self.LOG("error: " + str(e))
            return

        # Copy connection fields from the first <videodatabase> section.
        for section in root:
            if section.tag == 'videodatabase':
                for child in section:
                    if child.tag in [
                            'type', 'host', 'port', 'user', 'pass', 'name'
                    ]:
                        self.dict[child.tag] = child.text
                        log.debug(child.text)
                self.LOG('<videodatabase> found')
                return

        self.LOG('<videodatabase> not found')
    def load(self, path):
        """Parse advancedsettings-style XML at *path* and copy the
        <videodatabase> connection fields into self.dict.

        IO and parse errors are logged via self.LOG and abort the load.
        """
        try:
            with filesystem.fopen(path, 'r') as f:
                root = ET.fromstring(f.read())
        except IOError as e:
            self.LOG("I/O error({0}): {1}".format(e.errno, e.strerror))
            return
        except BaseException as e:
            self.LOG("error: " + str(e))
            return

        wanted = ['type', 'host', 'port', 'user', 'pass', 'name']
        for section in root:
            if section.tag != 'videodatabase':
                continue
            for child in section:
                if child.tag in wanted:
                    self.dict[child.tag] = child.text
                    log.debug(child.text)
            self.LOG('<videodatabase> found')
            return

        self.LOG('<videodatabase> not found')
    def setSetting(self, id, val):
        """Set setting *id* to *val* in the in-memory tree and rewrite the
        addon settings XML on disk.

        Uses a manual scan instead of an XPath predicate (Python 2.6-safe);
        missing ids get a new <setting> element appended.
        """
        found = None
        for node in self.root.findall("setting"):
            if node.attrib.get('id') == id:
                found = node
                break

        if found is None:
            ET.SubElement(self.root,
                          'setting',
                          attrib={
                              'id': str(id),
                              'value': str(val)
                          })
        else:
            found.set('value', str(val))

        # Serialize by hand so the output format stays stable.
        with filesystem.fopen(self._addon_xml, 'w') as f:
            f.write('<settings>\n')
            for node in self.root:
                f.write('    <setting id="%s" value="%s" />\n' % (Addon._xml(
                    node.get('id')), Addon._xml(node.get('value'))))
            f.write('</settings>\n')

        self.mtime = filesystem.getmtime(self._addon_xml)
def search_results(imdb, session, settings, url, cat):
    """Collect torrent candidates for *imdb* from an hdclub listing page *url*.

    Posts with fewer than 5 seeds or a category differing from *cat* are
    skipped. Each remaining post's description page is fetched, its
    'heading_r' table cells saved to 'hdclub.<imdb>.html' and run through
    DescriptionParser; successful parses yield {'parser', 'link'} entries in
    the returned list.
    """
    debug("search_results: url = " + url)

    enumerator = TrackerPostsEnumerator(session)
    enumerator.process_page(url)
    result = []
    for post in enumerator.items():
        # Require a minimum of 5 seeds when the post reports them.
        if "seeds" in post and int(post["seeds"]) < 5:
            continue

        if str(post.get("cat", "")) != str(cat):
            continue

            # full_title, content, link, settings

        page = requests.get("http://hdclub.org/" + post["a"])

        soup = BeautifulSoup(page.text)

        content = ""
        tbl = soup.find("table", class_="heading_b")

        for td in tbl.find_all("td", class_="heading_r"):
            content += td.prettify()

        # Keep a copy of the parsed description for debugging.
        with filesystem.fopen("hdclub." + imdb + ".html", "w") as html:
            html.write(content.encode("utf-8"))

        parser = DescriptionParser(post["title"], content, post["a"], settings=settings)
        debug(u"%s %s %s" % (post["title"], str(parser.parsed()), parser.get_value("imdb_id")))
        if parser.parsed():  # and parser.get_value('imdb_id') == imdb:
            result.append({"parser": parser, "link": post["dl_link"]})

    return result
Exemple #10
0
def get_tmdb_api_key():
    """Return TMDB access info as {'host': ..., 'key': ...}.

    Reads the key and API host from the Kodi TMDB scraper's tmdb.xml when it
    exists; otherwise the built-in defaults are returned.
    """
    # BUG FIX: `import filesystem` used to sit inside the try, so the
    # ImportError fallback below could hit an unbound `filesystem` name when
    # running outside Kodi. Import it unconditionally first.
    import filesystem
    try:
        import xbmc
        home_path = xbmc.translatePath('special://home').decode('utf-8')
    except ImportError:
        # Not running inside Kodi: derive the home path from this file.
        cur = filesystem.dirname(__file__)
        home_path = filesystem.join(cur, '../..')

    # Defaults used when the scraper config is missing or unreadable.
    key = 'ecbc86c92da237cb9faff6d3ddc4be6d'
    host = 'api.tmdb.org'
    try:
        xml_path = filesystem.join(
            home_path, 'addons/metadata.common.themoviedb.org/tmdb.xml')
        with filesystem.fopen(xml_path, 'r') as xml:
            content = xml.read()
            match = re.search(r'api_key=(\w+)', content)
            if match:
                key = match.group(1)
                debug('get_tmdb_api_key: ok')

            m = re.search(r'://(.+)/3/', content)
            if m:
                host = m.group(1)

    except BaseException as e:
        debug('get_tmdb_api_key: ' + str(e))

    return {'host': host, 'key': key}
def add_media_case():
    """Process queued add-media requests from the 'add_media' exchange file.

    Does nothing when this instance is configured as a client. Otherwise
    reads title/imdb line pairs and fires a background 'add_media_process'
    call per pair; the queue file is removed afterwards even on error.
    """
    if _addon.getSetting('role').decode('utf-8') == u'клиент':
        return

    path = filesystem.join(addon_data_path(), 'add_media')
    if filesystem.exists(path):
        try:
            with filesystem.fopen(path, 'r') as f:
                while True:
                    try:
                        # Records are stored as two lines: title then imdb id.
                        title = f.readline().strip(' \n\t\r').decode('utf-8')
                        imdb = f.readline().strip(' \n\t\r')

                        log.debug('add_media_case: ' + imdb)
                        log.debug(title)

                        if title and imdb:
                            call_bg('add_media_process', {
                                'title': title,
                                'imdb': imdb
                            })
                        else:
                            # Empty line pair == end of the queue file.
                            break
                    except BaseException as e:
                        log.print_tb(e)
        finally:
            filesystem.remove(path)
def download_torrent(url, path, settings):
	"""Fetch the torrent referenced by a topic *url* and store it at *path*.

	Scrapes the page for the second '#download > a' anchor, downloads it and
	checks the Content-Type mentions 'torrent'. Returns True on success,
	False otherwise.
	"""
	url = urllib2.unquote(url)
	debug('download_torrent:' + url)

	page = requests.get(real_url(url, settings))
	# debug(page.text.encode('cp1251'))

	soup = BeautifulSoup(clean_html(page.text), 'html.parser')
	anchors = soup.select('#download > a')
	link = anchors[1]['href'] if len(anchors) > 1 else None

	if not link:
		return False

	r = requests.get(real_url(link, settings))

	debug(r.headers)

	content_type = r.headers.get('Content-Type')
	if content_type is not None and 'torrent' not in content_type:
		return False

	try:
		with filesystem.fopen(path, 'wb') as torr:
			for chunk in r.iter_content(100000):
				torr.write(chunk)
		return True
	except:
		pass

	return False
	def clean_movie_by_imdbid(imdbid):
		"""Rebuild the deduplicated .alternative data for the movie *imdbid*.

		Picks the target library folder by genre, gathers ranked links from
		every stored copy of the movie and writes them back through
		STRMWriterBase.write_alternative.
		"""
		api = movieapi.MovieAPI(imdbid)
		genre = api['genres']
		if u'мультфильм' in genre:
			base_path = settings.animation_path()
		elif u'документальный' in genre:
			base_path = settings.documentary_path()
		else:
			base_path = settings.movies_path()

		mm = MoreRequests().get_movies_by_imdb(imdbid)

		from base import STRMWriterBase
		from base import Informer

		title = Informer().filename_with(api['title'], api['originaltitle'], api['year'])

		strm_path = filesystem.join(base_path, make_fullpath(title, '.strm'))
		nfo_path = filesystem.join(base_path, make_fullpath(title, '.nfo'))

		# Dropped the unused `strm_data = fopen(...).read()` line — it leaked
		# an open file handle and its result was never used.
		alt_data = []
		for m in mm:
			# BUG FIX: the loop previously read mm[0][3] on every iteration,
			# so only the first copy contributed links (repeated len(mm) times).
			# NOTE(review): assumes m[3] is the strm path like mm[0][3] was —
			# confirm against MoreRequests.get_movies_by_imdb.
			links_with_ranks = STRMWriterBase.get_links_with_ranks(m[3], settings, use_scrape_info=False)
			alt_data.extend(links_with_ranks)

		# Deduplicate the link dicts (dicts aren't hashable -> via item tuples).
		alt_data = [dict(t) for t in set([tuple(d.iteritems()) for d in alt_data])]
		with filesystem.save_make_chdir_context(base_path, 'STRMWriterBase.write_alternative'):
			STRMWriterBase.write_alternative(strm_path, alt_data)
def seeds_peers(item):
	"""Return seeds/peers statistics for a search-result *item*.

	nnm-club links read a cached JSON '.stat' file; hdclub and rutor links
	scrape the trackers of their cached .torrent file. Returns {} when the
	tracker is unknown or anything fails (errors are only logged).
	"""
	import player
	res = {}
	try:
		link = urllib.unquote(item['link'])
		settings = player.load_settings()
		if 'nnm-club' in link:
			debug('seeds_peers: ' + link)
			# Stats are cached per topic id under torrents_path()/nnmclub.
			t_id = re.search(r't=(\d+)', link).group(1)
			fn = filesystem.join(settings.torrents_path(), 'nnmclub', t_id + '.stat')
			debug(fn)
			with filesystem.fopen(fn, 'r') as stat_file:
				import json
				res = json.load(stat_file)
				debug(str(res))
		elif 'hdclub' in link:
			t_id = re.search(r'\.php.+?id=(\d+)', link).group(1)
			fn = filesystem.join(settings.torrents_path(), 'hdclub', t_id + '.torrent')
			return scrape_now(fn)
		elif 'rutor' in link:
			t_id = re.search(r'/torrent/(\d+)', link).group(1)
			fn = filesystem.join(settings.torrents_path(), 'rutor', t_id + '.torrent')
			return scrape_now(fn)

	except BaseException as e:
		debug(str(e))
	return res
Exemple #15
0
def save_hashes(torrent_path):
    """Record the torrent's info_hash in '<torrent_path>.hashes', once.

    No-op when the torrent file is missing, unreadable, or its hash is
    already present in the hashes file.
    """
    if not filesystem.exists(torrent_path):
        return

    player = TorrentPlayer()
    player.AddTorrent(torrent_path)
    data = player.GetLastTorrentData()
    if not data:
        return

    info_hash = data['info_hash']
    hashes_path = torrent_path + '.hashes'

    # Skip hashes that are already recorded.
    if filesystem.exists(hashes_path):
        with filesystem.fopen(hashes_path, 'r') as rf:
            if info_hash in rf.read():
                return

    with filesystem.fopen(hashes_path, 'a+') as wf:
        wf.write(info_hash + '\n')
Exemple #16
0
	def __init__(self):
		"""Read video database settings from Kodi's advancedsettings.xml.

		Starts with use_mysql False and an empty dict; when the file exists
		and parses, the <videodatabase> child fields (type/host/port/user/
		pass/name) are copied into self.dict. Errors are logged and leave
		the defaults untouched.
		"""
		self.use_mysql = False
		self.dict.clear()
	
		path = xbmc.translatePath('special://profile/advancedsettings.xml').decode('utf-8')
		self.LOG(path)
		if not filesystem.exists(path):
			return

		try:
			with filesystem.fopen(path, 'r') as f:
				content = f.read()
				log.debug(content)
				root = ET.fromstring(content)
		except IOError as e:
			self.LOG("I/O error({0}): {1}".format(e.errno, e.strerror))
			return
		except BaseException as e:
			self.LOG("error: " + str(e))
			return

		# Copy connection fields from the first <videodatabase> section.
		for section in root:
			if section.tag == 'videodatabase':
				for child in section:
					if child.tag in ['type', 'host', 'port', 'user', 'pass', 'name']:
						self.dict[child.tag] = child.text
						log.debug(child.text)
				self.LOG('<videodatabase> found')
				return
				
		self.LOG('<videodatabase> not found')
    def change_resume_file(self, dest):
        """Rewrite the bencoded resume file so 'save_path' points at *dest*.

        Decode failures leave the file untouched; a missing file is a no-op.
        """
        if not filesystem.exists(self.resume_file):
            return

        data = None
        with filesystem.fopen(self.resume_file, 'rb') as resume:
            from bencode import BTFailure
            try:
                from bencode import bdecode, bencode
                decoded = bdecode(resume.read())
                decoded['save_path'] = dest.encode('utf-8')
                data = bencode(decoded)
            except BTFailure:
                pass

        if data:
            with filesystem.fopen(self.resume_file, 'wb') as resume:
                resume.write(data)
	def change_resume_file(self, dest):
		"""Rewrite the bencoded resume file so 'save_path' points at *dest*.

		Decode failures leave the file untouched; a missing file is a no-op.
		"""
		if filesystem.exists(self.resume_file):
			data = None
			with filesystem.fopen(self.resume_file, 'rb') as resume:
				from bencode import BTFailure
				try:
					from bencode import bdecode, bencode
					decoded = bdecode(resume.read())
					decoded['save_path'] = dest.encode('utf-8')
					data = bencode(decoded)

				except BTFailure:
					pass

			if data:
				with filesystem.fopen(self.resume_file, 'wb') as resume:
					resume.write(data)
def save_dbs():
    """Record Kodi's current database versions as empty marker files.

    Clears the 'dbversions' folder, then scans kodi.log for
    'Running database version' lines and creates an empty file named after
    each reported version.
    """
    path = filesystem.join(_addondir, 'dbversions')

    with filesystem.save_make_chdir_context(path):

        # Drop stale markers first.
        for fn in filesystem.listdir(path):
            filesystem.remove(fn)

        log_dir = xbmc.translatePath('special://logpath').decode('utf-8')
        log_path = filesystem.join(log_dir, 'kodi.log')
        log.debug(log_path)
        with filesystem.fopen(log_path, 'r') as lf:
            for line in lf.readlines():
                if 'Running database version' in line:
                    log.debug(line)
                    # The marker name is the last token of the log line.
                    name = line.split(' ')[-1].strip('\r\n\t ').decode('utf-8')
                    with filesystem.fopen(name, 'w'):
                        pass
Exemple #20
0
 def has_link(strmFilename, link):
     """Return True when *link* already occurs in '<strmFilename>.alternative'."""
     alt_name = strmFilename + '.alternative'
     if not filesystem.isfile(alt_name):
         return False
     with filesystem.fopen(alt_name, "r") as alternative:
         for line in alternative:
             # Only plugin:// lines carry links; other lines are metadata.
             if line.startswith('plugin://') and link in urllib.unquote(line):
                 return True
     return False
	def has_link(strmFilename, link):
		"""Return True when *link* already occurs in '<strmFilename>.alternative'."""
		strmFilename_alt = strmFilename + '.alternative'
		if filesystem.isfile(strmFilename_alt):
			with filesystem.fopen(strmFilename_alt, "r") as alternative:
				for line in alternative:
					# Only plugin:// lines carry links; other lines are metadata.
					if line.startswith('plugin://'):
						if link in urllib.unquote(line):
							return True
		return False
def find_direct_link(url, settings):
	"""Return the cached direct download link for an nnmclub topic *url*
	(keyed by its 't=NNN' id), or None when no cache entry exists."""
	match = re.search(r'\.php.+?t=(\d+)', url)
	if not match:
		return None

	cached = filesystem.join(settings.torrents_path(), 'nnmclub', match.group(1))
	if not filesystem.exists(cached):
		return None

	debug('[nnm-club] Direct link found')
	with filesystem.fopen(cached, 'r') as f:
		return f.read()
	def make_alternative(self, strmFilename, link, parser):
		"""Append *link* and its parser metadata to '<strmFilename>.alternative'
		unless the link is already present; all write errors are swallowed.
		"""
		strmFilename_alt = strmFilename + '.alternative'

		s_alt = u''
		if filesystem.isfile(strmFilename_alt):
			with filesystem.fopen(strmFilename_alt, "r") as alternative:
				s_alt = alternative.read().decode('utf-8')

		if not (link in s_alt):
			try:
				with filesystem.fopen(strmFilename_alt, "a+") as alternative:
					# Metadata goes first as '#key=value' lines; the listed
					# long descriptive fields are excluded.
					for key, value in parser.Dict().iteritems():
						if key in ['director', 'studio', 'country', 'plot', 'actor', 'genre', 'country_studio']:
							continue
						alternative.write('#%s=%s\n' % (make_utf8(key), make_utf8(value)))
					alternative.write(link.encode('utf-8') + '\n')
			except:
				pass
def save_dbs():
	"""Record Kodi's current database versions as empty marker files.

	Clears the 'dbversions' folder, then scans kodi.log for
	'Running database version' lines and creates one empty file named after
	each reported version.
	"""
	path = filesystem.join(_addondir, 'dbversions')

	with filesystem.save_make_chdir_context(path):
		# Drop stale markers first.
		for entry in filesystem.listdir(path):
			filesystem.remove(entry)

		log_path = filesystem.join(
			xbmc.translatePath('special://logpath').decode('utf-8'), 'kodi.log')
		log.debug(log_path)

		with filesystem.fopen(log_path, 'r') as lf:
			for line in lf.readlines():
				if 'Running database version' not in line:
					continue
				log.debug(line)
				# The marker name is the last token of the log line.
				name = line.split(' ')[-1].strip('\r\n\t ').decode('utf-8')
				with filesystem.fopen(name, 'w'):
					pass
def download_torrent(url, path, settings):
	"""Download *url* through the tracker session and write it to *path*."""
	target = urllib2.unquote(url)
	debug('download_torrent:' + target)

	session = get_session(settings)
	r = session.get(target)
	with filesystem.fopen(path, 'wb') as torr:
		for chunk in r.iter_content(100000):
			torr.write(chunk)
Exemple #26
0
def scrape_now(fn):
    """Scrape seeds/peers for the torrent file *fn* from its trackers.

    Computes the torrent's info hash, then queries every announce URL in
    'announce-list' concurrently (one thread each) and returns the first
    successful response; a lone 'announce' entry is queried synchronously.
    Returns {} when decoding fails or no tracker answers.
    """
    debug(fn)
    with filesystem.fopen(fn, 'r') as fin:
        from bencode import BTFailure
        try:
            from bencode import bdecode
            decoded = bdecode(fin.read())
        except BTFailure:
            debug("Can't decode torrent data (invalid torrent link?)")
            return {}

        info = decoded['info']

        # The sha1 of the bencoded info dict identifies the torrent.
        import hashlib
        from bencode import bencode
        info_hash = hashlib.sha1(bencode(info)).hexdigest()

        hashes = [info_hash]
        import scraper

        result = []
        threads = []

        def start_scrape(announce):
            # Fire one scrape per tracker in the background; first answer wins.
            def do_scrape():
                try:
                    res = scraper.scrape(announce, hashes, 0.25)
                    result.append(res[info_hash])
                except:
                    debug(announce + ' - not working')
                    pass

            import threading
            t = threading.Thread(target=do_scrape)
            threads.append(t)
            t.start()

        if 'announce-list' in decoded:
            for announce in decoded['announce-list']:
                start_scrape(announce[0])

            # Busy-wait until some tracker answered or all threads finished.
            alive = True
            while not result and alive:
                alive = False
                for t in threads:
                    if t.is_alive():
                        alive = True
                        break
        elif 'announce' in decoded:
            res = scraper.scrape(decoded['announce'], hashes)
            return res[info_hash]

        if result:
            return result[0]

    return {}
    def load(self):
        """Read the addon settings XML into self.root and remember its mtime.

        A missing file resets the state (root=None, mtime=0).
        """
        if not filesystem.exists(self._addon_xml):
            self.root = None
            self.mtime = 0
            return

        with filesystem.fopen(self._addon_xml, 'r') as f:
            self.root = ET.fromstring(f.read())
        self.mtime = filesystem.getmtime(self._addon_xml)
	def load(self):
		"""Read the addon settings XML into self.root and remember its mtime;
		a missing file resets the state (root=None, mtime=0)."""
		if not filesystem.exists(self._addon_xml):
			self.root = None
			self.mtime = 0
			return

		with filesystem.fopen(self._addon_xml, 'r') as f:
			content = f.read()
			self.root = ET.fromstring(content)
		self.mtime = filesystem.getmtime(self._addon_xml)
def find_direct_link(url, settings):
    """Return the cached direct download link for an nnmclub topic *url*
    (keyed by its 't=NNN' id), or None when no cache entry exists."""
    match = re.search(r'\.php.+?t=(\d+)', url)
    if match:
        path_store = filesystem.join(settings.torrents_path(), 'nnmclub',
                                     match.group(1))
        if filesystem.exists(path_store):
            debug('[nnm-club] Direct link found')
            with filesystem.fopen(path_store, 'r') as f:
                return f.read()
    return None
Exemple #30
0
def fprint_tb(filename):
    """Write the traceback of the currently-handled exception to *filename*
    (at most 10 stack frames)."""
    import sys, filesystem
    import traceback

    exc_type, exc_val, exc_tb = sys.exc_info()
    with filesystem.fopen(filename, 'w') as out:
        traceback.print_exception(exc_type, exc_val, exc_tb, limit=10, file=out)
	def _AddTorrent(self, path):
		"""Load the torrent file at *path* into the engine as raw base64 data.

		Sets self.status from engine.load_torrent; a missing file is a no-op
		and a KeyError from the engine is ignored.
		"""
		if filesystem.exists(path):
			with filesystem.fopen(path, 'rb') as tfile:
				content = tfile.read()

			try:
				self.status = self.engine.load_torrent(base64.b64encode(content), 'RAW')
				# BUG FIX: this log call previously sat inside the
				# `except KeyError` handler, so it only ran on FAILURE.
				debug('AcePlayer: Torrent loaded')
			except KeyError:
				pass
Exemple #32
0
    def write_alternative(strmFilename, links_with_ranks):
        """Overwrite '<strmFilename>.alternative' with one record per variant:
        '#key=value' metadata lines followed by the link itself. Variants
        without a 'link' key are skipped."""
        alt_path = strmFilename + '.alternative'
        with filesystem.fopen(alt_path, 'w') as alternative:
            for variant in links_with_ranks:
                if 'link' not in variant:
                    continue
                for key, value in variant.iteritems():
                    if key == 'link':
                        continue
                    alternative.write('#%s=%s\n' % (make_utf8(key), make_utf8(value)))
                alternative.write(make_utf8(variant['link']) + '\n')
Exemple #33
0
 def __exit__(self, exc_type, exc_val, exc_tb):
     """Context-manager exit: dump a pending exception's traceback (up to 10
     frames) to self.filename() and suppress it by returning True."""
     if exc_type:
         import filesystem
         with filesystem.fopen(self.filename(), 'w') as out:
             import traceback
             traceback.print_exception(exc_type,
                                       exc_val,
                                       exc_tb,
                                       limit=10,
                                       file=out)
         return True
	def _AddTorrent(self, path):
		"""Load the torrent file at *path* into the engine as raw base64 data.

		Sets self.status from engine.load_torrent; a missing file is a no-op
		and a KeyError from the engine is ignored.
		"""
		if filesystem.exists(path):
			with filesystem.fopen(path, 'rb') as tfile:
				content = tfile.read()

			try:
				self.status = self.engine.load_torrent(base64.b64encode(content), 'RAW')
				# BUG FIX: this log call previously sat inside the
				# `except KeyError` handler, so it only ran on FAILURE.
				debug('AcePlayer: Torrent loaded')
			except KeyError:
				pass
def save_download_link(parser, settings, link):
	"""Best-effort cache of the direct download *link* for an nnmclub topic.

	The cache file is named after the topic id ('t=NNN') inside
	<torrents_path>/nnmclub; all errors are swallowed on purpose.
	"""
	try:
		store = filesystem.join(settings.torrents_path(), 'nnmclub')
		if not filesystem.exists(store):
			filesystem.makedirs(store)

		match = re.search(r'\.php.+?t=(\d+)', parser.link())
		if match:
			target = filesystem.join(store, match.group(1))
			with filesystem.fopen(target, 'w') as f:
				f.write(link)
	except:
		pass
def save_download_link(parser, settings, link):
    """Cache the direct download *link* for an nnmclub topic.

    The cache file is named after the topic id ('t=NNN') inside
    <torrents_path>/nnmclub. Unlike the swallow-all variant, errors
    propagate to the caller here.

    Cleanup: removed the leftover `if True:` debug scaffolding (a disabled
    try/except); behavior is unchanged — errors still propagate.
    """
    path_store = filesystem.join(settings.torrents_path(), 'nnmclub')
    if not filesystem.exists(path_store):
        filesystem.makedirs(path_store)
    source = parser.link()
    match = re.search(r'\.php.+?t=(\d+)', source)
    if match:
        with filesystem.fopen(filesystem.join(path_store, match.group(1)),
                              'w') as f:
            f.write(link)
def process_chunk(chunk, data_path, seeds_peers):
	"""Dump per-topic seeds/peers stats as JSON files under <data_path>/nnmclub.

	For each item, item[1] keys into *seeds_peers* and item[2] names the stat
	file; items whose key is absent get their (empty) stat file removed again.
	"""
	import json

	for item in chunk:
		stat_path = filesystem.join(data_path, 'nnmclub', item[2])
		missing = False
		with filesystem.fopen(stat_path, 'w') as stat_file:
			try:
				json.dump(seeds_peers[item[1]], stat_file)
			except KeyError:
				missing = True
		if missing:
			filesystem.remove(stat_path)
Exemple #38
0
def process_chunk(chunk, data_path, seeds_peers):
    """Dump per-topic seeds/peers stats as JSON under <data_path>/nnmclub.

    *chunk* items are indexable records: item[1] keys into *seeds_peers*,
    item[2] is the stat file name. Files whose key is absent are removed.
    """
    import json

    for item in chunk:
        filename = filesystem.join(data_path, 'nnmclub', item[2])
        remove_file = False
        with filesystem.fopen(filename, 'w') as stat_file:
            try:
                json.dump(seeds_peers[item[1]], stat_file)
            except KeyError:
                # No stats for this topic: delete the just-created empty file.
                remove_file = True
        if remove_file:
            filesystem.remove(filename)
Exemple #39
0
    def make_alternative(self, strmFilename, link, parser):
        """Append *link* and its parser metadata to '<strmFilename>.alternative'
        unless the link is already present; all write errors are swallowed.
        """
        strmFilename_alt = strmFilename + '.alternative'

        s_alt = u''
        if filesystem.isfile(strmFilename_alt):
            with filesystem.fopen(strmFilename_alt, "r") as alternative:
                s_alt = alternative.read().decode('utf-8')

        if not (link in s_alt):
            try:
                with filesystem.fopen(strmFilename_alt, "a+") as alternative:
                    # Metadata goes first as '#key=value' lines; the listed
                    # long descriptive fields are excluded.
                    for key, value in parser.Dict().iteritems():
                        if key in [
                                'director', 'studio', 'country', 'plot',
                                'actor', 'genre', 'country_studio'
                        ]:
                            continue
                        alternative.write('#%s=%s\n' %
                                          (make_utf8(key), make_utf8(value)))
                    alternative.write(link.encode('utf-8') + '\n')
            except:
                pass
        def Refresh(self):
            """Reload progress state from self.progress_file_path and apply it.

            The file holds newline-separated update() arguments with the
            first one an int; an unreadable file falls back to [0]. Any
            other error is silently ignored.
            """
            if filesystem.exists(self.progress_file_path):
                try:
                    try:
                        with filesystem.fopen(self.progress_file_path,
                                              'r') as progress_file:
                            args = progress_file.read().split('\n')
                    except:
                        args = [0]

                    args[0] = int(args[0])
                    self.update(*args)
                except:
                    pass
def download_torrent(url, path, settings):
    """Download an hdclub torrent to *path*, adding the passkey when absent.

    The details URL is rewritten to the download endpoint first. Returns
    True on success; False (with the traceback logged) on any error.
    """
    url = url.replace("details.php", "download.php")
    if "passkey" not in url:
        url += "&passkey=" + settings.hdclub_passkey

    try:
        import shutil

        src = urllib2.urlopen(url)
        with filesystem.fopen(path, "wb") as dst:
            shutil.copyfileobj(src, dst)
        return True
    except BaseException as e:
        print_tb(e)
        return False
def create_mark_file():
	"""Create the 'version_latest' marker file on first run.

	After writing the marker it downloads the 'version_latest' release asset
	from GitHub and logs its content. Best effort: errors are logged only.
	"""
	import urllib2, shutil
	path = filesystem.join(_addondir, 'version_latest')
	if not filesystem.exists(path):
		try:
			with filesystem.fopen(path, 'w') as f:
				f.write('test')

			if filesystem.exists(path):
				# NOTE(review): fetching the release asset here looks like an
				# install/update counter ping — confirm before changing.
				url = 'https://github.com/vadyur/script.media.aggregator/releases/download/ver_0.15.2/version_latest'
				response = urllib2.urlopen(url)
				log.debug(response.read())
		except BaseException as e:
			log.print_tb(e)
			pass
def create_mark_file():
    """Create the 'version_latest' marker file on first run and fetch the
    matching GitHub release asset, logging its content.

    Best effort: any failure is logged and ignored.
    """
    import urllib2, shutil
    marker = filesystem.join(_addondir, 'version_latest')
    if filesystem.exists(marker):
        return

    try:
        with filesystem.fopen(marker, 'w') as f:
            f.write('test')

        if filesystem.exists(marker):
            url = 'https://github.com/vadyur/script.media.aggregator/releases/download/ver_0.15.2/version_latest'
            response = urllib2.urlopen(url)
            log.debug(response.read())
    except BaseException as e:
        log.print_tb(e)
Exemple #44
0
def seeds_peers(item):
    """Return scrape statistics (e.g. seeds/peers) for a generated item.

    item -- dict with at least a URL-quoted 'link' pointing at the
    originating tracker page.

    Depending on the tracker, the stats come from a cached JSON '.stat'
    file (nnm-club) or from scraping the cached '.torrent' file
    (hdclub/elitehd, bluebird, rutor).  Returns {} for unknown trackers
    or on any failure.
    """
    res = {}
    try:
        link = urllib.unquote(item['link'])
        try:
            import player
            settings = player.load_settings()
        except:
            # not running inside the player context - use the globals
            settings = Settings.current_settings
        if 'nnm-club' in link:
            debug('seeds_peers: ' + link)
            t_id = re.search(r't=(\d+)', link).group(1)
            fn = filesystem.join(settings.torrents_path(), 'nnmclub',
                                 t_id + '.stat')
            debug(fn)
            with filesystem.fopen(fn, 'r') as stat_file:
                import json
                res = json.load(stat_file)
                debug(str(res))
        elif 'hdclub' in link:
            t_id = re.search(r'\.php.+?id=(\d+)', link).group(1)
            fn = filesystem.join(settings.torrents_path(), 'elitehd',
                                 t_id + '.torrent')
            return scrape_now(fn)
        elif 'bluebird' in link:
            t_id = re.search(r'\.php.+?id=(\d+)', link).group(1)
            fn = filesystem.join(settings.torrents_path(), 'bluebird',
                                 t_id + '.torrent')
            if not filesystem.exists(fn):
                # cache miss - fetch the torrent first, then scrape it
                import bluebird
                bluebird.download_torrent(link, fn, settings)
            return scrape_now(fn)
        elif 'rutor' in link:
            t_id = re.search(r'/torrent/(\d+)', link).group(1)
            fn = filesystem.join(settings.torrents_path(), 'rutor',
                                 t_id + '.torrent')
            return scrape_now(fn)
        # NOTE(review): a dead 'kinohd' branch was removed here - it sat
        # inside a no-op triple-quoted string and referenced an undefined
        # 'self'.  Reintroduce it as real code if kinohd support returns.
    except BaseException as e:
        debug(str(e))
    return res
Exemple #45
0
    def get_links_with_ranks(strmFilename, settings, use_scrape_info=False):
        """Parse '<strmFilename>.alternative' and return its plugin:// links.

        The .alternative file interleaves '#key=value' metadata lines with
        'plugin://script.media.aggregator...' link lines; each metadata run
        describes the link line that follows it.  Returns a list of dicts
        (always containing 'rank' and 'link', plus accumulated metadata),
        sorted by ascending rank.  When use_scrape_info is True, live
        seeds/peers stats from seeds_peers() are merged into each item.
        """
        #import vsdbg
        #vsdbg._bp()

        strmFilename_alt = strmFilename + '.alternative'
        items = []
        saved_dict = {}  # metadata accumulated for the upcoming link line
        if filesystem.isfile(strmFilename_alt):
            with filesystem.fopen(strmFilename_alt, "r") as alternative:
                curr_rank = 1
                while True:
                    line = alternative.readline()
                    if not line:
                        break
                    # file is read as bytes (Python 2 style) - decode first
                    line = line.decode('utf-8')
                    if line.startswith('#'):
                        # '#key=value' header: stash metadata for next link
                        line = line.lstrip('#')
                        parts = line.split('=')
                        if len(parts) > 1:
                            saved_dict[parts[0]] = parts[1].strip(' \n\t\r')
                    elif line.startswith('plugin://script.media.aggregator'):
                        try:
                            saved_dict['link'] = line.strip(u'\r\n\t ')
                            if use_scrape_info:
                                sp = seeds_peers(saved_dict)
                                saved_dict = dict(saved_dict, **sp)
                            if 'rank' in saved_dict:
                                # explicit rank in file wins
                                curr_rank = float(saved_dict['rank'])
                            else:
                                curr_rank = get_rank(
                                    saved_dict.get('full_title', ''),
                                    saved_dict, settings)
                        except BaseException as e:
                            import log
                            log.print_tb(e)
                            curr_rank = 1  # neutral rank when ranking fails

                        item = {
                            'rank': curr_rank,
                            'link': line.strip(u'\r\n\t ')
                        }
                        items.append(dict(item, **saved_dict))
                        saved_dict.clear()  # metadata applies to one link only

        items.sort(key=operator.itemgetter('rank'))
        #debug('Sorded items')
        #debug(items)
        return items
def get_tmdb_api_key():
	"""Extract the TMDB API key from the common TMDB metadata scraper addon.

	Reads addons/metadata.common.themoviedb.org/tmdb.xml under the Kodi
	home directory and pulls the 'api_key=...' value out of it.

	On any exception (no Kodi, missing addon, unreadable file) a known
	public fallback key is returned.  NOTE: when the file is readable but
	contains no api_key, the function falls through and returns None -
	callers appear to rely on this, so it is preserved.
	"""
	try:
		import xbmc, filesystem
		xml_path = xbmc.translatePath('special://home').decode('utf-8')
		xml_path = filesystem.join(xml_path, 'addons/metadata.common.themoviedb.org/tmdb.xml')
		with filesystem.fopen(xml_path, 'r') as xml:
			content = xml.read()
			# raw string: '\w' in a plain literal is an invalid escape
			# (warning on modern Python); regex behavior is unchanged
			match = re.search(r'api_key=(\w+)', content)
			if match:
				key = match.group(1)
				debug('get_tmdb_api_key: ok')
				return key

	except BaseException as e:
		debug('get_tmdb_api_key: ' + str(e))
		return 'f7f51775877e0bb6703520952b3c7840'
def download_torrent(url, path, settings):
    """Download an hdclub .torrent into *path*; True on success.

    Hashes for *path* are recorded via save_hashes() both before and
    after the download (presumably so the caller can detect the change -
    TODO confirm against base.save_hashes).
    """
    from base import save_hashes
    save_hashes(path)

    url = url.replace('details.php', 'download.php')
    if 'passkey' not in url:
        url += '&passkey=' + settings.hdclub_passkey

    try:
        import shutil
        src = urllib2.urlopen(real_url(url))
        with filesystem.fopen(path, 'wb') as dst:
            shutil.copyfileobj(src, dst)
        save_hashes(path)
        return True
    except BaseException as e:
        print_tb(e)
        return False
	def download_image(self, url, type):
		"""Fetch a JPEG from *url* into the temp dir.

		*type* becomes part of the temp filename (e.g. 'poster').
		Returns the local file path, or None when the response is not
		an image/jpeg.
		"""
		r = requests.get(url)
		debug(r.headers)

		if r.headers['Content-Type'] != 'image/jpeg':
			# e.g. an HTML error page - nothing worth saving
			return None

		filename = filesystem.join(self.__temp_path, 'temp.media-aggregator.' + type + '.jpg')

		debug('Start download: ' + filename + ' from ' + url)

		with filesystem.fopen(filename, 'wb') as f:
			for chunk in r.iter_content(100000):
				f.write(chunk)

		debug('End download: ' + filename)
		return filename
Exemple #49
0
    def decoded(self):
        """Lazily read and bdecode self.path; cache and return the result.

        Returns None when the file could not be read or is not valid
        bencoded data.
        """
        if self._decoded:
            return self._decoded

        with filesystem.fopen(self.path, 'rb') as torr:
            raw = torr.read()

        if raw is None:
            return None

        from bencode import BTFailure
        try:
            from bencode import bdecode
            self._decoded = bdecode(raw)
        except BTFailure:
            debug("Can't decode torrent data (invalid torrent link?)")
            return None

        return self._decoded
	def GetLastTorrentData(self):
		"""Read and decode the torrent at self.path.

		Returns a dict with 'info_hash', 'announce', 'name' and 'files'
		(a list of {'index', 'name', 'size'} entries for playable files),
		or None when the file cannot be decoded or its names are not
		valid UTF-8.  Also stores the computed hash in self.info_hash.
		"""
		#raise NotImplementedError("def ###: not imlemented.\nPlease Implement this method")

		data = None
		with filesystem.fopen(self.path, 'rb') as torr:
			data = torr.read()

		if data is None:
			return None

		from bencode import BTFailure
		try:
			from bencode import bdecode
			decoded = bdecode(data)
		except BTFailure:
			debug("Can't decode torrent data (invalid torrent link?)")
			return None

		info = decoded['info']

		# info_hash = SHA1 of the re-encoded 'info' dict (BitTorrent spec)
		import hashlib
		from bencode import bencode
		self.info_hash = hashlib.sha1(bencode(info)).hexdigest()
		#debug(self.info_hash)

		name = '.'
		playable_items = []
		try:
			if 'files' in info:
				# multi-file torrent: collect every playable file
				for i, f in enumerate(info['files']):
					# debug(i)
					# debug(f)
					name = os.sep.join(f['path'])
					size = f['length']
					#debug(name)
					if TorrentPlayer.is_playable(name):
						playable_items.append({'index': i, 'name': TorrentPlayer.Name(name), 'size': size})
					# NOTE(review): re-assigned every iteration; the final
					# 'name' is always the torrent's top-level name
					name = TorrentPlayer.Name(info['name'])
			else:
				# single-file torrent: the torrent name is the file
				playable_items = [ {'index': 0, 'name': TorrentPlayer.Name(info['name']), 'size': info['length'] } ]
		except UnicodeDecodeError:
			return None

		return { 'info_hash': self.info_hash, 'announce': decoded['announce'], 'files': playable_items, 'name': name }
def download_torrent(url, path, settings):
    """Download a .torrent from tr.anidub.com into *path*.

    Finds the torrent link on the release page (preferring the '#tv720'
    quality block), downloads it with the page as Referer, and records
    file hashes via save_hashes().  Returns True on success.
    """
    from base import save_hashes
    save_hashes(path)

    url = urllib2.unquote(url)
    debug('download_torrent:' + url)

    session = get_session(settings)
    page = session.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')

    # Try the 720p block first, then the generic torrent header link.
    anchor = None
    for selector in ('#tv720 div.torrent_h a', 'div.torrent_h > a'):
        try:
            anchor = soup.select_one(selector)
        except TypeError:
            anchor = None
        if anchor is not None:
            break

    if anchor is None:
        return False

    href = 'https://tr.anidub.com' + anchor['href']
    debug(session.headers)
    r = session.get(href, headers={'Referer': url})
    debug(r.headers)

    # An HTML error page instead of a torrent -> bail out.
    if 'Content-Type' in r.headers and 'torrent' not in r.headers['Content-Type']:
        return False

    try:
        with filesystem.fopen(path, 'wb') as torr:
            for chunk in r.iter_content(100000):
                torr.write(chunk)
        save_hashes(path)
        return True
    except:
        pass

    return False
	def videotype(self):
		"""Return 'episode' or 'movie' for the current item.

		First inspects the item's NFO file; when that gives no answer,
		falls back to Kodi's ListItem.DBTYPE info label ('' on failure).
		"""
		base = self.settings.base_path().encode('utf-8')
		rel = urllib.unquote(self.params.get('path', ''))
		nfo_name = urllib.unquote(self.params.get('nfo', ''))

		from nforeader import NFOReader
		nfo_full = NFOReader.make_path(base, rel, nfo_name)

		if filesystem.exists(nfo_full):
			with filesystem.fopen(nfo_full, 'r') as nfo:
				content = nfo.read()
				if '<episodedetails>' in content:
					return 'episode'
				if '<movie>' in content:
					return 'movie'

		# No NFO hint - ask Kodi about the focused list item.
		try:
			import xbmc
			return xbmc.getInfoLabel('ListItem.DBTYPE')
		except BaseException as e:
			log.print_tb(e)
			return ''
	def __init__(self, path, temp_path):
		"""Load the NFO XML at *path* into self.__root.

		Content after the closing </movie> tag (if any) is cut off before
		parsing, since NFO files may carry trailing junk.  A missing file
		leaves self.__root as None; I/O errors are logged and swallowed.
		"""
		self.__root = None
		self.__path = path
		self.__temp_path = temp_path

		if not filesystem.exists(path):
			return

		try:
			with filesystem.fopen(path, 'r') as src:
				content = src.read()
				try:
					# keep only up to and including '</movie>'
					end = content.index('</movie>') + len('</movie>')
					content = content[0:end]
				except:
					pass
				self.__root = ET.fromstring(content)
		except IOError as e:
			debug("NFOReader: I/O error({0}): {1}".format(e.errno, e.strerror))
def add_media_process(title, imdb, settings):
	"""Run search generators for every enabled tracker and record the count.

	Triggers a Kodi video-library update when anything was generated (and
	no scan is already running), then writes '<imdb>.ended' with the count
	so the caller knows the search finished.
	"""
	count = 0

	if _addon.getSetting('hdclub_enable') == 'true':
		count += hdclub.search_generate(title, imdb, settings)
	if _addon.getSetting('nnmclub_enable') == 'true':
		count += nnmclub.search_generate(title, imdb, settings)

	if count and not xbmc.getCondVisibility('Library.IsScanningVideo'):
		xbmc.executebuiltin('UpdateLibrary("video")')

	marker = filesystem.join(addon_data_path(), imdb + '.ended')
	with filesystem.fopen(marker, 'w') as f:
		f.write(str(count))
def download_torrent(url, path, settings):
	"""Download a .torrent from tr.anidub.com into *path*.

	Finds the torrent link on the release page (preferring the '#tv720'
	quality block, then the generic torrent header link), downloads it
	with the page URL as Referer, and writes it to *path* in chunks.
	Returns True on success, False otherwise.

	Fix: the first try-block mixed tab and space indentation, which is a
	hard TabError on Python 3; indentation is normalized to tabs.
	Trailing whitespace removed.  Behavior is unchanged.
	"""
	url = urllib2.unquote(url)
	debug('download_torrent:' + url)

	s = get_session(settings)

	page = s.get(url)
	#debug(page.text.encode('utf-8'))
	soup = BeautifulSoup(page.text, 'html.parser')

	try:
		a = soup.select_one('#tv720 div.torrent_h a')
	except TypeError:
		a = None

	try:
		if a is None:
			a = soup.select_one('div.torrent_h > a')
	except TypeError:
		a = None

	if a is not None:
		href = 'http://tr.anidub.com' + a['href']
		debug(s.headers)
		r = s.get(href, headers={'Referer': url})
		debug(r.headers)

		# An HTML error page instead of a torrent -> give up.
		if 'Content-Type' in r.headers:
			if 'torrent' not in r.headers['Content-Type']:
				return False

		try:
			with filesystem.fopen(path, 'wb') as torr:
				for chunk in r.iter_content(100000):
					torr.write(chunk)
			return True
		except:
			pass

	return False
	def relativevideofile(self):
		"""Return the playable item's path relative to the torrent root.

		For multi-file torrents this prefixes the item name with the
		torrent's directory name; for single-file or undecodable torrents
		the bare item name is returned.
		"""
		fallback = self.playable_item['name']

		with filesystem.fopen(self.torrent_path, 'rb') as torr:
			data = torr.read()

			if data is None:
				return fallback

			from bencode import BTFailure
			try:
				from bencode import bdecode
				decoded = bdecode(data)
			except BTFailure:
				debug("Can't decode torrent data (invalid torrent link?)")
				return fallback

			info = decoded['info']

			if 'files' in info:
				# multi-file torrent: files live under the torrent's name
				from base import TorrentPlayer
				return filesystem.join(TorrentPlayer.Name(info['name']), fallback)

		return fallback
	def get_links_with_ranks(strmFilename, settings, use_scrape_info = False):
		"""Parse '<strmFilename>.alternative' and return its plugin:// links.

		The file interleaves '#key=value' metadata lines with
		'plugin://script.media.aggregator...' link lines; each metadata
		run describes the link that follows.  Returns a list of dicts
		(always with 'rank' and 'link'), sorted by ascending rank.
		When use_scrape_info is True, seeds_peers() stats are merged in.
		NOTE(review): unlike the other variant of this function in the
		codebase, 'link' is stored into the metadata dict only when
		use_scrape_info is set, and get_rank() is called with
		saved_dict['full_title'] directly (a missing key falls into the
		bare except and yields rank 1).
		"""
		strmFilename_alt = strmFilename + '.alternative'
		items = []
		saved_dict = {}  # metadata accumulated for the upcoming link line
		if filesystem.isfile(strmFilename_alt):
			with filesystem.fopen(strmFilename_alt, "r") as alternative:
				curr_rank = 1
				while True:
					line = alternative.readline()
					if not line:
						break
					# file is read as bytes (Python 2 style) - decode first
					line = line.decode('utf-8')
					if line.startswith('#'):
						# '#key=value' header: stash metadata for next link
						line = line.lstrip('#')
						parts = line.split('=')
						if len(parts) > 1:
							saved_dict[parts[0]] = parts[1].strip(' \n\t\r')
					elif line.startswith('plugin://script.media.aggregator'):
						try:
							if use_scrape_info:
								saved_dict['link'] = line.strip(u'\r\n\t ')
								sp = seeds_peers(saved_dict)
								saved_dict = dict(saved_dict, **sp)
							if 'rank' in saved_dict:
								# explicit rank in the file wins
								curr_rank = float(saved_dict['rank'])
							else:
								curr_rank = get_rank(saved_dict['full_title'], saved_dict, settings)
						except:
							curr_rank = 1  # neutral rank when ranking fails

						item = {'rank': curr_rank, 'link': line.strip(u'\r\n\t ')}
						items.append(dict(item, **saved_dict))
						saved_dict.clear()  # metadata applies to one link only

		items.sort(key=operator.itemgetter('rank'))
		#debug('Sorded items')
		#debug(items)
		return items
	def setSetting(self, id, val):
		"""Set (or create) a <setting id=... value=...> entry and persist it.

		The whole settings XML is rewritten to self._addon_xml and
		self.mtime is refreshed afterwards so external changes can be
		detected.
		"""
		# ElementTree on Python 2.6 does not support the
		# "./setting[@id='...']" predicate, hence the manual scan.
		found = None
		for node in self.root.findall("setting"):
			if node.attrib.get('id') == id:
				found = node
				break

		if found is None:
			ET.SubElement(self.root, 'setting', attrib={'id': str(id), 'value': str(val)})
		else:
			found.set('value', str(val))

		with filesystem.fopen(self._addon_xml, 'w') as out:
			out.write('<settings>\n')
			for node in self.root:
				out.write('    <setting id="%s" value="%s" />\n' % (Addon._xml(node.get('id')), Addon._xml(node.get('value'))))
			out.write('</settings>\n')

		self.mtime = filesystem.getmtime(self._addon_xml)
def add_media_case():
	"""Process queued add-media requests from the 'add_media' file.

	Skipped entirely in client role.  The file holds pairs of lines
	(title, then imdb id); each pair is handed to add_media_process().
	The file is removed once fully processed.
	"""
	if _addon.getSetting('role').decode('utf-8') == u'клиент':
		return

	path = filesystem.join(addon_data_path(), 'add_media')
	if not filesystem.exists(path):
		return

	with filesystem.fopen(path, 'r') as f:
		while True:
			try:
				title = f.readline().strip(' \n\t\r').decode('utf-8')
				imdb = f.readline().strip(' \n\t\r')

				log.debug('add_media_case: ' + imdb)
				log.debug(title)

				# an empty pair means end of file
				if not (title and imdb):
					break
				add_media_process(title, imdb, player.load_settings())
			except BaseException as e:
				log.print_tb(e)

	filesystem.remove(path)
	def __init__(self):
		if not filesystem.exists(SOURCES_REAL_PATH):
			with filesystem.fopen(SOURCES_REAL_PATH, 'w') as src:
				src.write('<sources>\n'
						  '    <programs>\n'
				          '        <default pathversion="1"></default>\n'
				          '    </programs>\n'
				          '    <video>\n'
				          '        <default pathversion="1"></default>\n'
				          '    </video>\n'
				          '    <music>\n'
				          '        <default pathversion="1"></default>\n'
				          '    </music>\n'
				          '    <pictures>\n'
				          '        <default pathversion="1"></default>\n'
				          '    </pictures>\n'
				          '    <files>\n'
				          '        <default pathversion="1"></default>\n'
				          '    </files>\n'
						  '</sources>\n'
				)

		self.xml_tree = ET.parse(SOURCES_REAL_PATH)
		self.sources = None