# Example no. 1
	def __basic_handle_feed__(self, feed, options):
		"""Handle a single feed by timestamp.

		Fetches the feed's torrent list, filters it by the feed's text rules
		and optional regex, downloads the survivors and finally updates the
		feed's stored timestamp.

		feed -- the feed name (section key used with files_manager.config_graber).
		options -- dict of run options; 'download_folder', 'report' and
			'last_update' may be overridden here from the feed's own config.
		"""
		if debug_mode():
			print('download_manager().__basic_handle_feed__() started with feed {} and the following options:{}'.format(feed, options))
		# A per-feed download folder, when configured, overrides the global one.
		if self.files_manager.config_graber('download_folder', feed):
			options['download_folder'] = self.files_manager.config_graber('download_folder', feed)
		# Decide whether this feed should be reported, unless reporting was
		# already disabled globally ('no').
		if not options['report'] == 'no':
			if self.files_manager.config_graber('report', feed) == 'no':
				options['report'] = False
		# Timestamp of the last successful run for this feed.
		options['last_update'] = self.files_manager.config_graber('last_update', feed)
		# Fetch the feed; on an I/O failure skip it now and retry on the next run.
		# torrents.torrents is expected to be [[torrent name, torrent address], ...]
		try:
			torrents = torrents_grabber(options['last_update'], self.files_manager.config_graber('url', feed))
			torrents.torrents_finder()
			skip_feed = False
		except IOError:
			skip_feed = True
		if not skip_feed:
			# Hoist the per-feed word lists out of the loop: they do not change
			# between torrents.
			must_include = self.files_manager.config_graber('must-include', feed, ',')
			dont_include = self.files_manager.config_graber('dont-include', feed, ',')
			one_include = self.files_manager.config_graber('one-include', feed, ',')
			# Keep only the torrents that pass the text filters. Build a new
			# list instead of calling remove() while iterating, which would
			# silently skip the element after every removal and let bad
			# torrents through.
			torrents.torrents = [
				torrent for torrent in torrents.torrents
				if self.text_analyzer.check_words_status(must_include, dont_include, one_include, torrent[0])
			]
			# Optional regex filter on the torrent name.
			regex = self.files_manager.config_graber('regex', feed)
			if regex:
				torrents.torrents = [torrent for torrent in torrents.torrents if re.search(regex, torrent[0])]
			torrent_names = [torrent[0] for torrent in torrents.torrents]  # titles of all passed torrents
			torrent_address = [torrent[1] for torrent in torrents.torrents]  # full addresses of the torrents
			# Download first, update the timestamp only on success: downloading
			# something twice is harmless, losing data on a write failure is not.
			try:
				self.files_manager.grab_torrents(torrent_address, location=options['download_folder'])
				got_torrents = True
			except Exception:
				# Best-effort: one broken feed must not abort the whole run.
				got_torrents = False
				print('crashed on:', *torrents.torrents)
				print('I will continue to download other feeds and will come back to this feed on the next run.')
			if got_torrents:
				if torrent_names and (options['report'] == 'yes' or self.report_all):
					self.report.append(torrent_names)  # collected for the report at the end
				# Persist the new timestamp now that this feed is done.
				self.files_manager.set_time(torrents.new_time(), feed)
		if debug_mode():
			print('download_manager().__basic_handle_feed__() finished with current feed.')