Example 1
def __isCacheUpdateNeeded(force=False, cacheFilename=None, frequency=360):
    '''
    Used to determine if a cachefile needs to be updated

    force: force an update
    cacheFilename: The full path of the file to check
    frequency: how often the file should be updated in minutes

    Returns Boolean
    '''
    logger = logging.getLogger(__name__)
    try:
        # Honor the force flag before checking timestamps
        if force:
            return True

        # Get the modification time
        lastModified = __modificationDate(cacheFilename)

        if lastModified == -1:
            return True

        difference = math.ceil(time.time()/60 - lastModified/60)
        logger.debug("Checking cache: {0} {1}:{2}".format(cacheFilename, frequency, difference))
        if difference >= frequency:
            logger.debug("Cache update for {0} needed".format(cacheFilename))
            return True
        else:
            logger.debug("Cache update for {0} not needed".format(cacheFilename))
            return False
            
    except Exception as e:
        logger.error("Cache update check for {0} could not be performed: {1}".format(cacheFilename, e))
        # Assume an update is needed if the check itself fails
        return True
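
A minimal usage sketch for the check above; the path and frequency are illustrative placeholders, and since the double leading underscore marks the function as module-private by convention, the call is assumed to happen inside the same module:

# Hypothetical usage from within the same module
if __isCacheUpdateNeeded(cacheFilename='/tmp/api.cache', frequency=60):
    pass  # refresh the cache file here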
Example 2
    def __init__(self, dbType):
        '''
        Set the database module that should be used
        '''
        self.logger = logging.getLogger(__name__)

        if dbType == u'SQLITE3':
            self.Database = ff_sqlite3.Database()
            self.logger.debug("SQLite3 Database initialized")
            
        else:
            self.logger.critical("There was an issue initializing the database. Unsupported type: [{0}]".format(dbType))
            raise ValueError("Unsupported database type: [{0}]".format(dbType))
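
If more engines were ever supported, a dispatch table would keep this constructor flat. A sketch, under the assumption that each engine module exposes a Database class the way ff_sqlite3 does; _ENGINES and _makeDatabase are hypothetical names:

# Hypothetical engine registry; only SQLITE3 is confirmed by the example above
_ENGINES = {
    u'SQLITE3': ff_sqlite3.Database,
}

def _makeDatabase(dbType):
    try:
        return _ENGINES[dbType]()
    except KeyError:
        raise ValueError("Unsupported database type: [{0}]".format(dbType))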
Example 3
    def __init__(self, settings=None):

        # Avoid a shared mutable default argument
        settings = settings or {}

        # Setup the database object
        self.torrentDB = Databases(flannelfox.settings['database']['defaultDatabaseEngine'])

        self.logger = logging.getLogger(__name__)
        self.logger.info("TorrentClient INIT")

        if settings.get('type') == "transmission":
            self.logger.info("TorrentClient Setup")
            self.client = Transmission.Client(settings=settings)
        else:
            self.logger.info("TorrentClient Not Defined")
            raise ValueError("Torrent client type not defined!")
Example 4
def __modificationDate(filename):
    '''
    Checks the modification time of the file it is given

    filename: The full path of the file to return the timestamp of.

    Returns the timestamp in seconds since epoch
    '''
    logger = logging.getLogger(__name__)
    try:
        # os.path.getmtime already returns seconds since the epoch;
        # strftime("%s") is not portable across platforms
        return int(os.path.getmtime(filename))
    except OSError as e:
        logger.error("There was a problem getting the timestamp for:\n{0}\n{1}".format(filename, e))
        return -1
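
A short sketch of how the helper is consumed; the path is a placeholder and -1 is the sentinel for a missing or unreadable file:

# Hypothetical usage: compute a cache file's age in minutes
lastModified = __modificationDate('/tmp/api.cache')
if lastModified != -1:
    ageMinutes = (time.time() - lastModified) / 60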
Example 5
    def __init__(self):
        self.logger = logging.getLogger(__name__)

        dbSetup = ( "PRAGMA foreign_keys = off;"
            "BEGIN TRANSACTION;"
            "CREATE TABLE QueuedTorrents (comparison TEXT, hashString TEXT, feedDestination TEXT, minRatio REAL, minTime INTEGER, addedOn INTEGER, added INTEGER, queuedOn INTEGER, torrentType INTEGER, proper TEXT, source TEXT, container TEXT, codec TEXT, quality TEXT, day INTEGER, month INTEGER, year INTEGER, torrentTitle TEXT, url TEXT, title TEXT, season INTEGER, episode INTEGER, releaseType TEXT, album TEXT, artist TEXT);"
            "CREATE TABLE BlacklistedTorrents (url STRING PRIMARY KEY);"
            "CREATE INDEX idx_Queue ON QueuedTorrents (added COLLATE BINARY ASC, queuedOn COLLATE BINARY ASC);"
            "CREATE INDEX idx_FeedDestination ON QueuedTorrents (feedDestination COLLATE BINARY ASC);"
            "CREATE INDEX idx_Added ON QueuedTorrents (added COLLATE BINARY DESC);"
            "CREATE INDEX idx_HashString ON QueuedTorrents (hashString COLLATE BINARY ASC);"
            "CREATE INDEX idx_TorrentType ON QueuedTorrents (torrentType COLLATE BINARY ASC);"
            "CREATE VIEW GenericTorrentsView AS SELECT QueuedTorrents.comparison, QueuedTorrents.hashstring, QueuedTorrents.feeddestination, QueuedTorrents.minratio, QueuedTorrents.mintime, QueuedTorrents.addedon, QueuedTorrents.added, QueuedTorrents.queuedon, QueuedTorrents.day, QueuedTorrents.month, QueuedTorrents.year, QueuedTorrents.torrenttitle, QueuedTorrents.url, QueuedTorrents.title, QueuedTorrents.season, QueuedTorrents.episode, QueuedTorrents.codec, QueuedTorrents.container, QueuedTorrents.proper, QueuedTorrents.quality, QueuedTorrents.source, QueuedTorrents.torrentType FROM QueuedTorrents WHERE torrentType = 'none';"
            "CREATE VIEW QueuedTorrentsView AS SELECT QueuedTorrents.comparison, QueuedTorrents.hashstring, QueuedTorrents.feeddestination, QueuedTorrents.minratio, QueuedTorrents.mintime, QueuedTorrents.addedon, QueuedTorrents.added, QueuedTorrents.queuedon, QueuedTorrents.day, QueuedTorrents.month, QueuedTorrents.year, QueuedTorrents.torrenttitle, QueuedTorrents.url, QueuedTorrents.title, QueuedTorrents.season, QueuedTorrents.episode, QueuedTorrents.codec, QueuedTorrents.container, QueuedTorrents.proper, QueuedTorrents.quality, QueuedTorrents.source, QueuedTorrents.torrentType FROM QueuedTorrents WHERE added = 0 ORDER BY queuedOn ASC;"
            "CREATE VIEW MovieTorrentsView AS SELECT QueuedTorrents.comparison, QueuedTorrents.hashstring, QueuedTorrents.feeddestination, QueuedTorrents.minratio, QueuedTorrents.mintime, QueuedTorrents.addedon, QueuedTorrents.added, QueuedTorrents.queuedon, QueuedTorrents.year, QueuedTorrents.torrenttitle, QueuedTorrents.url, QueuedTorrents.title, QueuedTorrents.codec, QueuedTorrents.container, QueuedTorrents.proper, QueuedTorrents.quality, QueuedTorrents.source, QueuedTorrents.torrentType FROM QueuedTorrents WHERE torrentType = 'movie';"
            "CREATE VIEW MusicTorrentsView AS SELECT QueuedTorrents.comparison, QueuedTorrents.hashstring, QueuedTorrents.feeddestination, QueuedTorrents.minratio, QueuedTorrents.mintime, QueuedTorrents.addedon, QueuedTorrents.added, QueuedTorrents.queuedon, QueuedTorrents.year, QueuedTorrents.torrenttitle, QueuedTorrents.url, QueuedTorrents.title, QueuedTorrents.album, QueuedTorrents.artist, QueuedTorrents.codec, QueuedTorrents.releaseType, QueuedTorrents.container, QueuedTorrents.proper, QueuedTorrents.quality, QueuedTorrents.source, QueuedTorrents.torrentType FROM QueuedTorrents WHERE torrentType = 'music';"
            "CREATE VIEW AddedTorrentsView AS SELECT QueuedTorrents.comparison, QueuedTorrents.hashstring, QueuedTorrents.feeddestination, QueuedTorrents.minratio, QueuedTorrents.mintime, QueuedTorrents.addedon, QueuedTorrents.added, QueuedTorrents.queuedon, QueuedTorrents.day, QueuedTorrents.month, QueuedTorrents.year, QueuedTorrents.torrenttitle, QueuedTorrents.url, QueuedTorrents.title, QueuedTorrents.season, QueuedTorrents.episode, QueuedTorrents.codec, QueuedTorrents.container, QueuedTorrents.proper, QueuedTorrents.quality, QueuedTorrents.source, QueuedTorrents.torrentType FROM QueuedTorrents WHERE added = 1 ORDER BY queuedOn ASC;"
            "CREATE VIEW TVTorrentsView AS SELECT QueuedTorrents.comparison, QueuedTorrents.hashstring, QueuedTorrents.feeddestination, QueuedTorrents.minratio, QueuedTorrents.mintime, QueuedTorrents.addedon, QueuedTorrents.added, QueuedTorrents.queuedon, QueuedTorrents.day, QueuedTorrents.month, QueuedTorrents.year, QueuedTorrents.torrenttitle, QueuedTorrents.url, QueuedTorrents.title, QueuedTorrents.season, QueuedTorrents.episode, QueuedTorrents.codec, QueuedTorrents.container, QueuedTorrents.proper, QueuedTorrents.quality, QueuedTorrents.source, QueuedTorrents.torrentType FROM QueuedTorrents WHERE torrentType = 'tv';"
            "COMMIT TRANSACTION;"
            "PRAGMA foreign_keys = on;" )

        self.__execScriptDB(dbSetup)
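
__execScriptDB is not shown in this example. Under the assumption that it wraps the standard sqlite3 module, a minimal sketch could look like this; the databaseLocation attribute is a hypothetical name:

import sqlite3

def __execScriptDB(self, script):
    # executescript() runs multiple semicolon-separated statements in one call
    connection = sqlite3.connect(self.databaseLocation)  # assumed attribute
    try:
        connection.executescript(script)
    finally:
        connection.close()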
Example 6
    def __init__(
        self, settings=None, host=u"localhost", port=u"9091", user=None, password=None, rpcLocation=None, https=False
    ):

        self.logger = logging.getLogger(__name__)
        self.logger.info("TransmissionClient INIT")
        self.logger.debug("TransmissionClient Settings: {0}".format(settings))

        self.elements = {}
        self.elements["queue"] = []
        self.elements["sessionId"] = None

        if settings:
            self.elements.update(settings)
        else:
            self.elements["host"] = host
            self.elements["port"] = port
            self.elements["user"] = user
            self.elements["password"] = password
            self.elements["rpcLocation"] = rpcLocation
            self.elements["https"] = https

        self.logger.debug("TransmissionClient Settings 2: {0}".format(self.elements))

        # Strip off the leading slash if it exists (rpcLocation may be None)
        self.elements["rpcLocation"] = (self.elements["rpcLocation"] or "").lstrip("/")

        # tag generator to keep transmission calls matched
        self.tagGenerator = self.__generateTag()

        # Build the server URI
        self.elements["uri"] = u"http"

        if self.elements["https"]:
            self.elements["uri"] += u"s"

        self.elements["uri"] += u"://{0}:{1}".format(self.elements["host"], self.elements["port"])
        self.logger.debug("TransmissionClient URL: {0}".format(self.elements["uri"]))
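
A hedged construction example, assuming the class is named TransmissionClient as its log messages suggest; the settings keys mirror the keyword defaults above and rpcLocation is a placeholder:

# Hypothetical instantiation with explicit settings
client = TransmissionClient(settings={
    "host": u"localhost",
    "port": u"9091",
    "user": None,
    "password": None,
    "rpcLocation": u"/transmission/rpc",
    "https": False,
})
# client.elements["uri"] would now be u"http://localhost:9091"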
Example 7
def changeCharset(data, charset="utf-8", type="xml"):
    '''
    Used to change the character set of a string to the desired format

    data: The text to be converted
    charset: The format the text should be returned in
    type: The engine to be used to convert the charset

    Returns the converted text
    '''
    logger = logging.getLogger(__name__)
    logger.debug("Trying to convert data to {0} using the {1} parser".format(charset, type))

    if charset is None:
        charset = "utf-8"

    try:
        data = BeautifulSoup(data, type)
        data = data.encode(encoding=charset, errors="xmlcharrefreplace")
    except Exception as e:
        logger.debug("Charset conversion issue: {0}".format(e))
        data = ""

    return data
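
A usage sketch; changeCharset delegates to BeautifulSoup, so the type argument must name a parser bs4 understands (the "xml" parser additionally requires lxml):

# Hypothetical usage: re-encode an RSS payload as utf-8 bytes
xmlBytes = changeCharset(u"<rss><title>caf\xe9</title></rss>", charset="utf-8", type="xml")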
Example 8
def __updateCacheFile(force=False, cacheFilename=None, data=None, frequency=360):
    '''
    Used to update cache files for api calls. This is needed so we do not keep
    asking the api servers for the same information on a frequent basis. The
    default frequency is to ask once every six hours.

    force: perform the update regardless of frequency
    cacheFilename: the full path of the file to save
    data: the content to write to the cache file
    frequency: how often to update the file in minutes
    '''

    logger = logging.getLogger(__name__)

    try:
        if __isCacheUpdateNeeded(force=force, cacheFilename=cacheFilename, frequency=frequency):
            logger.debug("Cache update for {0} needed".format(cacheFilename))
            with open(cacheFilename, 'w') as cache:
                cache.write(data)

        else:
            logger.debug("Cache update for {0} not needed".format(cacheFilename))

    except Exception as e:
        logger.error("There was a problem writing a cache file {0}: {1}".format(cacheFilename, e))
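
A brief usage sketch; the path, payload, and one-hour frequency are placeholders:

# Hypothetical usage: refresh a cache at most once an hour, or immediately when force=True
__updateCacheFile(cacheFilename='/tmp/api.cache', data='{"items": []}', frequency=60)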
Example 9
# flannelfox Includes
import flannelfox
from flannelfox import logging
from flannelfox import Settings

# rssdaemon Includes
from flannelfox.torrenttools import Torrents, TorrentQueue
from flannelfox.torrenttools.Torrents import TORRENT_TYPES


# TODO: can this be moved?
httpRegex = re.compile(ur"https?://([^/]+)(?:/.*)?")

# Setup the logging agent
logger = logging.getLogger(__name__)


def __readRSSFeed(url):

    response = u''
    xmlData = None
    httpCode = None
    encoding = "utf-8"
    pid = os.getpid()

    try:

        # initialize the responses
        response = None
Example 10
class lastfmApi():

    logger = logging.getLogger(__name__)

    def __getLibraryArtistsFeed(self, apiKey, username):

        currentPage = 1
        maxPages = 1
        httpResponse = -1
        artists = []

        headers = {
            'Content-Type': 'application/json',
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36'
        }

        params = {
            'method': 'library.getArtists',
            'api_key': apiKey,
            'user': username,
            'format': 'json',
            'page': currentPage
        }

        while currentPage <= maxPages:

            reply = None
            params['page'] = currentPage

            try:

                r = requests.get(settings['apis']['lastfm'],
                                 headers=headers,
                                 params=params,
                                 timeout=60)
                httpResponse = r.status_code
                self.logger.debug(
                    'Fetched LastFm artist page {0} of {1}: [{2}]'.format(
                        currentPage, maxPages, httpResponse))

                if httpResponse == 200:
                    reply = r.json()

                else:
                    raise ValueError

            except Exception:
                httpResponse = -1
                self.logger.error(
                    'There was a problem fetching a Lastfm artist page\n{0}'.
                    format(httpResponse))
                # Keep the return shape consistent with the success path
                return (httpResponse, artists)

            maxPages = int(reply['artists']['@attr']['totalPages'])
            currentPage = currentPage + 1

            artists.extend(
                [artist['name'] for artist in reply['artists']['artist']])

        return (httpResponse, artists)

    def getLibraryArtists(self, apiKey, username):

        httpResponse, artists = self.__getLibraryArtistsFeed(username=username,
                                                             apiKey=apiKey)

        return artists
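
A usage sketch, assuming settings['apis']['lastfm'] points at the Last.fm REST endpoint; the key and username are placeholders:

# Hypothetical call
api = lastfmApi()
artists = api.getLibraryArtists(apiKey='XXXX', username='someuser')
for artist in artists:
    print(artist)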
Example 11
class QueueReader():

	logger = logging.getLogger(__name__)

	# Setup the database object
	database = None
	defaultDatabaseType = settings['database']['defaultDatabaseEngine']

	# Torrent client object
	torrentClient = None

	def __init__(self, *args):
		self.database = Databases(
			dbType = self.defaultDatabaseType
		)
		self.torrentClient = self.setupTorrentClient()
		self.torrentClient.updateQueue()


	def setupTorrentClient(self):
		if 'client' not in settings:
			self.logger.warning('No client was configured to monitor!')
			return None

		# Try to create a torrent client instance
		try:
			if settings['client']['type'] == 'transmission':
				self.logger.debug("Creating Transmission Client")
				return TorrentClient()

		except Exception as e:
			self.logger.error('Could not create torrent client: {0}'.format(e))
			return None

		# The configured client type was not recognized
		self.logger.error('No supported client was configured to monitor!')
		return None


	def checkSubDirectoryFreeSpace(self):
		# Check for freespace in each directory
		# Collect all the active destinations

		destinations = []
		for torrent in self.torrentClient.getQueue():
			if torrent['downloadDir'] not in destinations:
				destinations.append(torrent['downloadDir'])

		# Check each destination for free space
		for destination in destinations:
			if platform.system() == 'Windows':
				destination = 'U:'


			while FreeSpace.check(destination,'M') < settings['minimumFreeSpace']:

				finishedTorrents = self.torrentClient.getFinishedSeeding()

				# Avoid an infinite loop when nothing has finished seeding
				if len(finishedTorrents) <= 0:
					break

				self.logger.info('Freeing up space in destination: [{0}|{1}]'.format(
					destination,
					FreeSpace.check(destination,'M'))
				)

				# Stop a finished torrent
				finishedTorrent = finishedTorrents.pop()
				self.torrentClient.deleteTorrent(
					hashString=finishedTorrent['hashString'],
					reason='Freespace Needed (minimumFreeSpace)'
				)

				self.torrentClient.updateQueue()


	def checkMainDirectoryFreeSpace(self):
		# Check for used space in master dir

		if settings['maxUsedSpace'] > 0:

			while int(UsedSpace.check(settings['files']['maxUsedSpaceDir'],'G')) >= int(settings['maxUsedSpace']):

				finishedTorrents = self.torrentClient.getFinishedSeeding()

				# Avoid an infinite loop when nothing has finished seeding
				if len(finishedTorrents) <= 0:
					break

				self.logger.info('Freeing up space in destination: [{0}|{1}]'.format(
					UsedSpace.check(settings['files']['maxUsedSpaceDir'],'G'),
					settings['maxUsedSpace'])
				)

				# Stop a finished torrent
				finishedTorrent = finishedTorrents.pop()

				self.torrentClient.deleteTorrent(hashString=finishedTorrent['hashString'],reason='Freespace Needed (maxUsedSpace)')

				self.torrentClient.updateQueue()


	def checkQueueSize(self):
		# Ensure there are not too many torrents running

		while len(self.torrentClient.getQueue()) > settings['queueManagement']['maxTorrents']:

			finishedTorrents = self.torrentClient.getFinishedSeeding()

			if len(finishedTorrents) <= 0:
				break

			while len(finishedTorrents) > 0:

				self.logger.info('Too many torrents are running, trying to remove one {0}/{1}'.format(
					settings['queueManagement']['maxTorrents'],
					len(self.torrentClient.getQueue())
				))

				# Stop a finished torrent
				finishedTorrent = finishedTorrents.pop()

				self.torrentClient.deleteTorrent(hashString=finishedTorrent['hashString'],reason='Too Many Torrents Running')

			self.torrentClient.updateQueue()


	def checkFinishedTorrents(self):
		# Remove finished torrents if strict queue management is enabled

		while settings['queueManagement']['strictQueueManagement'] and len(self.torrentClient.getFinishedSeeding()) > 0:
			finishedTorrents = self.torrentClient.getFinishedSeeding()

			if len(finishedTorrents) <= 0:
				break

			self.logger.info('Strict Queue Management is enabled, stopping {0} finished torrents.'.format(len(finishedTorrents)))

			for finishedTorrent in finishedTorrents:
				self.torrentClient.deleteTorrent(hashString=finishedTorrent['hashString'], reason='Strict Queue Management Enabled and Torrent Finished')

			self.torrentClient.updateQueue()


	def addTorrents(self):

		# Add torrents if there is room
		while ( len(self.torrentClient.getQueue()) < settings['queueManagement']['maxTorrents'] and
				len(self.database.getQueuedTorrents(selectors=['url', 'feedDestination'],num=1)) > 0 and
				len(self.torrentClient.getDownloading()) < settings['queueManagement']['maxDownloadingTorrents'] and
				(
					int(UsedSpace.check(settings['files']['maxUsedSpaceDir'],'G')) < int(settings['maxUsedSpace']) or
					int(settings['maxUsedSpace']) == 0
				)
			):

			queuedTorrents = self.database.getQueuedTorrents(selectors=['url', 'feedDestination'])

			self.logger.info('There are {0} queued torrents, let\'s add them'.format(
				len(queuedTorrents)
			))

			# Get a new torrent
			newTorrent = queuedTorrents.pop()

			# Add new torrent
			# If a destination was not specified then don't pass one
			self.logger.info('Adding: {0}'.format(newTorrent))
			if newTorrent.get('feedDestination', None) is None:
				self.torrentClient.addTorrentURL(newTorrent['url'])
			else:
				self.torrentClient.addTorrentURL(newTorrent['url'],newTorrent['feedDestination'])

			self.torrentClient.updateQueue()


	def addTorrentsAndRemoveFinished(self):

		# Remove a finished torrent if room is needed to add a torrent
		while ( len(self.torrentClient.getQueue()) >= settings['queueManagement']['maxTorrents'] and
				len(self.database.getQueuedTorrents(selectors=['url', 'feedDestination'],num=1)) > 0 and
				len(self.torrentClient.getDownloading()) < settings['queueManagement']['maxDownloadingTorrents'] and
				(
					int(UsedSpace.check(settings['files']['maxUsedSpaceDir'],'G')) < int(settings['maxUsedSpace']) or
					int(settings['maxUsedSpace']) == 0
				)
			   ):

			queuedTorrents = self.database.getQueuedTorrents(selectors=['url', 'feedDestination'])
			dormantSeeds = self.torrentClient.getDormantSeeds()
			slowSeeds = self.torrentClient.getSlowestSeeds()

			if len(dormantSeeds) <= 0 and len(slowSeeds) <= 0:
				break

			self.logger.info('There are {0} queued torrents, let\'s make room and add them'.format(
				len(queuedTorrents)
			))


			while ( (
						len(dormantSeeds) > 0 or
						len(slowSeeds) > 0
					) and
						len(queuedTorrents) > 0
					):


				# Try to grab an old dormant seed
				if len(dormantSeeds) > 0:
					slowestFinishedSeed = dormantSeeds.pop()

				# Else get a slow seed
				else:
					slowestFinishedSeed = slowSeeds.pop()


				# Remove slow seed
				if self.torrentClient.deleteTorrent(hashString=slowestFinishedSeed['hashString'], reason='Making Room For a New Torrent'):

					# Get a new torrent
					newTorrent = queuedTorrents.pop()


					# Add new torrent
					# If a destination was not specified then don't pass one
					if newTorrent.get('feedDestination', None) is None:
						self.torrentClient.addTorrentURL(newTorrent['url'])
					else:
						self.torrentClient.addTorrentURL(newTorrent['url'],newTorrent['feedDestination'])

			self.torrentClient.updateQueue()
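
The methods above read as one maintenance pass; a sketch of an assumed calling order (inferred from the method names, not confirmed by the source):

# Hypothetical maintenance pass; __init__ already creates the client and refreshes the queue
reader = QueueReader()
reader.checkSubDirectoryFreeSpace()
reader.checkMainDirectoryFreeSpace()
reader.checkQueueSize()
reader.checkFinishedTorrents()
reader.addTorrents()
reader.addTorrentsAndRemoveFinished()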
Example 12
import daemon

# FreeSpace Calculator
from flannelfox.ostools import FreeSpace, UsedSpace

# Logging
from flannelfox import logging

# Flannelfox Includes
from flannelfox.settings import settings
from flannelfox.databases import Databases
from flannelfox.torrentclients import TorrentClient
from flannelfox.tools import changeCharset

# Set up the logging agent
logger = logging.getLogger(__name__)

class QueueReader():

	logger = logging.getLogger(__name__)

	# Setup the database object
	database = None
	defaultDatabaseType = settings['database']['defaultDatabaseEngine']

	# Torrent client object
	torrentClient = None

	def __init__(self, *args):
		self.database = Databases(
			dbType = self.defaultDatabaseType
Example 13
def readTraktTvConfigs(configFolder=settings['files']['traktConfigDir']):
	'''
	Reads the titles and other information from a specified trakt.tv list
	Content-Type:application/json
	trakt-api-version:2
	trakt-api-key:XXXX
	'''

	logger = logging.getLogger(__name__)

	validTypes = ['tv', 'movie']

	majorFeeds = {}

	for configFileName, configFileData in common.getConfigFiles(configFolder):

		logger.debug('Found {} feed(s) in {}'.format(len(configFileData), configFileName))

		for feedList in configFileData:

			try:

				# Setup some variables for the feed
				feedName = None
				feedType = None
				feedDestination = None
				minorFeeds = []
				feedFilters = []
				traktListResults = []
				title = None
				year = None
				feedFilterList = []

				# Make sure our list at least has some basic parts
				if feedList.get('username', None) is None:
					raise ValueError('A trakttv username must be specified')

				if feedList.get('api_key', None) is None:
					raise ValueError('A trakttv api key must be specified')

				if feedList.get('minorFeeds', None) is None:
					raise ValueError('You must specify one or more minorFeeds')

				# Get the feedName
				feedName = feedList.get('list_name','').lower().strip()

				if feedName == '':
					raise ValueError('Feeds without names are not permitted')

				# Get the feedType
				feedType = feedList.get('type','none').lower().strip()

				# Get the feedDestination
				feedDestination = feedList.get('feedDestination','').strip()

				if feedDestination == '':
					raise ValueError('The feed has an invalid destination value')
				# TODO: Check if the location exists

				cacheFileName = os.path.join(getCacheDir(),feedName)

				if common.isCacheStillValid(cacheFileName=cacheFileName):

					traktListResults = common.readCacheFile(cacheFileName)
					logger.debug('Using cache file {} for {}'.format(cacheFileName, feedName))

				else:

					traktapi = trakttvApi()

					traktListResults = traktapi.getPublicList(
						apiKey=feedList.get('api_key'),
						username=feedList.get('username'),
						listname=feedName
					)

					logger.debug('Fetching new data {}'.format(feedName))

					if not isinstance(traktListResults, list) or len(traktListResults) < 1:

						traktListResults = common.readCacheFile(cacheFileName)
						logger.debug('Using cache file {} for {}'.format(cacheFileName, feedName))

						if not isinstance(traktListResults, list) or len(traktListResults) < 1:
							raise ValueError('There was not a valid feed reply nor is there a valid cache for the feed')

					else:

						common.updateCacheFile(cacheFileName=cacheFileName, data=traktListResults)
						logger.debug('Updating cache file {} for {}'.format(cacheFileName, feedName))

				logger.debug('Found {} items from {}'.format(len(traktListResults), feedName))

				# Collect the feeds
				logger.debug('{} contains {} minorFeed(s)'.format(feedName, len(feedList.get('minorFeeds',[]))))

				for minorFeed in feedList.get('minorFeeds',[]):

					try:

						url = minorFeed.get('url','').strip()
						minTime = int(minorFeed.get('minTime','0').strip()) # Hours Int
						minRatio = float(minorFeed.get('minRatio','0.0').strip()) # Ratio Float
						comparison = minorFeed.get('comparison','or').strip() # Comparison String
						minorFeeds.append({'url':url,'minTime':minTime,'minRatio':minRatio,'comparison':comparison})

					except (ValueError, KeyError, TypeError) as e:

						logger.warning('The feed contains an invalid minorFeed:\n{0}'.format(e))
						continue


				feedFilters = feedList.get('filters', [])

				# Loop through each show and append a filter for it
				for item in traktListResults:

					try:

						ruleList = []

						if feedList.get('like', False):
							titleMatchMethod = 'titleLike'

						else:
							titleMatchMethod = 'title'

						if 'show' not in item and feedType == 'tv':
							# This happens if you select the wrong type of media tv/movie
							raise ValueError('Media type is not show, but feed type is tv: {0}'.format(item))

						elif 'movie' not in item and feedType == 'movie':
							# This happens if you select the wrong type of media tv/movie
							raise ValueError('Media type is not movie, but feed type is movie: {0}'.format(item))

						elif 'show' in item and feedType == 'tv':

							item = item['show']
							title = item['title'].lower().strip().replace(' & ', ' and ')
							for ch in (':', '\\', '\'', ','):
								title = title.replace(ch, '')

						elif 'movie' in item and feedType == 'movie':

							item = item['movie']
							title = item['title'].lower().strip().replace(' & ', ' and ')
							for ch in (':', '\\', '\'', ','):
								title = title.replace(ch, '')

							year = str(item['year']).strip()

						else:
							raise ValueError('Could not use the trakt feed data')


						for filterItem in feedFilters:

							ruleList.append({'key':titleMatchMethod, 'val':title, 'exclude':False})

							if year is not None:
								ruleList.append({'key':'year', 'val':year, 'exclude':False})

							# Load the excludes
							for exclude in filterItem.get('exclude', []):
								for key, val in exclude.items():
									ruleList.append({'key':key.strip(), 'val':val.strip(), 'exclude':True})

							for include in filterItem.get('include', []):
								for key, val in include.items():
									ruleList.append({'key':key.strip(), 'val':val.strip(), 'exclude':False})

							feedFilterList.append(ruleList)

					except Exception as e:

						logger.warning('The {file} contains an invalid rule:\n{e}\n{t}'.format(file=configFileName,e=e,t=traceback.format_exc()))
						continue

				# Append the Config item to the dict
				majorFeeds['{}.{}'.format(configFileName,feedName)] = {
					'feedName':feedName,
					'feedType':feedType,
					'feedDestination':feedDestination,
					'minorFeeds':minorFeeds,
					'feedFilters':feedFilterList
				}

			except Exception as e:

				logger.warning('The {file} contains an invalid feed:\n{e}\n{t}'.format(file=configFileName,e=e,t=traceback.format_exc()))
				continue

	return majorFeeds
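
Each entry in the returned dict shares one shape; an illustration with placeholder values:

# Illustrative shape of one majorFeeds entry (all values are placeholders)
majorFeedsExample = {
	'myconfig.json.my list': {
		'feedName': 'my list',
		'feedType': 'tv',
		'feedDestination': '/data/tv',
		'minorFeeds': [
			{'url': 'https://example.com/rss', 'minTime': 0, 'minRatio': 0.0, 'comparison': 'or'}
		],
		'feedFilters': [
			[{'key': 'title', 'val': 'some show', 'exclude': False}]
		]
	}
}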
Example 14
def readRSS(configFolder=flannelfox.settings['files']['rssConfigDir']):
    '''
    Read the RSSFeedConfig file

    rssFilter children are stackable and help to refine the filter

    An empty or non-existant rssFilters will result in all items being a
    match

    Takes the location of the config file as a parameter
    Returns a dict of filters to match torrents with
    '''
    logger = logging.getLogger(__name__)

    majorFeeds = {}
    RSS_LISTS = []

    configFiles = os.listdir(configFolder)

    logger.debug("Reading Count: {0} feeds".format(len(configFiles)))

    try:

        for configFile in configFiles:
            
            # Skip non-json files
            if not configFile.endswith('.json'):
                continue

            logger.debug("Loading RSS config file: {0}".format(os.path.join(configFolder,configFile)))

            # Try to read in the rss lists
            try:
                with open(os.path.join(configFolder,configFile)) as rssJson:
                    RSS_LISTS = json.load(rssJson)
            except Exception as e:
                logger.error("There was a problem reading the rss config file\n{0}".format(e))
                continue

            # Loop through the rss lists
            try:

                for rssList in RSS_LISTS:
                    rssList = rssList.get("majorFeed")

                    # Make sure our list at least has some basic parts
                    if (rssList.get("list_name", None) is None or
                        rssList.get("feedDestination", None) is None or
                        rssList.get("minorFeeds", None) is None):
                        continue

                    # Setup some variables for the feed
                    feedName = None
                    feedType = None
                    feedDestination = None
                    minorFeeds = []
                    feedFilters = []
                    httpResponse = -1
                    title = None
                    year = None
                    useCache = False

                    # Get the feedName
                    try:
                        feedName = unicode(rssList.get("list_name",u"").lower().strip())
                        if feedName == u"":
                            raise ValueError
                    except (ValueError, KeyError) as e:
                        logger.warning("Feeds without names are not permitted")
                        continue
      
                    # Get the feedType
                    try:
                        feedType = unicode(rssList.get("type",u"none").lower().strip())
                    except (ValueError, KeyError) as e:
                        # Fall back to the default type instead of skipping the feed
                        feedType = u"none"

                    # Get the feedDestination
                    try:
                        feedDestination = unicode(rssList.get("feedDestination",u"").strip())
                        # TODO: Check if the location exists
                    except (ValueError, KeyError) as e:
                        logger.warning("The feed has an invalid destination value")
                        continue

                    # Collect the feeds
                    try:
                        if rssList.get("minorFeeds",[]) is not None and len(rssList.get("minorFeeds",[])) > 0:
                            for minorFeed in rssList.get("minorFeeds",[]):
                                url = unicode(minorFeed.get("url",u"").strip())
                                minTime = int(minorFeed.get("minTime",u"0").strip()) # Hours Int
                                minRatio = float(minorFeed.get("minRatio",u"0.0").strip()) # Ratio Float
                                comparison = minorFeed.get("comparison",u"or").strip() # Comparison String
                                minorFeeds.append({u"url":url,u"minTime":minTime,u"minRatio":minRatio,u"comparison":comparison})
                    except (ValueError, KeyError, TypeError) as e:
                        logger.warning("The feed contains an invalid minorFeed:\n{0}".format(e))
                        continue

                    # Collect the feedFilters
                    try:
                        feedFilterList = []

                        feedFilters = rssList.get("filters", [])

                        # Loop through each show and append a filter for it
                        for filterItem in feedFilters:

                            ruleList = []

                            # Load the excludes
                            for exclude in filterItem.get("exclude", []):
                                key, val = exclude.items()[0]
                                ruleList.append({u"key":key, u"val":val, u"exclude":True})

                            for include in filterItem.get("include", []):
                                key, val = include.items()[0]
                                ruleList.append({u"key":key, u"val":val, u"exclude":False})

                            feedFilterList.append(ruleList)

                    except Exception as e:
                        logger.warning("The feedFilters contains an invalid rule:\n{0}".format(e))
                        continue

                    # Append the Config item to the dict
                    majorFeeds[configFile+'.'+feedName] = {
                        u"feedName":feedName,
                        u"feedType":feedType,
                        u"feedDestination":feedDestination,
                        u"minorFeeds":minorFeeds,
                        u"feedFilters":feedFilterList
                    }

            except Exception as e:
                logger.error("There was a problem reading a rss list file:\n{0}".format(e))

    except Exception as e:
        # This should only happen if there was an issue listing files in the directory
        logger.error("There was a problem listing the config directory:\n{0}".format(e))

    return majorFeeds
Example 15
def readGoodreads(configFolder=flannelfox.settings['files']['goodreadsConfigDir']):
    '''
    Read the authors a user favorites on GoodReads
    Creates a cachefile for the authors and updates it when needed
    Returns a list of authors we want to look for
    '''

    logger = logging.getLogger(__name__)
    logger.debug("Reading Goodreads Feed")

    majorFeeds = {}
    goodreadsLists = []

    try:

        for configFile in os.listdir(configFolder):
            
            # Skip non-json files
            if not configFile.endswith('.json'):
                continue

            logger.debug("Loading Goodreads config file: {0}".format(os.path.join(configFolder,configFile)))

            # Try to read in the goodreads lists
            try:
                with open(os.path.join(configFolder,configFile)) as goodreadsJson:
                    goodreadsLists = json.load(goodreadsJson)
            except Exception as e:
                logger.error("There was a problem reading the goodreads config file\n{0}".format(e))
                continue

            # Loop through the goodreads lists
            try:

                for goodreadsList in goodreadsLists:

                    # Make sure our list at least has some basic parts
                    if (goodreadsList.get("username", None) is None or
                        goodreadsList.get("api_key", None) is None or
                        goodreadsList.get("list_name", None) is None or
                        goodreadsList.get("type", None) is None or
                        goodreadsList.get("feedDestination", None) is None or
                        goodreadsList.get("minorFeeds", None) is None):

                        continue

                    # Setup some variables for the feed
                    feedName = None
                    feedType = None
                    feedDestination = None
                    minorFeeds = []
                    feedFilters = []
                    goodreadsListResults = []
                    httpResponse = -1
                    title = None
                    year = None
                    useCache = False

                    # Get the feedName
                    try:
                        feedName = unicode(goodreadsList.get("list_name",u"").lower().strip())
                        if feedName == u"":
                            raise ValueError
                    except (ValueError, KeyError) as e:
                        logger.warning("Feeds without names are not permitted")
                        continue

                    cacheFileName = os.path.join(flannelfox.settings['files']['goodreadsCacheDir'],feedName+'.'+configFile)

                    if not os.path.exists(os.path.dirname(cacheFileName)):
                        try:
                            os.makedirs(os.path.dirname(cacheFileName))
                        except OSError:
                            # Guard against a race condition creating the directory
                            continue

                    headers = {
                        "user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36",
                    }

                    params = {
                        'key':goodreadsList.get("api_key", None)
                    }
      
                    # Get the feedType
                    try:
                        feedType = unicode(goodreadsList.get("type",u"none").lower().strip())
                    except (ValueError, KeyError) as e:
                        # Fall back to the default type instead of skipping the feed
                        feedType = u"none"

                    # Get the feedDestination
                    try:
                        feedDestination = unicode(goodreadsList.get("feedDestination",u"").strip())
                        # TODO: Check if the location exists
                    except (ValueError, KeyError) as e:
                        logger.warning("The feed has an invalid destination value")
                        continue

                    # Collect the feeds
                    try:
                        if goodreadsList.get("minorFeeds",[]) is not None and len(goodreadsList.get("minorFeeds",[])) > 0:
                            for minorFeed in goodreadsList.get("minorFeeds",[]):
                                url = unicode(minorFeed.get("url",u"").strip())
                                minTime = int(minorFeed.get("minTime",u"0").strip()) # Hours Int
                                minRatio = float(minorFeed.get("minRatio",u"0.0").strip()) # Ratio Float
                                comparison = minorFeed.get("comparison",u"or").strip() # Comparison String
                                minorFeeds.append({u"url":url,u"minTime":minTime,u"minRatio":minRatio,u"comparison":comparison})
                    except (ValueError, KeyError, TypeError) as e:
                        logger.warning("The feed contains an invalid minorFeed:\n{0}".format(e))
                        continue

                    if not __isCacheUpdateNeeded(cacheFilename=cacheFileName):
                        useCache = True

                    if not useCache:
                        try:
                            r = requests.get("{0}/user/show/{1}.xml".format(flannelfox.settings['apis']['goodreads'], goodreadsList["username"]), headers=headers, params=params, timeout=60)
                            httpResponse = r.status_code

                            if httpResponse == 200:

                                try:
                                    # Parse the RSS XML and turn it into a json list
                                    xmlData = changeCharset(r.text, "utf-8", "xml")
                                    xmlData = ET.fromstring(xmlData)
                                    xmlData = xmlData.find('user')
                                    authors = xmlData.find('favorite_authors')
                                    goodreadsListResults = []

                                    for author in authors.iter('author'):
                                        try:
                                            name = author.find('name').text

                                            if name is not None and name != "":
                                                name = unicode(name.strip())
                                                name = name.replace(u" & ",u" and ")
                                                goodreadsListResults.append(name)
                                            else:
                                                continue
                                        except:
                                            continue
                                except Exception as e:
                                    logger.error("There was a problem reading the goodreads xml file:\n-   {0}\n-    {1}".format(e,traceback.format_exc()))    

                            else:
                                logger.error("There was a problem fetching a goodreads list file: {0}".format(httpResponse))
                            
                        except Exception as e:
                            logger.error("There was a problem fetching a goodreads list file: {0}".format(e))
                            goodreadsListResults = []
                            httpResponse = -1

                        logger.debug("Fetching goodreads list page: [{0}]".format(httpResponse))

                        # If we are able to get a list then cache it
                        # TODO: See if Last-Modified can be added to save this step when possible
                        if httpResponse == 200:
                            __updateCacheFile(cacheFilename=cacheFileName, data=json.dumps(goodreadsListResults))
                        else:
                            useCache = True

                    if useCache:
                        try:
                            logger.debug("Reading cache file for [{0}]".format(cacheFileName))
                            with open(cacheFileName) as cache:
                                goodreadsListResults = json.load(cache)
                        except Exception as e:
                            logger.error("There was a problem reading a goodreads list cache file: {0}".format(e))
                            continue

                    # Collect the feedFilters
                    try:
                        feedFilterList = []

                        majorFeedFilters = goodreadsList.get("filters", [])

                        for filterItem in majorFeedFilters:

                            # Loop through each author and append a filter for it
                            for item in goodreadsListResults:
                                ruleList = []

                                if goodreadsList.get("like", False):
                                    titleMatchMethod = u"titleLike"
                                else:
                                    titleMatchMethod = u"title"

                                # Each item in goodreadsListResults is an author name
                                ruleList.append({u"key":titleMatchMethod, u"val":item, u"exclude":False})
                                
                                # Load the excludes
                                for exclude in filterItem.get("exclude", []):
                                    key, val = exclude.items()[0]
                                    ruleList.append({u"key":key, u"val":val, u"exclude":True})

                                for include in filterItem.get("include", []):
                                    key, val = include.items()[0]
                                    ruleList.append({u"key":key, u"val":val, u"exclude":False})


                                feedFilterList.append(ruleList)

                    except Exception as e:
                        logger.warning("The feedFilters contains an invalid rule:\n{0}".format(e))
                        continue

                    # Append the Config item to the dict
                    majorFeeds[configFile+'.'+feedName] = {
                        u"feedName":feedName,
                        u"feedType":feedType,
                        u"feedDestination":feedDestination,
                        u"minorFeeds":minorFeeds,
                        u"feedFilters":feedFilterList
                    }
            except Exception as e:
                logger.error("There was a problem reading a goodreads list file:\n{0}".format(e))

    except Exception as e:
        # This should only happen if there was an issue listing files in the directory
        logger.error("There was a problem listing the config directory:\n{0}".format(e))
    logger.debug("=================")
    logger.debug("GoodreadsFilters")
    logger.debug("=================")
    logger.debug(majorFeeds)
    logger.debug("=================")
    return majorFeeds
Example 16
def readTraktTV(configFolder=flannelfox.settings['files']['traktConfigDir']):
    '''
    Reads the titles and other information from a specified trakt.tv list
    Content-Type:application/json
    trakt-api-version:2
    trakt-api-key:XXXX
    '''

    logger = logging.getLogger(__name__)
    logger.debug("Reading TraktTV Feed")

    majorFeeds = {}
    TRAKT_TV_LISTS = []

    try:

        for configFile in os.listdir(configFolder):
            
            # Skip non-json files
            if not configFile.endswith('.json'):
                continue

            logger.debug("Loading TraktTV config file: {0}".format(os.path.join(configFolder,configFile)))

            # Try to read in the trakt lists
            try:
                with open(os.path.join(configFolder,configFile)) as traktJson:
                    TRAKT_TV_LISTS = json.load(traktJson)
            except Exception as e:
                logger.error("There was a problem reading the trakt config file\n{0}".format(e))
                continue

            # Loop through the trakt.tv lists
            try:

                for traktList in TRAKT_TV_LISTS:

                    # Make sure our list at least has some basic parts
                    if (traktList.get("username", None) is None or
                        traktList.get("api_key", None) is None or
                        traktList.get("list_name", None) is None or
                        traktList.get("type", None) is None or
                        traktList.get("feedDestination", None) is None or
                        traktList.get("minorFeeds", None) is None):

                        continue

                    # Setup some variables for the feed
                    feedName = None
                    feedType = None
                    feedDestination = None
                    minorFeeds = []
                    feedFilters = []
                    traktListResults = []
                    httpResponse = -1
                    title = None
                    year = None
                    useCache = False

                    # Get the feedName
                    try:
                        feedName = unicode(traktList.get("list_name",u"").lower().strip())
                        if feedName == u"":
                            raise ValueError
                    except (ValueError, KeyError) as e:
                        logger.warning("Feeds without names are not permitted")
                        continue

                    cacheFileName = os.path.join(flannelfox.settings['files']['traktCacheDir'],feedName+'.'+configFile)

                    if not os.path.exists(os.path.dirname(cacheFileName)):
                        try:
                            os.makedirs(os.path.dirname(cacheFileName))
                        except OSError:
                            # Guard against a race condition creating the directory
                            continue

                    headers = {
                        "user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36",
                        "Content-Type":"application/json",
                        "trakt-api-version":"2",
                        "trakt-api-key":traktList.get("api_key","")
                    }

      
                    # Get the feedType
                    try:
                        feedType = unicode(traktList.get("type",u"none").lower().strip())
                    except (ValueError, KeyError) as e:
                        # Fall back to the default type instead of skipping the feed
                        feedType = u"none"

                    # Get the feedDestination
                    try:
                        feedDestination = unicode(traktList.get("feedDestination",u"").strip())
                        # TODO: Check if the location exists
                    except (ValueError, KeyError) as e:
                        logger.warning("The feed has an invalid destination value")
                        continue

                    # Collect the feeds
                    try:
                        if traktList.get("minorFeeds",[]) is not None and len(traktList.get("minorFeeds",[])) > 0:
                            for minorFeed in traktList.get("minorFeeds",[]):
                                url = unicode(minorFeed.get("url",u"").strip())
                                minTime = int(minorFeed.get("minTime",u"0").strip()) # Hours Int
                                minRatio = float(minorFeed.get("minRatio",u"0.0").strip()) # Ratio Float
                                comparison = minorFeed.get("comparison",u"or").strip() # Comparison String
                                minorFeeds.append({u"url":url,u"minTime":minTime,u"minRatio":minRatio,u"comparison":comparison})
                    except (ValueError, KeyError, TypeError) as e:
                        logger.warning("The feed contains an invalid minorFeed:\n{0}".format(e))
                        continue

                    if not __isCacheUpdateNeeded(cacheFilename=cacheFileName):
                        useCache = True

                    if not useCache:
                        try:
                            r = requests.get("{0}/users/{1}/lists/{2}/items".format(flannelfox.settings['apis']['trakt'], traktList["username"], traktList["list_name"]), headers=headers, timeout=60)
                            httpResponse = r.status_code

                            if httpResponse == 200:
                                traktListResults = r.json()
                            else:
                                logger.error("There was a problem fetching a trakt list file: {0}".format(httpResponse))
                            
                        except Exception as e:
                            logger.error("There was a problem fetching a trakt list file: {0}".format(e))
                            traktListResults = []
                            httpResponse = -1

                        logger.debug("Fetching trakt list page: [{0}]".format(httpResponse))

                        # If we are able to get a list then cache it
                        # TODO: See if Last-Modified can be added to save this step when possible
                        if httpResponse == 200:
                            __updateCacheFile(cacheFilename=cacheFileName, data=json.dumps(traktListResults))
                        else:
                            useCache = True

                    if useCache:
                        try:
                            logger.debug("Reading cache file for [{0}]".format(cacheFileName))
                            with open(cacheFileName) as cache:
                                traktListResults = json.load(cache)
                        except Exception as e:
                            logger.error("There was a problem reading a trakt list cache file: {0}".format(e))
                            continue

                    # Collect the feedFilters
                    try:
                        feedFilterList = []

                        majorFeedFilters = traktList.get("filters", [])

                        for filterItem in majorFeedFilters:

                            # Loop through each show and append a filter for it
                            for item in traktListResults:
                                ruleList = []

                                if traktList.get("like", False):
                                    titleMatchMethod = u"titleLike"
                                else:
                                    titleMatchMethod = u"title"

                                # Make sure we have some shows to fetch
                                # TODO: make this use the type field in the json file to determine if it should be show or movie
                                if "show" not in item and feedType == "tv":
                                    # This happens if you select the wrong type of media tv/movie
                                    continue;
                                elif "movie" not in item and feedType == "movie":
                                    # This happens if you select the wrong type of media tv/movie
                                    continue;

                                elif "show" in item and feedType == "tv":
                                    item = item["show"]
                                    title = item["title"].lower().strip().replace(u" & ", u" and ")

                                elif "movie" in item and feedType == "movie":
                                    item = item["movie"]
                                    title = item["title"].lower().strip().replace(u" & ", u" and ")
                                    year = item["year"]

                                else:
                                    continue


                                ruleList.append({u"key":titleMatchMethod, u"val":title, u"exclude":False})
                                
                                if year is not None:
                                    ruleList.append({u"key":"year", u"val":year, u"exclude":False})

                                # Load the excludes
                                for exclude in filterItem.get("exclude", []):
                                    key, val = exclude.items()[0]
                                    ruleList.append({u"key":key, u"val":val, u"exclude":True})

                                for include in filterItem.get("include", []):
                                    key, val = include.items()[0]
                                    ruleList.append({u"key":key, u"val":val, u"exclude":False})


                                feedFilterList.append(ruleList)

                    except Exception as e:
                        logger.warning("The feedFilters contains an invalid rule:\n{0}".format(e))
                        continue

                    # Append the Config item to the dict
                    majorFeeds[configFile+'.'+feedName] = {
                        u"feedName":feedName,
                        u"feedType":feedType,
                        u"feedDestination":feedDestination,
                        u"minorFeeds":minorFeeds,
                        u"feedFilters":feedFilterList
                    }
            except Exception as e:
                logger.error("There was a problem reading a trakt list file:\n{0}".format(e))

    except Exception as e:
        # This should only happen if there was an issue listing files in the directory
        logger.error("There was a problem listing the config directory:\n{0}".format(e))
    logger.debug("=================")
    logger.debug("TraktMajorFilters")
    logger.debug("=================")
    logger.debug(majorFeeds)
    logger.debug("=================")
    return majorFeeds
Example 17
def readLastfmArtists(configFolder=flannelfox.settings['files']['lastfmConfigDir']):
    '''
    Read the artists from a users lastfm library.
    Creates a cachefile for the artists and updates it when needed
    Returns a list of artists we want to look for
    '''
    logger = logging.getLogger(__name__)
    majorFeeds = {}

    try:

        for configFile in os.listdir(configFolder):

            # Skip non-json files
            if not configFile.endswith('.json'):
                continue

            logger.debug("Loading LastFM config file: {0}".format(os.path.join(configFolder,configFile)))

            # Try to read in the lastfm lists
            try:
                with open(os.path.join(configFolder,configFile)) as lastfmJson:
                    lastfmArtistsLists = json.load(lastfmJson)
            except Exception as e:
                logger.error("There was a problem reading the lastfm config file\n{0}".format(e))
                continue

            try:
                for artistsList in lastfmArtistsLists:

                    # Make sure our list at least has some basic parts
                    if (artistsList.get("username", None) is None or
                        artistsList.get("api_key", None) is None or
                        artistsList.get("list_name", None) is None or
                        artistsList.get("type", None) is None or
                        artistsList.get("feedDestination", None) is None or
                        artistsList.get("minorFeeds", None) is None):

                        continue

                    headers = {
                        "Content-Type":"application/json",
                        "user-agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.99 Safari/537.36"
                    }

                    currentPage = 1
                    maxPages = 2
                    replies = []
                    artists = []
                    feedName = None
                    feedType = None
                    feedDestination = None
                    minorFeeds = []
                    feedFilters = []
                    httpResponse = -1
                    useCache = False

                    cacheFileName = "{0}/{1}".format(flannelfox.settings['files']['lastfmCacheDir'],artistsList.get("list_name")+'.'+configFile)
                    if not os.path.exists(os.path.dirname(cacheFileName)):
                        try:
                            os.makedirs(os.path.dirname(cacheFileName))
                        except OSError as exc: # Guard against race condition
                            continue
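                    # Note: on Python 3 the makedirs guard above could likely be
                    # collapsed to os.makedirs(os.path.dirname(cacheFileName), exist_ok=True)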

                    # Get the feedName
                    try:
                        feedName = unicode(artistsList.get("list_name",u"").lower().strip())
                        if feedName == u"":
                            raise ValueError
                    except (ValueError, KeyError) as e:
                        logger.warning("Feeds with out names are not permitted")
                        continue

                    # Get the feedType
                    try:
                        feedType = unicode(artistsList.get("type",u"none").lower().strip())
                    except (ValueError, KeyError) as e:
                        feedType = u"none"

                    # Get the feedDestination
                    try:
                        feedDestination = unicode(artistsList.get("feedDestination",u"").strip())
                        # TODO: Check if the location exists
                    except (ValueError, KeyError) as e:
                        logger.warning("The feed has an invalid destination value")
                        continue

                    # Collect the feeds
                    try:
                        if artistsList.get("minorFeeds",[]) is not None and len(artistsList.get("minorFeeds",[])) > 0:
                            for minorFeed in artistsList.get("minorFeeds",[]):
                                url = unicode(minorFeed.get("url",u"").strip())
                                minTime = int(minorFeed.get("minTime",u"0").strip()) # Hours Int
                                minRatio = float(minorFeed.get("minRatio",u"0.0").strip()) # Ratio Float
                                comparison = minorFeed.get("comparison",u"or").strip() # Comparison String
                                minorFeeds.append({u"url":url,u"minTime":minTime,u"minRatio":minRatio,u"comparison":comparison})
                    except (ValueError, KeyError, TypeError) as e:
                        logger.warning("The feed contains an invalid minorFeed:\n{0}".format(e))
                        continue

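                    # Serve the artist list from the cache file unless it has gone
                    # stale (older than the configured update frequency)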
                    if not __isCacheUpdateNeeded(cacheFilename=cacheFileName):
                        useCache = True

                    if not useCache:
                        while currentPage <= maxPages:
                            reply = None

                            call = {}
                            call.update({"method":"library.getArtists"})
                            call.update({"api_key":artistsList.get("api_key")})
                            call.update({"user":artistsList.get("username")})
                            call.update({"format":"json"})
                            call.update({"page":currentPage})

                            try:
                                # TODO: Convert this to a pool
                                r = requests.get(flannelfox.settings['apis']['lastfm'], headers=headers, params=call, timeout=60)
                                httpResponse = r.status_code
                            except Exception as e:
                                httpResponse = -1
                                logger.error("There was a problem fetching a Lastfm artists page\n{0}".format(e))

                            if httpResponse == 200:
                                reply = r.json()
                            else:
                                logger.error("There was a problem fetching a Lastfm album page\n{0}".format(httpResponse))
                                replies = []
                                break # TODO: Replace this with an exception

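                            # Expected reply shape, per the fields read below
                            # (a sketch; values are hypothetical):
                            #   {"artists": {"artist": [{"name": "..."}, ...],
                            #                "@attr": {"totalPages": "3", ...}}}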
                            maxPages = int(reply["artists"]["@attr"]["totalPages"])
                            replies.extend(reply["artists"]["artist"][:])
                            logger.error("Fetching Lastfm album page {0} of {1}: [{2}]".format(currentPage, maxPages, httpResponse))
                            currentPage = currentPage + 1

                        for artist in replies:
                            artists.append(artist["name"])

                        # If we are able to get a list then cache it
                        # TODO: See if Last-Modified can be added to save this step when possible
                        if httpResponse == 200:
                            __updateCacheFile(cacheFilename=cacheFileName, data=json.dumps(artists))
                        else:
                            useCache = True

                    if useCache:
                        try:
                            logger.debug("Reading cache file for [{0}]".format(cacheFileName))
                            with open(cacheFileName) as cache:
                                artists = json.load(cache)
                        except Exception as e:
                            logger.error("There was a problem reading a lastfm list cache file: {0}".format(e))
                            continue

                    # Collect the feedFilters
                    try:
                        feedFilterList = []

                        majorFeedFilters = artistsList.get("filters", [])

                        for filterItem in majorFeedFilters:
                        
                            # Loop through each artist and append a filter for it
                            for artist in artists:

                                ruleList = []

                                # Clean the artist name
                                artist = artist.lower().strip().replace(u" & ", u" and ")

                                ruleList.append({u"key":"artist", u"val":artist, u"exclude":False})

                                # Load the excludes
                                for exclude in filterItem.get("exclude", []):
                                    key, val = exclude.items()[0]
                                    ruleList.append({u"key":key, u"val":val, u"exclude":True})

                                for include in filterItem.get("include", []):
                                    key, val = include.items()[0]
                                    ruleList.append({u"key":key, u"val":val, u"exclude":False})

                                feedFilterList.append(ruleList)
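                                # e.g. a single ruleList might come out as (hypothetical values):
                                #   [{u"key": "artist", u"val": u"some artist", u"exclude": False},
                                #    {u"key": u"quality", u"val": u"v0", u"exclude": False}]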

                    except Exception as e:
                        logger.warning("The feedFilters contains an invalid rule:\n{0}".format(e))
                        continue

                    # Append the Config item to the dict
                    majorFeeds[feedName] = {u"feedName":feedName,u"feedType":feedType,u"feedDestination":feedDestination,u"minorFeeds":minorFeeds,u"feedFilters":feedFilterList}

            except Exception as e:
                logger.error("There was a problem reading a lastfm artists list file:\n{0}".format(e))
                httpResponse = -1
                artists = []
    except Exception as e:
        # This should only happen if there was an issue getting file names from the directory
        logger.error("There was a problem listing the lastfm config directory:\n{0}".format(e))


    return majorFeeds
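

# A minimal usage sketch (hypothetical, not part of the original module): build
# the feeds from the default Last.fm config folder and summarize what was found.
if __name__ == '__main__':
    majorFeeds = readLastfmArtists()
    for name, feed in majorFeeds.items():
        print("{0}: {1} minor feeds, {2} filter sets".format(
            name, len(feed[u"minorFeeds"]), len(feed[u"feedFilters"])))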