Example #1
    def run(self):
        logger.info('[VersionCheck] Checking for new release on Github.')
        helpers.job_management(write=True, job='Check Version', current_run=helpers.utctimestamp(), status='Running')
        mylar.VERSION_STATUS = 'Running'
        versioncheck.checkGithub()
        helpers.job_management(write=True, job='Check Version', last_run_completed=helpers.utctimestamp(), status='Waiting')
        mylar.VERSION_STATUS = 'Waiting'
Example #2
File: searchit.py Project: 2mny/mylar
    def run(self):

        logger.info('[SEARCH] Running Search for Wanted.')
        helpers.job_management(write=True, job='Auto-Search', current_run=helpers.utctimestamp(), status='Running')
        mylar.SEARCH_STATUS = 'Running'
        mylar.search.searchforissue()
        helpers.job_management(write=True, job='Auto-Search', last_run_completed=helpers.utctimestamp(), status='Waiting')
        mylar.SEARCH_STATUS = 'Waiting'
Example #3
    def run(self):
        logger.info('[WEEKLY] Checking Weekly Pull-list for new releases/updates')
        helpers.job_management(write=True, job='Weekly Pullist', current_run=helpers.utctimestamp(), status='Running')
        mylar.WEEKLY_STATUS = 'Running'
        weeklypull.pullit()
        weeklypull.future_check()
        helpers.job_management(write=True, job='Weekly Pullist', last_run_completed=helpers.utctimestamp(), status='Waiting')
        mylar.WEEKLY_STATUS = 'Waiting'
Example #4
    def run(self, sched):
        logger.info('[DBUpdate] Updating Database.')
        helpers.job_management(write=True,
                               job='DB Updater',
                               current_run=helpers.utctimestamp(),
                               status='Running')
        mylar.updater.dbUpdate(sched=sched)
        helpers.job_management(write=True,
                               job='DB Updater',
                               last_run_completed=helpers.utctimestamp(),
                               status='Waiting')
Example #5
File: searchit.py Project: qubidt/mylar3
    def run(self):

        logger.info('[SEARCH] Running Search for Wanted.')
        helpers.job_management(write=True,
                               job='Auto-Search',
                               current_run=helpers.utctimestamp(),
                               status='Running')
        mylar.SEARCH_STATUS = 'Running'
        mylar.search.searchforissue()
        helpers.job_management(write=True,
                               job='Auto-Search',
                               last_run_completed=helpers.utctimestamp(),
                               status='Waiting')
Example #6
    def run(self):
        logger.info('[VersionCheck] Checking for new release on Github.')
        helpers.job_management(write=True,
                               job='Check Version',
                               current_run=helpers.utctimestamp(),
                               status='Running')
        mylar.VERSION_STATUS = 'Running'
        versioncheck.checkGithub()
        helpers.job_management(write=True,
                               job='Check Version',
                               last_run_completed=helpers.utctimestamp(),
                               status='Waiting')
        mylar.VERSION_STATUS = 'Waiting'
Example #7
    def run(self):
        logger.info(
            '[WEEKLY] Checking Weekly Pull-list for new releases/updates')
        helpers.job_management(write=True,
                               job='Weekly Pullist',
                               current_run=helpers.utctimestamp(),
                               status='Running')
        mylar.WEEKLY_STATUS = 'Running'
        weeklypull.pullit()
        weeklypull.future_check()
        helpers.job_management(write=True,
                               job='Weekly Pullist',
                               last_run_completed=helpers.utctimestamp(),
                               status='Waiting')
Example #8
    def update_db(self):

        # mylar.MAINTENANCE_UPDATE will indicate what's being updated in the db
        if mylar.MAINTENANCE_UPDATE:
            self.db_version_check(display=False)

            # backup mylar.db here
            self.backup_files(dbs=True)

            for dmode in mylar.MAINTENANCE_UPDATE:
                if dmode['mode'] == 'rss update':
                    logger.info(
                        '[MAINTENANCE-MODE][DB-CONVERSION] Updating DB due to RSS table conversion'
                    )
                    if dmode['resume'] > 0:
                        logger.info(
                            '[MAINTENANCE-MODE][DB-CONVERSION][DB-RECOVERY] Attempting to resume conversion from previous run (starting at record: %s)'
                            % dmode['resume'])

                #force set logging to warning level only so the progress indicator can be displayed in console
                prev_log_level = mylar.LOG_LEVEL
                self.toggle_logging(level=0)

                if dmode['mode'] == 'rss update':
                    self.sql_attachmylar()

                    row_cnt = self.dbmylar.execute(
                        "SELECT COUNT(rowid) as count FROM rssdb")
                    rowcnt = row_cnt.fetchone()[0]
                    mylar.MAINTENANCE_DB_TOTAL = rowcnt

                    if dmode['resume'] > 0:
                        xt = self.dbmylar.execute(
                            "SELECT rowid, Title FROM rssdb WHERE rowid >= ? ORDER BY rowid ASC",
                            [dmode['resume']])
                    else:
                        xt = self.dbmylar.execute(
                            "SELECT rowid, Title FROM rssdb ORDER BY rowid ASC"
                        )
                    xlist = xt.fetchall()

                    mylar.MAINTENANCE_DB_COUNT = 0

                    if xlist is None:
                        print('Nothing in the rssdb to update. Ignoring.')
                        return True

                    try:
                        if dmode['resume'] > 0 and xlist is not None:
                            logger.info('resume set at : %s' %
                                        (xlist[dmode['resume']], ))
                            #xlist[dmode['resume']:]
                            mylar.MAINTENANCE_DB_COUNT = dmode['resume']
                    except Exception as e:
                        print(
                            '[ERROR:%s] - table resume location is not accurate. Starting from the beginning, but this should go quickly..'
                            % e)
                        xt = self.dbmylar.execute(
                            "SELECT rowid, Title FROM rssdb ORDER BY rowid ASC"
                        )
                        xlist = xt.fetchall()
                        dmode['resume'] = 0

                    if xlist:
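                        # resultlist batches (Issue_Number, ComicName, rowid) tuples for
                        # executemany() updates; delete_rows collects rowids of junk
                        # entries to purge once the conversion pass completes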
                        resultlist = []
                        delete_rows = []
                        for x in self.progressBar(xlist,
                                                  prefix='Progress',
                                                  suffix='Complete',
                                                  length=50,
                                                  resume=dmode['resume']):

                            #signal capture here since we can't do it as per normal
                            if any([
                                    mylar.SIGNAL == 'shutdown',
                                    mylar.SIGNAL == 'restart'
                            ]):
                                try:
                                    self.dbmylar.executemany(
                                        "UPDATE rssdb SET Issue_Number=?, ComicName=? WHERE rowid=?",
                                        (resultlist))
                                    self.sql_closemylar()
                                except Exception as e:
                                    print('error: %s' % e)
                                else:
                                    send_it = {
                                        'mode': dmode['mode'],
                                        'version': self.db_version,
                                        'status': 'incomplete',
                                        'total': mylar.MAINTENANCE_DB_TOTAL,
                                        'current': mylar.MAINTENANCE_DB_COUNT,
                                        'last_run': helpers.utctimestamp()
                                    }
                                    self.db_update_status(send_it)

                                #toggle back the logging level to what it was originally.
                                self.toggle_logging(level=prev_log_level)

                                if mylar.SIGNAL == 'shutdown':
                                    logger.info(
                                        '[MAINTENANCE-MODE][DB-CONVERSION][SHUTDOWN] Shutting Down...'
                                    )
                                    return False
                                else:
                                    logger.info(
                                        '[MAINTENANCE-MODE][DB-CONVERSION][RESTART] Restarting...'
                                    )
                                    return True

                            mylar.MAINTENANCE_DB_COUNT += 1
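                            # rows with an empty title are junk entries; queue them for deletion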
                            if not x[1]:
                                logger.fdebug(
                                    '[MAINTENANCE-MODE][DB-CONVERSION][JUNK-NAME] %s'
                                    % x[1])
                                delete_rows.append((x[0], ))
                                continue
                            try:
                                if any(
                                        ext in x[1] for ext in
                                    ['yenc', '.pdf', '.rar', '.mp4', '.avi']):
                                    logger.fdebug(
                                        '[MAINTENANCE-MODE][DB-CONVERSION][JUNK-NAME] %s'
                                        % x[1])
                                    delete_rows.append((x[0], ))
                                    continue
                                else:
                                    flc = filechecker.FileChecker(file=x[1])
                                    filelist = flc.listFiles()
                            except Exception as e:
                                logger.fdebug(
                                    '[MAINTENANCE-MODE][DB-CONVERSION][JUNK-NAME] %s'
                                    % x[1])
                                delete_rows.append((x[0], ))
                                continue
                            else:
                                if all([
                                        filelist['series_name'] != '',
                                        filelist['series_name'] is not None
                                ]) and filelist['issue_number'] != '-':
                                    issuenumber = filelist['issue_number']
                                    seriesname = re.sub(
                                        r'[\u2014|\u2013|\u2e3a|\u2e3b]', '-',
                                        filelist['series_name']).strip()
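                                    # fold a trailing '#<num> -' fragment from the series
                                    # name back into the issue number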
                                    if seriesname.endswith(
                                            '-') and '#' in seriesname[-6:]:
                                        ck1 = seriesname.rfind('#')
                                        ck2 = seriesname.rfind('-')
                                        if seriesname[ck1 + 1:ck2 -
                                                      1].strip().isdigit():
                                            issuenumber = '%s %s' % (
                                                seriesname[ck1:].strip(),
                                                issuenumber)
                                            seriesname = seriesname[:ck1 -
                                                                    1].strip()
                                            issuenumber = issuenumber.strip()
                                    resultlist.append(
                                        (issuenumber, seriesname.strip(),
                                         x[0]))

                                if len(resultlist) > 500:
                                    # write it out every 500 records.
                                    try:
                                        logger.fdebug('resultlist: %s' %
                                                      (resultlist, ))
                                        self.dbmylar.executemany(
                                            "UPDATE rssdb SET Issue_Number=?, ComicName=? WHERE rowid=?",
                                            (resultlist))
                                        self.sql_closemylar()
                                        # update the db status so a resume starts from the last batched write instead of the beginning.
                                        send_it = {
                                            'mode': dmode['mode'],
                                            'version': self.db_version,
                                            'status': 'incomplete',
                                            'total':
                                            mylar.MAINTENANCE_DB_TOTAL,
                                            'current':
                                            mylar.MAINTENANCE_DB_COUNT,
                                            'last_run': helpers.utctimestamp()
                                        }
                                        self.db_update_status(send_it)

                                    except Exception as e:
                                        print('error: %s' % e)
                                        return False
                                    else:
                                        logger.fdebug('reattaching')
                                        self.sql_attachmylar()
                                        resultlist = []

                        try:
                            if len(resultlist) > 0:
                                self.dbmylar.executemany(
                                    "UPDATE rssdb SET Issue_Number=?, ComicName=? WHERE rowid=?",
                                    (resultlist))
                                self.sql_closemylar()
                        except Exception as e:
                            print('error: %s' % e)
                            return False
                        else:
                            try:
                                send_it = {
                                    'mode': dmode['mode'],
                                    'version': 1,
                                    'status': 'complete',
                                    'total': mylar.MAINTENANCE_DB_TOTAL,
                                    'current': mylar.MAINTENANCE_DB_COUNT,
                                    'last_run': helpers.utctimestamp()
                                }
                            except Exception as e:
                                print('error_sendit: %s' % e)
                            else:
                                self.db_update_status(send_it)

                            if delete_rows:
                                # only do this on completion, or else the rowids will be different and it will mess up a rerun
                                try:
                                    self.sql_attachmylar()
                                    print(
                                        '[MAINTENANCE-MODE][DB-CONVERSION][CLEANUP] Removing %s invalid RSS entries from table...'
                                        % len(delete_rows))
                                    self.dbmylar.executemany(
                                        "DELETE FROM rssdb WHERE rowid=?",
                                        (delete_rows))
                                    self.sql_closemylar()
                                except Exception as e:
                                    print('error: %s' % e)
                                else:
                                    self.sql_attachmylar()
                                    print(
                                        '[MAINTENANCE-MODE][DB-CONVERSION][CLEANUP] Cleaning up...'
                                    )
                                    self.dbmylar.execute("VACUUM")
                            else:
                                print(
                                    '[MAINTENANCE-MODE][DB-CONVERSION][CLEANUP] Cleaning up...'
                                )
                                self.sql_attachmylar()
                                self.dbmylar.execute("VACUUM")

                            self.sql_closemylar()

                            #toggle back the logging level to what it was originally.
                            self.toggle_logging(level=prev_log_level)
                            logger.info(
                                '[MAINTENANCE-MODE][DB-CONVERSION] Updating DB complete! (%s / %s)'
                                % (mylar.MAINTENANCE_DB_COUNT,
                                   mylar.MAINTENANCE_DB_TOTAL))
                            mylar.MAINTENANCE_UPDATE[:] = [
                                x for x in mylar.MAINTENANCE_UPDATE
                                if not ('rss update' == x.get('mode'))
                            ]

        else:
            mylar.MAINTENANCE_DB_COUNT = 0
            logger.info(
                '[MAINTENANCE-MODE] Update DB set to start - but nothing was provided as to what. Returning to non-maintenance mode'
            )
        return True
Example #9
    def run(self, forcerss=None):
        with rss_lock:

            #logger.info('[RSS-FEEDS] RSS Feed Check was last run at : ' + str(mylar.SCHED_RSS_LAST))
            firstrun = "no"
            #check the last run of rss to make sure it's not hammering.
            if mylar.SCHED_RSS_LAST is None or mylar.SCHED_RSS_LAST == '' or mylar.SCHED_RSS_LAST == '0' or forcerss == True:
                logger.info('[RSS-FEEDS] RSS Feed Check Initializing....')
                firstrun = "yes"
                duration_diff = 0
            else:
                tstamp = float(mylar.SCHED_RSS_LAST)
                duration_diff = abs(helpers.utctimestamp() - tstamp) / 60
            #logger.fdebug('[RSS-FEEDS] Duration diff: %s' % duration_diff)
            if firstrun == "no" and duration_diff < int(
                    mylar.CONFIG.RSS_CHECKINTERVAL):
                logger.fdebug(
                    '[RSS-FEEDS] RSS Check has taken place less than the threshold - not initiating at this time.'
                )
                return

            helpers.job_management(write=True,
                                   job='RSS Feeds',
                                   current_run=helpers.utctimestamp(),
                                   status='Running')
            mylar.RSS_STATUS = 'Running'
            #logger.fdebug('[RSS-FEEDS] Updated RSS Run time to : ' + str(mylar.SCHED_RSS_LAST))

            #function for looping through nzbs/torrent feeds
            if mylar.CONFIG.ENABLE_TORRENT_SEARCH:
                logger.info('[RSS-FEEDS] Initiating Torrent RSS Check.')
                if mylar.CONFIG.ENABLE_PUBLIC:
                    logger.info(
                        '[RSS-FEEDS] Initiating Torrent RSS Feed Check on Demonoid / WorldWideTorrents.'
                    )
                    #rsscheck.torrents(pickfeed='3')   #TP.SE RSS Check (has to be page-parsed)
                    rsscheck.torrents(pickfeed='Public')  # TPSE = DEM RSS Check + WWT RSS Check
                if mylar.CONFIG.ENABLE_32P:
                    logger.info(
                        '[RSS-FEEDS] Initiating Torrent RSS Feed Check on 32P.'
                    )
                    if mylar.CONFIG.MODE_32P == 0:
                        logger.fdebug(
                            '[RSS-FEEDS] 32P mode set to Legacy mode. Monitoring New Releases feed only.'
                        )
                        if any([
                                mylar.CONFIG.PASSKEY_32P is None,
                                mylar.CONFIG.PASSKEY_32P == '',
                                mylar.CONFIG.RSSFEED_32P is None,
                                mylar.CONFIG.RSSFEED_32P == ''
                        ]):
                            logger.error(
                                '[RSS-FEEDS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.'
                            )
                        else:
                            rsscheck.torrents(pickfeed='1',
                                              feedinfo=mylar.KEYS_32P)
                    else:
                        logger.fdebug(
                            '[RSS-FEEDS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed'
                        )
                        if any([
                                mylar.CONFIG.USERNAME_32P is None,
                                mylar.CONFIG.USERNAME_32P == '',
                                mylar.CONFIG.PASSWORD_32P is None
                        ]):
                            logger.error(
                                '[RSS-FEEDS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.'
                            )
                        else:
                            if mylar.KEYS_32P is None:
                                feed32p = auth32p.info32p()
                                feedinfo = feed32p.authenticate()
                                if feedinfo != "disable":
                                    pass
                                else:
                                    mylar.CONFIG.ENABLE_32P = 0
                                    #mylar.config_write()
                            else:
                                feedinfo = mylar.FEEDINFO_32P

                            if feedinfo is None or len(
                                    feedinfo) == 0 or feedinfo == "disable":
                                logger.error(
                                    '[RSS-FEEDS] Unable to retrieve any information from 32P for RSS Feeds. Skipping for now.'
                                )
                            else:
                                rsscheck.torrents(pickfeed='1',
                                                  feedinfo=feedinfo[0])
                                x = 0
                                #assign personal feeds for 32p > +8
                                for fi in feedinfo:
                                    x += 1
                                    pfeed_32p = str(7 + x)
                                    rsscheck.torrents(pickfeed=pfeed_32p,
                                                      feedinfo=fi)

            logger.info(
                '[RSS-FEEDS] Initiating RSS Feed Check for NZB Providers.')
            rsscheck.nzbs(forcerss=forcerss)
            logger.info('[RSS-FEEDS] RSS Feed Check/Update Complete')
            logger.info('[RSS-FEEDS] Watchlist Check for new Releases')
            mylar.search.searchforissue(rsscheck='yes')
            logger.info('[RSS-FEEDS] Watchlist Check complete.')
            if forcerss:
                logger.info('[RSS-FEEDS] Successfully ran a forced RSS Check.')
            helpers.job_management(write=True,
                                   job='RSS Feeds',
                                   last_run_completed=helpers.utctimestamp(),
                                   status='Waiting')
            mylar.RSS_STATUS = 'Waiting'
            return True
Example #10
File: dbupdater.py Project: 2mny/mylar
    def run(self, sched):
        logger.info('[DBUpdate] Updating Database.')
        helpers.job_management(write=True, job='DB Updater', current_run=helpers.utctimestamp(), status='Running')
        mylar.updater.dbUpdate(sched=sched)
        helpers.job_management(write=True, job='DB Updater', last_run_completed=helpers.utctimestamp(), status='Waiting')
Example #11
File: rsscheckit.py Project: claym/mylar
    def run(self, forcerss=None):
        with rss_lock:

            #logger.info('[RSS-FEEDS] RSS Feed Check was last run at : ' + str(mylar.SCHED_RSS_LAST))
            firstrun = "no"
            #check the last run of rss to make sure it's not hammering.
            if mylar.SCHED_RSS_LAST is None or mylar.SCHED_RSS_LAST == '' or mylar.SCHED_RSS_LAST == '0' or forcerss == True:
                logger.info('[RSS-FEEDS] RSS Feed Check Initializing....')
                firstrun = "yes"
                duration_diff = 0
            else:
                tstamp = float(mylar.SCHED_RSS_LAST)
                duration_diff = abs(helpers.utctimestamp() - tstamp)/60
            #logger.fdebug('[RSS-FEEDS] Duration diff: %s' % duration_diff)
            if firstrun == "no" and duration_diff < int(mylar.CONFIG.RSS_CHECKINTERVAL):
                logger.fdebug('[RSS-FEEDS] RSS Check has taken place less than the threshold - not initiating at this time.')
                return

            helpers.job_management(write=True, job='RSS Feeds', current_run=helpers.utctimestamp(), status='Running')
            mylar.RSS_STATUS = 'Running'
            #logger.fdebug('[RSS-FEEDS] Updated RSS Run time to : ' + str(mylar.SCHED_RSS_LAST))

            #function for looping through nzbs/torrent feeds
            if mylar.CONFIG.ENABLE_TORRENT_SEARCH:
                logger.info('[RSS-FEEDS] Initiating Torrent RSS Check.')
                if mylar.CONFIG.ENABLE_PUBLIC:
                    logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on Demonoid / WorldWideTorrents.')
                    rsscheck.torrents(pickfeed='Public')    #TPSE = DEM RSS Check + WWT RSS Check
                if mylar.CONFIG.ENABLE_32P is True:
                    logger.info('[RSS-FEEDS] Initiating Torrent RSS Feed Check on 32P.')
                    if mylar.CONFIG.MODE_32P == 0:
                        logger.fdebug('[RSS-FEEDS] 32P mode set to Legacy mode. Monitoring New Releases feed only.')
                        if any([mylar.CONFIG.PASSKEY_32P is None, mylar.CONFIG.PASSKEY_32P == '', mylar.CONFIG.RSSFEED_32P is None, mylar.CONFIG.RSSFEED_32P == '']):
                            logger.error('[RSS-FEEDS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.')
                        else:
                            rsscheck.torrents(pickfeed='1', feedinfo=mylar.KEYS_32P)
                    else:
                        logger.fdebug('[RSS-FEEDS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed')
                        if any([mylar.CONFIG.USERNAME_32P is None, mylar.CONFIG.USERNAME_32P == '', mylar.CONFIG.PASSWORD_32P is None]):
                            logger.error('[RSS-FEEDS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.')
                        else:
                            if mylar.KEYS_32P is None:
                                feed32p = auth32p.info32p()
                                feedinfo = feed32p.authenticate()
                                if feedinfo != "disable":
                                    pass
                                else:
                                    helpers.disable_provider('32P')
                            else:
                                feedinfo = mylar.FEEDINFO_32P

                            if feedinfo is None or len(feedinfo) == 0 or feedinfo == "disable":
                                logger.error('[RSS-FEEDS] Unable to retrieve any information from 32P for RSS Feeds. Skipping for now.')
                            else:
                                rsscheck.torrents(pickfeed='1', feedinfo=feedinfo[0])
                                x = 0
                                #assign personal feeds for 32p > +8
                                for fi in feedinfo:
                                    x+=1
                                    pfeed_32p = str(7 + x)
                                    rsscheck.torrents(pickfeed=pfeed_32p, feedinfo=fi)

            logger.info('[RSS-FEEDS] Initiating RSS Feed Check for NZB Providers.')
            rsscheck.nzbs(forcerss=forcerss)
            logger.info('[RSS-FEEDS] RSS Feed Check/Update Complete')
            logger.info('[RSS-FEEDS] Watchlist Check for new Releases')
            mylar.search.searchforissue(rsscheck='yes')
            logger.info('[RSS-FEEDS] Watchlist Check complete.')
            if forcerss:
                logger.info('[RSS-FEEDS] Successfully ran a forced RSS Check.')
            helpers.job_management(write=True, job='RSS Feeds', last_run_completed=helpers.utctimestamp(), status='Waiting')
            mylar.RSS_STATUS = 'Waiting'
            return True
Example #12
File: filers.py Project: qubidt/mylar3
    def walk_the_walk(self):
        folder_location = mylar.CONFIG.FOLDER_CACHE_LOCATION
        if folder_location is None:
            return {'status': False}

        logger.info('checking locally...')
        filelist = None

        logger.info('check_folder_cache: %s' % (mylar.CHECK_FOLDER_CACHE))
        if mylar.CHECK_FOLDER_CACHE is not None:
            rd = mylar.CHECK_FOLDER_CACHE #datetime.datetime.utcfromtimestamp(mylar.CHECK_FOLDER_CACHE)
            rd_mins = rd + datetime.timedelta(seconds = 600)  #10 minute cache retention
            rd_now = datetime.datetime.utcfromtimestamp(time.time())
            if calendar.timegm(rd_mins.utctimetuple()) > calendar.timegm(rd_now.utctimetuple()):
                # if < 10 minutes since last check, use cached listing
                logger.info('using cached folder listing since < 10 minutes since last file check.')
                filelist = mylar.FOLDER_CACHE

        if filelist is None:
            logger.info('generating new directory listing for folder_cache')
            flc = filechecker.FileChecker(folder_location, justparse=True, pp_mode=True)
            mylar.FOLDER_CACHE = flc.listFiles()
            mylar.CHECK_FOLDER_CACHE = datetime.datetime.utcfromtimestamp(helpers.utctimestamp())

        local_status = False
        filepath = None
        filename = None
        for fl in mylar.FOLDER_CACHE['comiclist']:
            logger.info('fl: %s' % (fl,))
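            # pull issue metadata from whichever context applies:
            # story arc, watchlist comic, or weekly one-off pull entry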
            if self.arc is not None:
                comicname = self.arc['ComicName']
                corrected_type = None
                alternatesearch = None
                booktype = self.arc['Type']
                publisher = self.arc['Publisher']
                issuenumber = self.arc['IssueNumber']
                issuedate = self.arc['IssueDate']
                issuename = self.arc['IssueName']
                issuestatus = self.arc['Status']
            elif self.comic is not None:
                comicname = self.comic['ComicName']
                booktype = self.comic['Type']
                corrected_type = self.comic['Corrected_Type']
                alternatesearch = self.comic['AlternateSearch']
                publisher = self.comic['ComicPublisher']
                issuenumber = self.issue['Issue_Number']
                issuedate = self.issue['IssueDate']
                issuename = self.issue['IssueName']
                issuestatus = self.issue['Status']
            else:
                # weekly - (one/off)
                comicname = self.weekly['COMIC']
                booktype = self.weekly['format']
                corrected_type = None
                alternatesearch = None
                publisher = self.weekly['PUBLISHER']
                issuenumber = self.weekly['ISSUE']
                issuedate = self.weekly['SHIPDATE']
                issuename = None
                issuestatus = self.weekly['STATUS']

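            # normalize collected-edition booktypes (TPB/GN/HC/One-Shot) so the
            # issue-number check below can treat them as single volumes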
            if booktype is not None:
                if (all([booktype != 'Print', booktype != 'Digital', booktype != 'None', booktype is not None]) and corrected_type != 'Print') or any([corrected_type == 'TPB', corrected_type == 'GN', corrected_type == 'HC']):
                    if booktype == 'One-Shot' and corrected_type is None:
                        booktype = 'One-Shot'
                    else:
                        if booktype == 'GN' and corrected_type is None:
                            booktype = 'GN'
                        elif booktype == 'HC' and corrected_type is None:
                            booktype = 'HC'
                        else:
                            booktype = 'TPB'

            wm = filechecker.FileChecker(watchcomic=comicname, Publisher=publisher, AlternateSearch=alternatesearch)
            watchmatch = wm.matchIT(fl)

            logger.info('watchmatch: %s' % (watchmatch,))

            # this is all for a really general type of match - if passed, the post-processing checks will do the real brunt work
            if watchmatch['process_status'] == 'fail':
                continue

            if watchmatch['justthedigits'] is not None:
                temploc = watchmatch['justthedigits'].replace('_', ' ')
                if "Director's Cut" not in temploc:
                    temploc = re.sub(r'[\#\']', '', temploc)
            else:
                if any([booktype == 'TPB', booktype =='GN', booktype == 'HC', booktype == 'One-Shot']):
                    temploc = '1'
                else:
                    temploc = None
                    continue

            int_iss = helpers.issuedigits(issuenumber)
            issyear = issuedate[:4]
            old_status = issuestatus
            issname = issuename


            if temploc is not None:
                fcdigit = helpers.issuedigits(temploc)
            elif any([booktype == 'TPB', booktype == 'GN', booktype == 'HC', booktype == 'One-Shot']) and temploc is None:
                fcdigit = helpers.issuedigits('1')

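            # a normalized issue-number match means the file already exists locally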
            if int(fcdigit) == int_iss:
                logger.fdebug('[%s] Issue match - #%s' % (self.issueid, self.issue['Issue_Number']))
                local_status = True
                if watchmatch['sub'] is None:
                    filepath = watchmatch['comiclocation']
                    filename = watchmatch['comicfilename']
                else:
                    filepath = os.path.join(watchmatch['comiclocation'], watchmatch['sub'])
                    filename = watchmatch['comicfilename']
                break


        #if local_status is True:
            #try:
            #    copied_folder = os.path.join(mylar.CONFIG.CACHE_DIR, 'tmp_filer')
            #    if os.path.exists(copied_folder):
            #        shutil.rmtree(copied_folder)
            #    os.mkdir(copied_folder)
            #    logger.info('created temp directory: %s' % copied_folder)
            #    shutil.copy(os.path.join(filepath, filename), copied_folder)

            #except Exception as e:
            #    logger.error('[%s] error: %s' % (e, filepath))
            #    filepath = None
            #    local_status = False
            #else:
            #filepath = os.path.join(copied_folder, filename)
            #logger.info('Successfully copied file : %s' % filepath)

        return {'status': local_status,
                'filename': filename,
                'filepath': filepath}
Example #13
def start():

    global _INITIALIZED, started

    with INIT_LOCK:

        if _INITIALIZED:

            #load up the previous runs from the job sql table so we know stuff...
            monitors = helpers.job_management()
            SCHED_WEEKLY_LAST = monitors['weekly']
            SCHED_SEARCH_LAST = monitors['search']
            SCHED_UPDATER_LAST = monitors['dbupdater']
            SCHED_MONITOR_LAST = monitors['monitor']
            SCHED_VERSION_LAST = monitors['version']
            SCHED_RSS_LAST = monitors['rss']

            # Start our scheduled background tasks
            SCHED.add_job(func=updater.dbUpdate, id='dbupdater', name='DB Updater', args=[None,None,True], trigger=IntervalTrigger(hours=0, minutes=5, timezone='UTC'))

            #let's do a run at the Wanted issues here (on startup) if enabled.
            ss = searchit.CurrentSearcher()
            if CONFIG.NZB_STARTUP_SEARCH:
                SCHED.add_job(func=ss.run, id='search', next_run_time=datetime.datetime.utcnow(), name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
            else:
                if SCHED_SEARCH_LAST is not None:
                    search_timestamp = float(SCHED_SEARCH_LAST)
                    logger.fdebug('[AUTO-SEARCH] Search last run @ %s' % datetime.datetime.utcfromtimestamp(search_timestamp))
                else:
                    search_timestamp = helpers.utctimestamp() + (int(CONFIG.SEARCH_INTERVAL) *60)

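                # minutes since the last search run; negative while the next run is still pending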
                duration_diff = (helpers.utctimestamp() - search_timestamp)/60
                if duration_diff >= int(CONFIG.SEARCH_INTERVAL):
                    logger.fdebug('[AUTO-SEARCH] Auto-Search set to a delay of one minute before initialization as it has been %s minutes since the last run' % duration_diff)
                    SCHED.add_job(func=ss.run, id='search', name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
                else:
                    search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60)  - (duration_diff*60)))
                    logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL))
                    SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))

            if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]):
                logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')
                SNPOOL = threading.Thread(target=helpers.worker_main, args=(SNATCHED_QUEUE,), name="AUTO-SNATCHER")
                SNPOOL.start()
                logger.info('[AUTO-SNATCHER] Successfully started Auto-Snatch add-on - will now monitor for completed torrents on client....')

            if CONFIG.POST_PROCESSING is True and ( all([CONFIG.NZB_DOWNLOADER == 0, CONFIG.SAB_CLIENT_POST_PROCESSING is True]) or all([CONFIG.NZB_DOWNLOADER == 1, CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]) ):
                if CONFIG.NZB_DOWNLOADER == 0:
                    logger.info('[SAB-MONITOR] Completed post-processing handling enabled for SABnzbd. Attempting to background load....')
                elif CONFIG.NZB_DOWNLOADER == 1:
                    logger.info('[NZBGET-MONITOR] Completed post-processing handling enabled for NZBGet. Attempting to background load....')
                NZBPOOL = threading.Thread(target=helpers.nzb_monitor, args=(NZB_QUEUE,), name="AUTO-COMPLETE-NZB")
                NZBPOOL.start()
                if CONFIG.NZB_DOWNLOADER == 0:
                    logger.info('[AUTO-COMPLETE-NZB] Successfully started Completed post-processing handling for SABnzbd - will now monitor for completed nzbs within sabnzbd and post-process automatically....')
                elif CONFIG.NZB_DOWNLOADER == 1:
                    logger.info('[AUTO-COMPLETE-NZB] Successfully started Completed post-processing handling for NZBGet - will now monitor for completed nzbs within nzbget and post-process automatically....')


            helpers.latestdate_fix()

            if CONFIG.ALT_PULL == 2:
                weektimer = 4
            else:
                weektimer = 24

            #weekly pull list gets messed up if it's not populated first, so let's populate it then set the scheduler.
            logger.info('[WEEKLY] Checking for existence of Weekly Comic listing...')

            #now the scheduler (check every 24 hours)
            weekly_interval = weektimer * 60 * 60
            try:
                if SCHED_WEEKLY_LAST:
                    pass
            except:
                SCHED_WEEKLY_LAST = None

            weektimestamp = helpers.utctimestamp()
            if SCHED_WEEKLY_LAST is not None:
                weekly_timestamp = float(SCHED_WEEKLY_LAST)
            else:
                weekly_timestamp = weektimestamp + weekly_interval

            ws = weeklypullit.Weekly()
            duration_diff = (weektimestamp - weekly_timestamp)/60

            if abs(duration_diff) >= weekly_interval/60:
                logger.info('[WEEKLY] Weekly Pull-Update initializing immediately as it has been %s hours since the last run' % abs(duration_diff/60))
                SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
            else:
                weekly_diff = datetime.datetime.utcfromtimestamp(weektimestamp + (weekly_interval - (duration_diff * 60)))
                logger.fdebug('[WEEKLY] Scheduling next run for @ %s every %s hours' % (weekly_diff, weektimer))
                SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=weekly_diff, trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))

            #initiate startup rss feeds for torrents/nzbs here...
            if CONFIG.ENABLE_RSS:
                logger.info('[RSS-FEEDS] Initiating startup-RSS feed checks.')
                if SCHED_RSS_LAST is not None:
                    rss_timestamp = float(SCHED_RSS_LAST)
                    logger.info('[RSS-FEEDS] RSS last run @ %s' % datetime.datetime.utcfromtimestamp(rss_timestamp))
                else:
                    rss_timestamp = helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) *60)
                rs = rsscheckit.tehMain()
                duration_diff = (helpers.utctimestamp() - rss_timestamp)/60
                if duration_diff >= int(CONFIG.RSS_CHECKINTERVAL):
                    SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
                else:
                    rss_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) * 60) - (duration_diff * 60))
                    logger.fdebug('[RSS-FEEDS] Scheduling next run for @ %s every %s minutes' % (rss_diff, CONFIG.RSS_CHECKINTERVAL))
                    SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=rss_diff, trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))

            if CONFIG.CHECK_GITHUB:
                vs = versioncheckit.CheckVersion()
                SCHED.add_job(func=vs.run, id='version', name='Check Version', trigger=IntervalTrigger(hours=0, minutes=CONFIG.CHECK_GITHUB_INTERVAL, timezone='UTC'))

            ##run checkFolder every X minutes (basically Manual Run Post-Processing)
            if CONFIG.ENABLE_CHECK_FOLDER:
                if CONFIG.DOWNLOAD_SCAN_INTERVAL >0:
                    logger.info('[FOLDER MONITOR] Enabling folder monitor for : ' + str(CONFIG.CHECK_FOLDER) + ' every ' + str(CONFIG.DOWNLOAD_SCAN_INTERVAL) + ' minutes.')
                    fm = PostProcessor.FolderCheck()
                    SCHED.add_job(func=fm.run, id='monitor', name='Folder Monitor', trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.DOWNLOAD_SCAN_INTERVAL), timezone='UTC'))
                else:
                    logger.error('[FOLDER MONITOR] You need to specify a monitoring time for the check folder option to work')

            logger.info('Firing up the Background Schedulers now....')
            try:
                SCHED.start()
                #update the job db here
                logger.info('Background Schedulers successfully started...')
                helpers.job_management(write=True)
            except Exception as e:
                logger.info(e)
                SCHED.print_jobs()

        started = True
Example #14
def start():

    global _INITIALIZED, started

    with INIT_LOCK:

        if _INITIALIZED:

            #load up the previous runs from the job sql table so we know stuff...
            monitors = helpers.job_management()
            SCHED_WEEKLY_LAST = monitors['weekly']
            SCHED_SEARCH_LAST = monitors['search']
            SCHED_UPDATER_LAST = monitors['dbupdater']
            SCHED_MONITOR_LAST = monitors['monitor']
            SCHED_VERSION_LAST = monitors['version']
            SCHED_RSS_LAST = monitors['rss']

            # Start our scheduled background tasks
            SCHED.add_job(func=updater.dbUpdate, id='dbupdater', name='DB Updater', args=[None,None,True], trigger=IntervalTrigger(hours=5, minutes=5, timezone='UTC'))

            #let's do a run at the Wanted issues here (on startup) if enabled.
            ss = searchit.CurrentSearcher()
            if CONFIG.NZB_STARTUP_SEARCH:
                SCHED.add_job(func=ss.run, id='search', next_run_time=datetime.datetime.utcnow(), name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
            else:
                if SCHED_SEARCH_LAST is not None:
                    search_timestamp = float(SCHED_SEARCH_LAST)
                    logger.fdebug('[AUTO-SEARCH] Search last run @ %s' % datetime.datetime.utcfromtimestamp(search_timestamp))
                else:
                    search_timestamp = helpers.utctimestamp() + (int(CONFIG.SEARCH_INTERVAL) *60)

                duration_diff = (helpers.utctimestamp() - search_timestamp)/60
                if duration_diff >= int(CONFIG.SEARCH_INTERVAL):
                    logger.fdebug('[AUTO-SEARCH] Auto-Search set to a delay of one minute before initialization as it has been %s minutes since the last run' % duration_diff)
                    SCHED.add_job(func=ss.run, id='search', name='Auto-Search', trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))
                else:
                    search_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + ((int(CONFIG.SEARCH_INTERVAL) * 60)  - (duration_diff*60)))
                    logger.fdebug('[AUTO-SEARCH] Scheduling next run @ %s every %s minutes' % (search_diff, CONFIG.SEARCH_INTERVAL))
                    SCHED.add_job(func=ss.run, id='search', name='Auto-Search', next_run_time=search_diff, trigger=IntervalTrigger(hours=0, minutes=CONFIG.SEARCH_INTERVAL, timezone='UTC'))

            if all([CONFIG.ENABLE_TORRENTS, CONFIG.AUTO_SNATCH, OS_DETECT != 'Windows']) and any([CONFIG.TORRENT_DOWNLOADER == 2, CONFIG.TORRENT_DOWNLOADER == 4]):
                logger.info('[AUTO-SNATCHER] Auto-Snatch of completed torrents enabled & attempting to background load....')
                SNPOOL = threading.Thread(target=helpers.worker_main, args=(SNATCHED_QUEUE,), name="AUTO-SNATCHER")
                SNPOOL.start()
                logger.info('[AUTO-SNATCHER] Successfully started Auto-Snatch add-on - will now monitor for completed torrents on client....')

            if CONFIG.POST_PROCESSING is True and ( all([CONFIG.NZB_DOWNLOADER == 0, CONFIG.SAB_CLIENT_POST_PROCESSING is True]) or all([CONFIG.NZB_DOWNLOADER == 1, CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]) ):
                if CONFIG.NZB_DOWNLOADER == 0:
                    logger.info('[SAB-MONITOR] Completed post-processing handling enabled for SABnzbd. Attempting to background load....')
                elif CONFIG.NZB_DOWNLOADER == 1:
                    logger.info('[NZBGET-MONITOR] Completed post-processing handling enabled for NZBGet. Attempting to background load....')
                NZBPOOL = threading.Thread(target=helpers.nzb_monitor, args=(NZB_QUEUE,), name="AUTO-COMPLETE-NZB")
                NZBPOOL.start()
                if CONFIG.NZB_DOWNLOADER == 0:
                    logger.info('[AUTO-COMPLETE-NZB] Successfully started Completed post-processing handling for SABnzbd - will now monitor for completed nzbs within sabnzbd and post-process automatically....')
                elif CONFIG.NZB_DOWNLOADER == 1:
                    logger.info('[AUTO-COMPLETE-NZB] Successfully started Completed post-processing handling for NZBGet - will now monitor for completed nzbs within nzbget and post-process automatically....')

            logger.info('[SEARCH-QUEUE] Attempting to background load the search queue....')
            SEARCHPOOL = threading.Thread(target=helpers.search_queue, args=(SEARCH_QUEUE,), name="SEARCH-QUEUE")
            SEARCHPOOL.start()

            if all([CONFIG.POST_PROCESSING is True, CONFIG.API_ENABLED is True]):
                logger.info('[POST-PROCESS-QUEUE] Post Process queue enabled & monitoring for api requests....')
                PPPOOL = threading.Thread(target=helpers.postprocess_main, args=(PP_QUEUE,), name="POST-PROCESS-QUEUE")
                PPPOOL.start()
                logger.info('[POST-PROCESS-QUEUE] Successfully started Post-Processing Queuer....')

            helpers.latestdate_fix()

            if CONFIG.ALT_PULL == 2:
                weektimer = 4
            else:
                weektimer = 24

            #weekly pull list gets messed up if it's not populated first, so let's populate it then set the scheduler.
            logger.info('[WEEKLY] Checking for existence of Weekly Comic listing...')

            #now the scheduler (check every 24 hours)
            weekly_interval = weektimer * 60 * 60
            try:
                if SCHED_WEEKLY_LAST:
                    pass
            except:
                SCHED_WEEKLY_LAST = None

            weektimestamp = helpers.utctimestamp()
            if SCHED_WEEKLY_LAST is not None:
                weekly_timestamp = float(SCHED_WEEKLY_LAST)
            else:
                weekly_timestamp = weektimestamp + weekly_interval

            ws = weeklypullit.Weekly()
            duration_diff = (weektimestamp - weekly_timestamp)/60

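            # fire immediately when a full interval has elapsed since the last run;
            # otherwise schedule the first run for the remainder of the interval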
            if abs(duration_diff) >= weekly_interval/60:
                logger.info('[WEEKLY] Weekly Pull-Update initializing immediately as it has been %s hours since the last run' % abs(duration_diff/60))
                SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))
            else:
                weekly_diff = datetime.datetime.utcfromtimestamp(weektimestamp + (weekly_interval - (duration_diff * 60)))
                logger.fdebug('[WEEKLY] Scheduling next run for @ %s every %s hours' % (weekly_diff, weektimer))
                SCHED.add_job(func=ws.run, id='weekly', name='Weekly Pullist', next_run_time=weekly_diff, trigger=IntervalTrigger(hours=weektimer, minutes=0, timezone='UTC'))

            #initiate startup rss feeds for torrents/nzbs here...
            rs = rsscheckit.tehMain()
            if CONFIG.ENABLE_RSS:
                logger.info('[RSS-FEEDS] Initiating startup-RSS feed checks.')
                if SCHED_RSS_LAST is not None:
                    rss_timestamp = float(SCHED_RSS_LAST)
                    logger.info('[RSS-FEEDS] RSS last run @ %s' % datetime.datetime.utcfromtimestamp(rss_timestamp))
                else:
                    rss_timestamp = helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) *60)
                duration_diff = (helpers.utctimestamp() - rss_timestamp)/60
                if duration_diff >= int(CONFIG.RSS_CHECKINTERVAL):
                    SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=datetime.datetime.utcnow(), trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
                else:
                    rss_diff = datetime.datetime.utcfromtimestamp(helpers.utctimestamp() + (int(CONFIG.RSS_CHECKINTERVAL) * 60) - (duration_diff * 60))
                    logger.fdebug('[RSS-FEEDS] Scheduling next run for @ %s every %s minutes' % (rss_diff, CONFIG.RSS_CHECKINTERVAL))
                    SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], next_run_time=rss_diff, trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
            #else:
            #    SCHED.add_job(func=rs.run, id='rss', name='RSS Feeds', args=[True], trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.RSS_CHECKINTERVAL), timezone='UTC'))
            #    SCHED.pause_job('rss')

            if CONFIG.CHECK_GITHUB:
                vs = versioncheckit.CheckVersion()
                SCHED.add_job(func=vs.run, id='version', name='Check Version', trigger=IntervalTrigger(hours=0, minutes=CONFIG.CHECK_GITHUB_INTERVAL, timezone='UTC'))

            ##run checkFolder every X minutes (basically Manual Run Post-Processing)
            if CONFIG.ENABLE_CHECK_FOLDER:
                if CONFIG.DOWNLOAD_SCAN_INTERVAL >0:
                    logger.info('[FOLDER MONITOR] Enabling folder monitor for : ' + str(CONFIG.CHECK_FOLDER) + ' every ' + str(CONFIG.DOWNLOAD_SCAN_INTERVAL) + ' minutes.')
                    fm = PostProcessor.FolderCheck()
                    SCHED.add_job(func=fm.run, id='monitor', name='Folder Monitor', trigger=IntervalTrigger(hours=0, minutes=int(CONFIG.DOWNLOAD_SCAN_INTERVAL), timezone='UTC'))
                else:
                    logger.error('[FOLDER MONITOR] You need to specify a monitoring time for the check folder option to work')

            logger.info('Firing up the Background Schedulers now....')
            try:
                SCHED.start()
                #update the job db here
                logger.info('Background Schedulers successfully started...')
                helpers.job_management(write=True)
            except Exception as e:
                logger.info(e)
                SCHED.print_jobs()

        started = True