Code Example #1
File: logger.py Project: wraslor/mylar
    def log(self, message, level):

        logger = logging.getLogger('mylar')

        threadname = threading.currentThread().getName()

        if level != 'DEBUG':
            if mylar.OS_DETECT == "Windows" and mylar.OS_ENCODING != "utf-8":
                tmpthedate = unicodedata.normalize(
                    'NFKD',
                    helpers.now().decode(mylar.OS_ENCODING, "replace"))
            else:
                tmpthedate = helpers.now()
            mylar.LOG_LIST.insert(0, (tmpthedate, message, level, threadname))

        message = threadname + ' : ' + message

        if level == 'DEBUG':
            logger.debug(message)
        elif level == 'INFO':
            logger.info(message)
        elif level == 'WARNING':
            logger.warning(message)
        elif level == 'FDEBUG':
            logger.debug(message)
        else:
            logger.error(message)
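
The if/elif ladder above recurs in every fork of this logger. A minimal sketch of the same dispatch as a lookup table (dispatch and LEVEL_MAP are illustrative names; FDEBUG is mylar's own verbose-debug tag, which the chain above also routes to debug):

import logging

LEVEL_MAP = {
    'DEBUG': logging.DEBUG,
    'FDEBUG': logging.DEBUG,   # mylar's verbose-debug alias
    'INFO': logging.INFO,
    'WARNING': logging.WARNING,
}

def dispatch(logger, message, level):
    # unknown level strings fall through to ERROR, matching the final else above
    logger.log(LEVEL_MAP.get(level, logging.ERROR), message)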
Code Example #2
File: logger.py Project: Decipher/mylar
    def log(self, message, level):

        logger = logging.getLogger('mylar')
        
        threadname = threading.currentThread().getName()
        
        if level != 'DEBUG':
            if mylar.OS_DETECT == "Windows" and mylar.OS_ENCODING != "utf-8":
                tmpthedate = unicodedata.normalize('NFKD', helpers.now().decode(mylar.OS_ENCODING, "replace"))
            else:
                tmpthedate = helpers.now()
            mylar.LOG_LIST.insert(0, (tmpthedate, message, level, threadname))
        
        message = threadname + ' : ' + message

        if level == 'DEBUG':
            logger.debug(message)
        elif level == 'INFO':
            logger.info(message)
        elif level == 'WARNING':
            logger.warning(message)
        elif level == 'FDEBUG':
            logger.debug(message)
        else:
            logger.error(message)
Code Example #3
File: updater.py Project: mriutta/mylar
def foundsearch(ComicID, IssueID, down=None):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.

    # this is all redundant code that forceRescan already does.
    # should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue it just downloaded and
    # changes the status to Snatched accordingly. The have count is not incremented
    # at this stage since nothing has been downloaded yet - only the .nzb has been snatched and sent to SAB.

    myDB = db.DBConnection()
    comic = myDB.action('SELECT * FROM comics WHERE ComicID=?',
                        [ComicID]).fetchone()
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=?',
                        [IssueID]).fetchone()
    CYear = issue['IssueDate'][:4]

    if down is None:
        # update the status to Snatched (so it won't keep on re-downloading!)
        logger.fdebug("updating status to snatched")
        controlValue = {"IssueID": IssueID}
        newValue = {"Status": "Snatched"}
        myDB.upsert("issues", newValue, controlValue)

        # update the snatched DB
        snatchedupdate = {"IssueID": IssueID, "Status": "Snatched"}
        newsnatchValues = {
            "ComicName": comic['ComicName'],
            "ComicID": ComicID,
            "Issue_Number": issue['Issue_Number'],
            "DateAdded": helpers.now(),
            "Status": "Snatched"
        }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    else:
        snatchedupdate = {"IssueID": IssueID, "Status": "Downloaded"}
        newsnatchValues = {
            "ComicName": comic['ComicName'],
            "ComicID": ComicID,
            "Issue_Number": issue['Issue_Number'],
            "DateAdded": helpers.now(),
            "Status": "Downloaded"
        }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)

    #print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + comic['ComicName'] +
                " issue: " + str(issue['Issue_Number']))
    return
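
Every status change above goes through myDB.upsert(table, newValue, controlValue). A rough sketch of what that calling convention amounts to in plain SQLite, as an illustration only (not mylar's actual db.py; table and column names are assumed trusted, as they are literals above):

import sqlite3

def upsert(conn, table, new_values, control_values):
    # try an UPDATE keyed on the control columns first...
    set_sql = ', '.join('%s=?' % k for k in new_values)
    where_sql = ' AND '.join('%s=?' % k for k in control_values)
    params = list(new_values.values()) + list(control_values.values())
    cur = conn.execute('UPDATE %s SET %s WHERE %s' % (table, set_sql, where_sql), params)
    # ...and INSERT a fresh row if nothing matched
    if cur.rowcount == 0:
        cols = list(control_values) + list(new_values)
        vals = list(control_values.values()) + list(new_values.values())
        placeholders = ', '.join('?' * len(cols))
        conn.execute('INSERT INTO %s (%s) VALUES (%s)' % (table, ', '.join(cols), placeholders), vals)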
Code Example #4
File: updater.py Project: ChaniD/mylar
def foundsearch(ComicID, IssueID, down=None):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.

    # this is all redundant code that forceRescan already does.
    # should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue it just downloaded and
    # changes the status to Snatched accordingly. The have count is not incremented
    # at this stage since nothing has been downloaded yet - only the .nzb has been snatched and sent to SAB.

    myDB = db.DBConnection()
    comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
    CYear = issue['IssueDate'][:4]

    if down is None:
        # update the status to Snatched (so it won't keep on re-downloading!)
        logger.fdebug("updating status to snatched")
        controlValue = {"IssueID":   IssueID}
        newValue = {"Status":    "Snatched"}
        myDB.upsert("issues", newValue, controlValue)

        # update the snatched DB
        snatchedupdate = {"IssueID":     IssueID,
                          "Status":      "Snatched"
                          }
        newsnatchValues = {"ComicName":       comic['ComicName'],
                           "ComicID":         ComicID,
                           "Issue_Number":    issue['Issue_Number'],
                           "DateAdded":       helpers.now(),
                           "Status":          "Snatched"
                           }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    else:
        snatchedupdate = {"IssueID":     IssueID,
                          "Status":      "Downloaded"
                          }
        newsnatchValues = {"ComicName":       comic['ComicName'],
                           "ComicID":         ComicID,
                           "Issue_Number":    issue['Issue_Number'],
                           "DateAdded":       helpers.now(),
                           "Status":          "Downloaded"
                           }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)


    #print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + comic['ComicName'] + " issue: " + str(issue['Issue_Number']))
    return
Code Example #5
File: logger.py Project: yonkyunior/mylar
        def log(message, level):
            logger = logging.getLogger('mylar')

            threadname = threading.currentThread().getName()

            # Get the frame data of the method that made the original logger call
            if len(inspect.stack()) > 2:
                frame = inspect.getframeinfo(inspect.stack()[2][0])
                program = os.path.basename(frame.filename)
                method = frame.function
                lineno = frame.lineno
            else:
                program = ""
                method = ""
                lineno = ""

            if PY2:
                message = safe_unicode(message)
                message = message.encode(mylar.SYS_ENCODING)
            if level != 'DEBUG' or mylar.LOG_LEVEL >= 2:
                mylar.LOGLIST.insert(
                    0, (helpers.now(), message, level, threadname))
                if len(mylar.LOGLIST) > 2500:
                    del mylar.LOGLIST[-1]

            message = "%s : %s:%s:%s : %s" % (threadname, program, method,
                                              lineno, message)
            if level == 'DEBUG':
                logger.debug(message)
            elif level == 'INFO':
                logger.info(message)
            elif level == 'WARNING':
                logger.warning(message)
            else:
                logger.error(message)
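
This newer variant also records where each log call came from. The frame walk isolated as a self-contained sketch (caller_info is an illustrative name):

import inspect
import os

def caller_info(depth=2):
    # frame 0 is this helper, frame 1 its caller, frame 2 the code that
    # invoked the logger - the same offset the log() function above uses
    stack = inspect.stack()
    if len(stack) > depth:
        frame = inspect.getframeinfo(stack[depth][0])
        return os.path.basename(frame.filename), frame.function, frame.lineno
    return "", "", ""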
Code Example #6
    def markFailed(self):
        #use this to forcibly mark a single issue as Failed (i.e. if a search result is sent to a client
        #but comes back with a 404 or something else that prevents the download from being initiated).
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        logger.info(module + ' Marking as a Failed Download.')

        logger.fdebug(module + ' nzb_name: ' + self.nzb_name)
        logger.fdebug(module + ' issueid: ' + str(self.issueid))
        logger.fdebug(module + ' nzb_id: ' + str(self.id))
        logger.fdebug(module + ' prov: ' + self.prov)

        logger.fdebug('oneoffinfo: ' + str(self.oneoffinfo))
        if self.oneoffinfo:
            ComicName = self.oneoffinfo['ComicName']
            IssueNumber = self.oneoffinfo['IssueNumber']

        else:
            if 'annual' in self.nzb_name.lower():
                logger.info(module + ' Annual detected.')
                annchk = "yes"
                issuenzb = myDB.selectone(
                    "SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL",
                    [self.issueid]).fetchone()
            else:
                issuenzb = myDB.selectone(
                    "SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL",
                    [self.issueid]).fetchone()

            ctrlVal = {"IssueID": self.issueid}
            Vals = {"Status": 'Failed'}
            myDB.upsert("issues", Vals, ctrlVal)
            ComicName = issuenzb['ComicName']
            IssueNumber = issuenzb['Issue_Number']

        ctrlVal = {
            "ID": self.id,
            "Provider": self.prov,
            "NZBName": self.nzb_name
        }
        Vals = {
            "Status": 'Failed',
            "ComicName": ComicName,
            "Issue_Number": IssueNumber,
            "IssueID": self.issueid,
            "ComicID": self.comicid,
            "DateFailed": helpers.now()
        }
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
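
The lookup above switches between the annuals and issues tables based on the release name. A hedged sketch of that selection against a plain sqlite3 connection (fetch_issue is an illustrative name; IS NOT NULL is the standard spelling of the 'ComicName NOT NULL' guard in the queries above):

import sqlite3

def fetch_issue(conn, issueid, nzb_name):
    # annuals live in their own table; everything else is a regular issue
    table = 'annuals' if 'annual' in nzb_name.lower() else 'issues'
    return conn.execute(
        'SELECT * FROM %s WHERE IssueID=? AND ComicName IS NOT NULL' % table,
        [issueid]).fetchone()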
Code Example #7
File: rsscheck.py Project: citrusy/mylar
def tehMain(forcerss=None):
    logger.info('RSS Feed Check was last run at : ' + str(mylar.RSS_LASTRUN))
    firstrun = "no"
    #check the last run of rss to make sure it's not hammering.
    if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0' or forcerss == True:
        logger.info('RSS Feed Check First Ever Run.')
        firstrun = "yes"
        mins = 0
    else:
        c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN,
                                                "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        mins = (absdiff.days * 24 * 60 * 60 +
                absdiff.seconds) / 60.0  #divide by 3600 instead for hours.

    if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL):
        logger.fdebug(
            'RSS Check has taken place less than the threshold - not initiating at this time.'
        )
        return

    mylar.RSS_LASTRUN = helpers.now()
    logger.fdebug('Updating RSS Run time to : ' + str(mylar.RSS_LASTRUN))
    mylar.config_write()

    #function for looping through nzbs/torrent feeds
    if mylar.ENABLE_TORRENTS:
        logger.fdebug('[RSS] Initiating Torrent RSS Check.')
        if mylar.ENABLE_KAT:
            logger.fdebug('[RSS] Initiating Torrent RSS Feed Check on KAT.')
            torrents(pickfeed='3')
        if mylar.ENABLE_CBT:
            logger.fdebug('[RSS] Initiating Torrent RSS Feed Check on CBT.')
            torrents(pickfeed='1')
            torrents(pickfeed='4')
    logger.fdebug('[RSS] Initiating RSS Feed Check for NZB Providers.')
    nzbs()
    logger.fdebug('[RSS] RSS Feed Check/Update Complete')
    logger.fdebug('[RSS] Watchlist Check for new Releases')
    #if mylar.ENABLE_TORRENTS:
    #    if mylar.ENABLE_KAT:
    #        search.searchforissue(rsscheck='yes')
    #    if mylar.ENABLE_CBT:
    mylar.search.searchforissue(rsscheck='yes')
    #nzbcheck here
    #nzbs(rsscheck='yes')
    logger.fdebug('[RSS] Watchlist Check complete.')
    return
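
The throttle at the top converts the stored RSS_LASTRUN timestamp into minutes elapsed. The same arithmetic as a standalone sketch (minutes_since is an illustrative name; timedelta.total_seconds() would do the same job on Python 2.7+):

import datetime

def minutes_since(lastrun, fmt="%Y-%m-%d %H:%M:%S"):
    then = datetime.datetime.strptime(lastrun, fmt)
    diff = abs(datetime.datetime.now() - then)
    # each day contributes 24 * 60 * 60 seconds; dividing by 60.0 yields minutes
    return (diff.days * 24 * 60 * 60 + diff.seconds) / 60.0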
Code Example #8
File: updater.py Project: partymike/mylar
def foundsearch(ComicID, IssueID):
    myDB = db.DBConnection()
    #print ("Updater-ComicID: " + str(ComicID))
    #print ("Updater-IssueID: " + str(IssueID))
    comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
    #print ("comic location: " + comic['ComicLocation'])
    #this is too soon - file hasn't downloaded even yet.
    #fixed and addressed in search.py and follow-thru here!
    #check sab history for completion here :)
    CYear = issue['IssueDate'][:4]
    print("year: " + str(CYear))
    #slog = myDB.action('SELECT * FROM sablog WHERE ComicName=? AND ComicYEAR=?', [issue['ComicName'], str(CYear)]).fetchone()
    #this checks the active queue for downloading/non-existant jobs
    #--end queue check
    #this checks history for completed jobs...
    #---
    #-- end history check

    fc = filechecker.listFiles(comic['ComicLocation'], comic['ComicName'])
    HaveDict = {"ComicID": ComicID}
    newHave = { "Have":    fc['comiccount'] }
    myDB.upsert("comics", newHave, HaveDict)
    #---
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=? AND ComicID=?', [IssueID, ComicID]).fetchone()
    #print ("updating status to snatched")
    controlValueDict = {"IssueID":  IssueID}
    newValueDict = {"Status": "Snatched"}
    #print ("updating snatched db.")
    myDB.upsert("issues", newValueDict, controlValueDict)
    snatchedupdate = {"IssueID":     IssueID}
    newsnatchValues = {"ComicName":       comic['ComicName'],
                       "ComicID":         ComicID,
                       "Issue_Number":    issue['Issue_Number'],
                       "DateAdded":       helpers.now(),
                       "Status":          "Snatched"
                       }
    myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    #we need to update sablog now to mark the nzo_id row as being completed and not used again.
    #this becomes an issue with files downloaded x2 or same name...


    #print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + str(comic['ComicName']) + " issue: " + str(issue['Issue_Number']))
    return
Code Example #9
File: rsscheck.py Project: GingerCowboy/mylar
def tehMain():
    logger.info('RSS Feed Check was last run at : ' + str(mylar.RSS_LASTRUN))
    firstrun = "no"
    #check the last run of rss to make sure it's not hammering.
    if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0':
        logger.info('RSS Feed Check First Ever Run.')
        firstrun = "yes"
        mins = 0
    else:
        c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0  #divide by 3600 instead for hours.

    if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL):
        logger.fdebug('RSS Check has taken place less than the threshold - not initiating at this time.')
        return

    mylar.RSS_LASTRUN = helpers.now()
    logger.fdebug('Updating RSS Run time to : ' + str(mylar.RSS_LASTRUN))
    mylar.config_write()

    #function for looping through nzbs/torrent feeds
    if mylar.ENABLE_TORRENTS:
        logger.fdebug("[RSS] Initiating Torrent RSS Check.")
        if mylar.ENABLE_KAT:
            logger.fdebug('[RSS] Initiating Torrent RSS Feed Check on KAT.')
            torrents(pickfeed='3')
        if mylar.ENABLE_CBT:
            logger.fdebug('[RSS] Initiating Torrent RSS Feed Check on CBT.')
            torrents(pickfeed='1')
            torrents(pickfeed='4')
    logger.fdebug('[RSS] Initiating RSS Feed Check for NZB Providers.')
    nzbs()    
    logger.fdebug('[RSS] RSS Feed Check/Update Complete')
    logger.fdebug('[RSS] Watchlist Check for new Releases')
    #if mylar.ENABLE_TORRENTS:
    #    if mylar.ENABLE_KAT:
    #        search.searchforissue(rsscheck='yes')
    #    if mylar.ENABLE_CBT:    
    mylar.search.searchforissue(rsscheck='yes')
    #nzbcheck here
    #nzbs(rsscheck='yes')
    logger.fdebug('[RSS] Watchlist Check complete.')
    return
Code Example #10
File: Failed.py Project: DarkSir23/mylar
    def markFailed(self):
        #use this to forcibly mark a single issue as Failed (i.e. if a search result is sent to a client
        #but comes back with a 404 or something else that prevents the download from being initiated).
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        logger.info(module + ' Marking as a Failed Download.')

        logger.fdebug(module + ' nzb_name: ' + self.nzb_name)
        logger.fdebug(module + ' issueid: ' + str(self.issueid))
        logger.fdebug(module + ' nzb_id: ' + str(self.id))
        logger.fdebug(module + ' prov: ' + self.prov)

        logger.fdebug('oneoffinfo: ' + str(self.oneoffinfo))
        if self.oneoffinfo:
            ComicName = self.oneoffinfo['ComicName']
            IssueNumber = self.oneoffinfo['IssueNumber']

        else:
            if 'annual' in self.nzb_name.lower():
                logger.info(module + ' Annual detected.')
                annchk = "yes"
                issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
            else:
                issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()

            ctrlVal = {"IssueID": self.issueid}
            Vals = {"Status":    'Failed'}
            myDB.upsert("issues", Vals, ctrlVal)
            ComicName = issuenzb['ComicName']
            IssueNumber = issuenzb['Issue_Number']

        ctrlVal = {"ID":       self.id,
                   "Provider": self.prov,
                   "NZBName":  self.nzb_name}
        Vals = {"Status":       'Failed',
                "ComicName":    ComicName,
                "Issue_Number": IssueNumber,
                "IssueID":      self.issueid,
                "ComicID":      self.comicid,
                "DateFailed":   helpers.now()}
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
Code Example #11
File: logger.py Project: DerInternet/mylar
    def log(self, message, level):

        logger = logging.getLogger('mylar')
        
        threadname = threading.currentThread().getName()
        
        if level != 'DEBUG':
            mylar.LOG_LIST.insert(0, (helpers.now(), message, level, threadname))
        
        message = threadname + ' : ' + message

        if level == 'DEBUG':
            logger.debug(message)
        elif level == 'INFO':
            logger.info(message)
        elif level == 'WARNING':
            logger.warning(message)
        else:
            logger.error(message)
Code Example #12
File: updater.py Project: mjsmjs/mylar
def foundsearch(ComicID, IssueID):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.

    # this is all redundant code that forceRescan already does.
    # should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue it just downloaded and
    # changes the status to Snatched accordingly. The have count is not incremented
    # at this stage since nothing has been downloaded yet - only the .nzb has been snatched and sent to SAB.

    myDB = db.DBConnection()
    comic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
    issue = myDB.action("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
    CYear = issue["IssueDate"][:4]

    #    fc = filechecker.listFiles(comic['ComicLocation'], comic['ComicName'])
    #    HaveDict = {"ComicID": ComicID}
    #    newHave = { "Have":    fc['comiccount'] }
    #    myDB.upsert("comics", newHave, HaveDict)
    #    #---
    issue = myDB.action("SELECT * FROM issues WHERE IssueID=? AND ComicID=?", [IssueID, ComicID]).fetchone()
    # update the status to Snatched (so it won't keep on re-downloading!)
    logger.fdebug("updating status to snatched")
    controlValue = {"IssueID": IssueID}
    newValue = {"Status": "Snatched"}
    myDB.upsert("issues", newValue, controlValue)
    # update the snatched DB
    controlValueDict = {"IssueID": IssueID}
    newValueDict = {"Status": "Snatched"}
    logger.fdebug("updating snatched db.")
    myDB.upsert("issues", newValueDict, controlValueDict)
    snatchedupdate = {"IssueID": IssueID}
    newsnatchValues = {
        "ComicName": comic["ComicName"],
        "ComicID": ComicID,
        "Issue_Number": issue["Issue_Number"],
        "DateAdded": helpers.now(),
        "Status": "Snatched",
    }
    myDB.upsert("snatched", newsnatchValues, snatchedupdate)

    # print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + str(comic["ComicName"]) + " issue: " + str(issue["Issue_Number"]))
    return
Code Example #13
    def log(self, message, level):

        logger = logging.getLogger('mylar')

        threadname = threading.currentThread().getName()

        if level != 'DEBUG':
            mylar.LOG_LIST.insert(0,
                                  (helpers.now(), message, level, threadname))

        message = threadname + ' : ' + message

        if level == 'DEBUG':
            logger.debug(message)
        elif level == 'INFO':
            logger.info(message)
        elif level == 'WARNING':
            logger.warning(message)
        elif level == 'FDEBUG':
            logger.debug(message)
        else:
            logger.error(message)
Code Example #14
File: importer.py Project: mriutta/mylar
    #print ("recentchk: " + str(recentchk))
    if recentchk <= 55:
        lastpubdate = 'Present'
    else:
        lastpubdate = str(ltmonth) + ' ' + str(ltyear)

    publishfigure = str(stmonth) + ' ' + str(styear) + ' - ' + str(lastpubdate)

    controlValueStat = {"ComicID": comicid}

    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "ComicPublished": publishfigure,
        "LastUpdated": helpers.now()
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO or (mylar.CV_ONLY and mylar.CVINFO):
        if not os.path.exists(os.path.join(comlocation,
                                           "cvinfo")) or mylar.CV_ONETIMER:
            with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
                text_file.write(str(comic['ComicURL']))

    logger.info(u"Updating complete for: " + comic['ComicName'])

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
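
The ComicPublished string assembled above treats a series whose latest issue landed within 55 days of today as still running. The same rule as a small sketch (publish_range is an illustrative name; recentchk is the day count computed in the snippet):

def publish_range(stmonth, styear, ltmonth, ltyear, recentchk):
    # within ~8 weeks of the latest issue the series is labelled as ongoing
    if recentchk <= 55:
        lastpubdate = 'Present'
    else:
        lastpubdate = str(ltmonth) + ' ' + str(ltyear)
    return str(stmonth) + ' ' + str(styear) + ' - ' + str(lastpubdate)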
Code Example #15
File: logger.py Project: 2mny/mylar
 def emit(self, record):
     message = self.format(record)
     message = message.replace("\n", "<br />")
     mylar.LOG_LIST.insert(0, (helpers.now(), message, record.levelname, record.threadName))
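
This emit() mirrors every record into the in-memory list behind the web UI. A minimal sketch of the handler class it presumably belongs to, assuming a plain logging.Handler subclass (LogListHandler is an illustrative name, and record.created stands in for the helpers.now() timestamp above):

import logging

class LogListHandler(logging.Handler):
    def __init__(self, log_list):
        logging.Handler.__init__(self)
        self.log_list = log_list   # newest-first list of log tuples

    def emit(self, record):
        message = self.format(record).replace("\n", "<br />")
        self.log_list.insert(0, (record.created, message, record.levelname, record.threadName))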
Code Example #16
File: weeklypull.py Project: citrusy/mylar
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurepull=None, issue=None):
    if futurepull is None:
        logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
    else:
        logger.info('Checking the Future Releases list for upcoming comics I am watching for...')
    myDB = db.DBConnection()

    not_t = ['TP',
             'NA',
             'HC',
             'PI']

    not_c = ['PTG',
             'COMBO PACK',
             '(PP #']

    lines = []
    unlines = []
    llen = []
    ccname = []
    pubdate = []
    latestissue = []
    w = 0
    wc = 0
    tot = 0
    chkout = []
    watchfnd = []
    watchfndiss = []
    watchfndextra = []
    alternate = []

    #print ("----------WATCHLIST--------")
    a_list = []
    b_list = []
    comicid = []

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    con = sqlite3.connect(str(mylardb))

    with con:

        cur = con.cursor()
        # if it's a one-off check (during an add series), load the comicname here and ignore below.
        if comic1off_name:
            logger.fdebug("this is a one-off: " + comic1off_name)
            lines.append(comic1off_name.strip())
            unlines.append(comic1off_name.strip())
            comicid.append(comic1off_id)
            latestissue.append(issue)
            w = 1            
        else:
            #let's read in the comic.watchlist from the db here
            cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished, LatestDate, ForceContinuing, AlternateSearch, LatestIssue from comics")
            while True:
                watchd = cur.fetchone()
                #print ("watchd: " + str(watchd))
                if watchd is None:
                    break
                if 'Present' in watchd[4] or (helpers.now()[:4] in watchd[4]) or watchd[6] == 1:
                 # this gets buggered up when series are named the same, and one ends in the current
                 # year, and the new series starts in the same year - ie. Avengers
                 # lets' grab the latest issue date and see how far it is from current
                 # anything > 45 days we'll assume it's a false match ;)
                    logger.fdebug("ComicName: " + watchd[1])
                    latestdate = watchd[5]
                    logger.fdebug("latestdate:  " + str(latestdate))
                    if latestdate[8:] == '':
                        logger.fdebug("invalid date " + str(latestdate) + " appending 01 for day for continuation.")
                        latest_day = '01'
                    else:
                        latest_day = latestdate[8:]
                    c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),int(latest_day))
                    n_date = datetime.date.today()
                    logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
                    recentchk = (n_date - c_date).days
                    logger.fdebug("recentchk: " + str(recentchk) + " days")
                    chklimit = helpers.checkthepub(watchd[0])
                    logger.fdebug("Check date limit set to : " + str(chklimit))
                    logger.fdebug(" ----- ")
                    if recentchk < int(chklimit) or watchd[6] == 1:
                        if watchd[6] == 1:
                            logger.fdebug('Forcing Continuing Series enabled for series...')
                        # let's not even bother with comics that are not in the Present.
                        a_list.append(watchd[1])
                        b_list.append(watchd[2])
                        comicid.append(watchd[0])
                        pubdate.append(watchd[4])
                        latestissue.append(watchd[8])
                        lines.append(a_list[w].strip())
                        unlines.append(a_list[w].strip())
                        w+=1   # we need to increment the count here, so we don't count the same comics twice (albeit with alternate names)

                        #here we load in the alternate search names for a series and assign them the comicid and
                        #alternate names
                        Altload = helpers.LoadAlternateSearchNames(watchd[7], watchd[0])
                        if Altload == 'no results':
                            pass
                        else:
                            wc = 0 
                            alt_cid = Altload['ComicID']
                            n = 0
                            iscnt = Altload['Count']
                            while (n <= iscnt):
                                try:
                                    altval = Altload['AlternateName'][n]
                                except IndexError:
                                    break
                                cleanedname = altval['AlternateName']
                                a_list.append(altval['AlternateName'])
                                b_list.append(watchd[2])
                                comicid.append(alt_cid)
                                pubdate.append(watchd[4])
                                latestissue.append(watchd[8])
                                lines.append(a_list[w+wc].strip())
                                unlines.append(a_list[w+wc].strip())
                                logger.fdebug('loading in Alternate name for ' + str(cleanedname))
                                n+=1
                                wc+=1
                            w+=wc

                #-- to be removed - 
                        #print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
                        #if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
                        #lines.append(a_list[w].strip())
                        #unlines.append(a_list[w].strip())
                        #llen.append(a_list[w].splitlines())
                        #ccname.append(a_list[w].strip())
                        #tmpwords = a_list[w].split(None)
                        #ltmpwords = len(tmpwords)
                        #ltmp = 1
                #-- end to be removed
                    else:
                        logger.fdebug("Determined to not be a Continuing series at this time.")    
        cnt = int(w-1)
        cntback = int(w-1)
        kp = []
        ki = []
        kc = []
        otot = 0

        logger.fdebug("You are watching for: " + str(w) + " comics")
        #print ("----------THIS WEEK'S PUBLISHED COMICS------------")
        if w > 0:
            while (cnt > -1):
                latestiss = latestissue[cnt]
                lines[cnt] = lines[cnt].upper()
                #llen[cnt] = str(llen[cnt])
                logger.fdebug("looking for : " + lines[cnt])
                sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', ' ', lines[cnt])
                sqlsearch = re.sub("\&", '%', sqlsearch)
                sqlsearch = re.sub("\\bAND\\b", '%', sqlsearch)
                sqlsearch = re.sub("\\bTHE\\b", '', sqlsearch)
                if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)
                sqlsearch = re.sub(r'\s', '%', sqlsearch)
                sqlsearch = sqlsearch + '%'
                #logger.fdebug("searchsql: " + sqlsearch)
                if futurepull is None:
                    weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [sqlsearch])
                else:
                    weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM future WHERE COMIC LIKE (?)', [sqlsearch])
                #cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
                for week in weekly:
                    if week == None:
                        break
                    for nono in not_t:
                        if nono in week['PUBLISHER']:
                            #logger.fdebug("nono present")
                            break
                        if nono in week['ISSUE']:
                            #logger.fdebug("graphic novel/tradeback detected..ignoring.")
                            break
                        for nothere in not_c:
                            if nothere in week['EXTRA']:
                                #logger.fdebug("nothere present")
                                break
                            else:
                                comicnm = week['COMIC']
                                #here's the tricky part, ie. BATMAN will match on
                                #every batman comic, not exact
                                logger.fdebug("comparing " + comicnm + " ..to.. " + unlines[cnt].upper())

                                #-NEW-
                                # strip out all special characters and compare
                                watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', '', unlines[cnt])
                                comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', '', comicnm)
                                if "THE" in watchcomic.upper() or "THE" in comicnm.upper():
                                    modwatchcomic = re.sub("\\bTHE\\b", "", watchcomic.upper())
                                    modcomicnm = re.sub("\\bTHE\\b", "", comicnm)
                                else:
                                    modwatchcomic = watchcomic
                                    modcomicnm = comicnm
                                if '&' in watchcomic.upper():
                                    modwatchcomic = re.sub('\&', 'AND', modwatchcomic.upper())
                                    modcomicnm = re.sub('\&', 'AND', modcomicnm)
                                if '&' in comicnm:
                                    modwatchcomic = re.sub('\&', 'AND', modwatchcomic.upper())
                                    modcomicnm = re.sub('\&', 'AND', modcomicnm)
                                #thnx to A+X for this...
                                if '+' in watchcomic:
                                    logger.fdebug("+ detected...adjusting.")
                                    #logger.fdebug("comicnm:" + comicnm)
                                    #logger.fdebug("watchcomic:" + watchcomic)
                                    modwatchcomic = re.sub('\+', 'PLUS', modwatchcomic)
                                    #logger.fdebug("modcomicnm:" + modcomicnm)
                                    #logger.fdebug("modwatchcomic:" + modwatchcomic)

                                #annuals!
                                if 'ANNUAL' in comicnm.upper(): 
                                    modcomicnm = re.sub("\\bANNUAL\\b", "", modcomicnm.upper())

                                watchcomic = re.sub(r'\s', '', watchcomic)
                                comicnm = re.sub(r'\s', '', comicnm)
                                modwatchcomic = re.sub(r'\s', '', modwatchcomic)
                                modcomicnm = re.sub(r'\s', '', modcomicnm)
                                logger.fdebug("watchcomic : " + str(watchcomic) + " / mod :" + str(modwatchcomic))
                                logger.fdebug("comicnm : " + str(comicnm) + " / mod :" + str(modcomicnm))

                                if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
                                    logger.fdebug("matched on:" + comicnm + "..." + watchcomic.upper())
                                    pass
#                                elif ("ANNUAL" in week['EXTRA']):
#                                    pass
#                                    print ( row[3] + " matched on ANNUAL")
                                else:
                                    break


                                if ("NA" not in week['ISSUE']) and ("HC" not in week['ISSUE']):
                                    if ("COMBO PACK" not in week['EXTRA']) and ("2ND PTG" not in week['EXTRA']) and ("3RD PTG" not in week['EXTRA']):

                                    #this all needs to get redone, so the ability to compare issue dates can be done systematically.
                                    #Everything below should be in its own function - at least the callable sections - in doing so, we can
                                    #then do comparisons when two titles of the same name exist and are by definition 'current'. Issue date comparisons
                                    #would identify the difference between two #1 titles within the same series year, but have different publishing dates.
                                    #Wolverine (2013) & Wolverine (2014) are good examples of this situation.
                                    #of course initially, the issue data for the newer series wouldn't have any issue data associated with it so it would be
                                    #a null value, but given that the 2013 series (as an example) would be from 2013-05-01, it obviously wouldn't be a match to
                                    #the current date & year (2014). Throwing out that, we could just assume that the 2014 would match the #1.

                                    #get the issue number of the 'weeklypull' series.
                                    #load in the actual series issue number's store-date (not publishing date)
                                    #---use a function to check db, then return the results in a tuple/list to avoid db locks.
                                    #if the store-date is >= weeklypull-list date then continue processing below.
                                    #if the store-date is <= weeklypull-list date then break.
                                    ### week['ISSUE']  #issue # from pullist
                                    ### week['SHIPDATE']  #weeklypull-list date
                                    ### comicid[cnt] #comicid of matched series                                                                

                                    ## if it's a futurepull, the dates get mixed up when two titles exist of the same name
                                    ## ie. Wolverine-2011 & Wolverine-2014
                                    ## we need to set the compare date to today's date ( Now() ) in this case.
                                        if futurepull:
                                            usedate = datetime.datetime.now().strftime('%Y%m%d')  #convert to yyyymmdd
                                        else:
                                            usedate = re.sub("[^0-9]", "", week['SHIPDATE'])

                                        if 'ANNUAL' in comicnm.upper():
                                            chktype = 'annual'
                                        else:
                                            chktype = 'series' 
                             
                                        datevalues = loaditup(watchcomic, comicid[cnt], week['ISSUE'], chktype)

                                        date_downloaded = None
                                        altissuenum = None

                                        if datevalues == 'no results':
                                        #if a series is a .NOW on the pullist, it won't match up against anything (probably) on CV
                                        #let's grab the digit from the .NOW, poll it against CV to see if there's any data
                                        #if there is, check the store date to make sure it's a 'new' release.
                                        #if it is a new release that has the same store date as the .NOW, then we assume
                                        #it's the same, and assign it the AltIssueNumber to do extra searches.
                                            if week['ISSUE'].isdigit() == False and '.' not in week['ISSUE']:
                                                altissuenum = re.sub("[^0-9]", "", week['ISSUE'])  # carry this through to get added to db later if matches
                                                logger.fdebug('altissuenum is: ' + str(altissuenum))
                                                altvalues = loaditup(watchcomic, comicid[cnt], altissuenum, chktype)
                                                if altvalues == 'no results':
                                                    logger.fdebug('No alternate Issue numbering - something is probably wrong somewhere.')
                                                    break  #no alternate data came back - bail out rather than index 'no results' below

                                                validcheck = checkthis(altvalues[0]['issuedate'], altvalues[0]['status'], usedate)
                                                if validcheck == False:
                                                    if date_downloaded is None:
                                                        break
                                            if chktype == 'series': 
                                                latest_int = helpers.issuedigits(latestiss)
                                                weekiss_int = helpers.issuedigits(week['ISSUE'])
                                                logger.fdebug('comparing ' + str(latest_int) + ' to ' + str(weekiss_int))
                                                if (latest_int > weekiss_int) or (latest_int == 0 or weekiss_int == 0):
                                                    logger.fdebug(str(week['ISSUE']) + ' should not be the next issue in THIS volume of the series.')
                                                    logger.fdebug('it should be either greater than ' + str(latestiss) + ' or an issue #0')
                                                    break

                                        else:
                                            #logger.fdebug('issuedate:' + str(datevalues[0]['issuedate']))
                                            #logger.fdebug('status:' + str(datevalues[0]['status']))
                                            datestatus = datevalues[0]['status']
                                            validcheck = checkthis(datevalues[0]['issuedate'], datestatus, usedate)
                                            if validcheck == True:
                                                if datestatus != 'Downloaded' and datestatus != 'Archived':
                                                    pass
                                                else:
                                                    logger.fdebug('Issue #' + str(week['ISSUE']) + ' already downloaded.')
                                                    date_downloaded = datestatus
                                            else:
                                                if date_downloaded is None:
                                                    break

                                        otot+=1
                                        dontadd = "no"
                                        if dontadd == "no":
                                            #print (row[0], row[1], row[2])
                                            tot+=1
                                            #kp.append(row[0])
                                            #ki.append(row[1])
                                            #kc.append(comicnm)
                                            if "ANNUAL" in comicnm.upper():
                                                watchfndextra.append("annual")
                                                ComicName = str(unlines[cnt]) + " Annual"
                                            else:
                                                ComicName = str(unlines[cnt])
                                                watchfndextra.append("none")
                                            watchfnd.append(comicnm)
                                            watchfndiss.append(week['ISSUE'])
                                            ComicID = comicid[cnt]
                                            if not mylar.CV_ONLY:
                                                ComicIssue = str(watchfndiss[tot -1] + ".00")
                                            else:
                                                ComicIssue = str(watchfndiss[tot -1])
                                            ComicDate = str(week['SHIPDATE'])
                                            #ComicName = str(unlines[cnt])
                                            logger.fdebug("Watchlist hit for : " + ComicName + " ISSUE: " + str(watchfndiss[tot -1]))

                                            if futurepull is None:
                                               # here we add to comics.latest
                                                updater.latest_update(ComicID=ComicID, LatestIssue=ComicIssue, LatestDate=ComicDate)
                                                # here we add to upcoming table...
                                                statusupdate = updater.upcoming_update(ComicID=ComicID, ComicName=ComicName, IssueNumber=ComicIssue, IssueDate=ComicDate, forcecheck=forcecheck)
                                            else:
                                                # here we add to upcoming table...
                                                statusupdate = updater.upcoming_update(ComicID=ComicID, ComicName=ComicName, IssueNumber=ComicIssue, IssueDate=ComicDate, forcecheck=forcecheck, futurepull='yes', altissuenumber=altissuenum)

                                            # here we update status of weekly table...
                                            if statusupdate is not None:
                                                cstatus = statusupdate['Status']
                                                cstatusid = statusupdate['ComicID']
                                            else:
                                                cstatus = None
                                                cstatusid = None
                                            #set the variable fp to denote updating the futurepull list ONLY
                                            if futurepull is None: 
                                                fp = None
                                            else: 
                                                cstatusid = ComicID
                                                fp = "yes"

                                            if date_downloaded is None:
                                                updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=cstatus, CID=cstatusid, futurepull=fp, altissuenumber=altissuenum)
                                            else:
                                                updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=date_downloaded, CID=cstatusid, futurepull=fp, altissuenumber=altissuenum)
                                            break
                                        break
                        break
                cnt-=1
        #print ("-------------------------")
        logger.fdebug("There are " + str(otot) + " comics this week to get!")
        #print ("However I've already grabbed " + str(btotal) )
        #print ("I need to get " + str(tot) + " comic(s)!" )
        logger.info(u"Finished checking for comics on my watchlist.")
    #con.close()
    return
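
Most of the matching in pullitcheck is name normalization: both the watchlist title and the weekly-pull title are scrubbed the same way before comparison. Collected into one hedged helper (normalize_title is an illustrative name; the original interleaves these steps with logging and edge cases):

import re

def normalize_title(name):
    # strip punctuation, drop THE, spell out & and +, then remove whitespace
    name = re.sub(r"[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]", '', name).upper()
    name = re.sub(r'\bTHE\b', '', name)
    name = name.replace('&', 'AND').replace('+', 'PLUS')
    return re.sub(r'\s', '', name)

# two titles count as the same comic when the scrubbed forms match, e.g.
# normalize_title('A+X') == normalize_title('a + x')   ->   both 'APLUSX'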
Code Example #17
File: importer.py Project: ChaniD/mylar
    recentchk = (n_date - c_date).days
    #print ("recentchk: " + str(recentchk))
    if recentchk <= 55:
        lastpubdate = 'Present'
    else:
        lastpubdate = str(ltmonth) + ' ' + str(ltyear)

    publishfigure = str(stmonth) + ' ' + str(styear) + ' - ' + str(lastpubdate)

    controlValueStat = {"ComicID":     comicid}
    
    newValueStat = {"Status":          "Active",
                    "LatestIssue":     latestiss,
                    "LatestDate":      latestdate,
                    "ComicPublished":  publishfigure,
                    "LastUpdated":     helpers.now()
                   }

    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO or (mylar.CV_ONLY and mylar.CVINFO):
        if not os.path.exists(os.path.join(comlocation,"cvinfo")) or mylar.CV_ONETIMER:
            with open(os.path.join(comlocation,"cvinfo"),"w") as text_file:
                text_file.write(str(comic['ComicURL']))
  
    logger.info(u"Updating complete for: " + comic['ComicName'])

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
Code Example #18
File: importer.py Project: ChaniD/mylar
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    
    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID":     gcdcomicid}

    comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    #print ("Comic:" + str(ComicName))

    newValueDict = {"Status":   "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    #comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic for ID : " + gcdcomicid)
        if comic is None:
            newValueDict = {"ComicName":   "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                    "Status":   "Active"}
        else:
            newValueDict = {"Status":   "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)

    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName


    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")" )
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Successfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    #--End
    
    ComicImage = gcdinfo['ComicImage']

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':','')
            if '/' in comicdir:
                comicdir = comicdir.replace('/','-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',','')            
        else: comicdir = u_comicname

        series = comicdir
        publisher = ComicPublisher
        year = ComicYear

        #do work to generate folder path
        values = {'$Series':        series,
                  '$Publisher':     publisher,
                  '$Year':          year,
                  '$series':        series.lower(),
                  '$publisher':     publisher.lower(),
                  '$Volume':        year
                  }

        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)

        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)

    comicIssues = gcdinfo['totalissues']

    #let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))

        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))

    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")

    #try:
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache',str(gcomicid) + ".jpg")

            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comi$

            logger.info(u"Successfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation), "cover.jpg")
                shutil.copy(ComicImage,comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")
        
    #if comic['ComicVersion'].isdigit():
    #    comicVol = "v" + comic['ComicVersion']
    #else:
    #    comicVol = None


    controlValueDict = {"ComicID":      gcomicid}
    newValueDict = {"ComicName":        ComicName,
                    "ComicSortName":    sortname,
                    "ComicYear":        ComicYear,
                    "Total":            comicIssues,
                    "ComicLocation":    comlocation,
                    #"ComicVersion":     comicVol,
                    "ComicImage":       ComicImage,
                    #"ComicPublisher":   comic['ComicPublisher'],
                    #"ComicPublished":   comicPublished,
                    "DateAdded":        helpers.today(),
                    "Status":           "Loading"}

    myDB.upsert("comics", newValueDict, controlValueDict)

    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')

    logger.info(u"Successfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        #---NEW.code
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("gcdval: " + str(gcdval))
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                #print ("gcd-variation accounted for.")
                issdate = '0000-00-00'
                int_issnum =  int ( issis / 1000 )
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst+1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0: issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])
        #get the latest issue / date using the date.
        int_issnum = int( gcdis / 1000 )
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        #print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        #---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()


        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID":  issid}
        newValueDict = {"ComicID":            gcomicid,
                        "ComicName":          ComicName,
                        "Issue_Number":       gcd_issue,
                        "IssueDate":          issdate,
                        "Int_IssueNumber":    int_issnum
                        }

        # Only change the status & add DateAdded if the issue is not already in the database
        # (must be set after newValueDict is rebuilt above, or the assignment is discarded)
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()

        #print ("issueid:" + str(controlValueDict))
        #print ("values:" + str(newValueDict))

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        if iss_exists:
            #print ("Existing status : " + str(iss_exists['Status']))
            newValueDict['Status'] = iss_exists['Status']


        myDB.upsert("issues", newValueDict, controlValueDict)
        bb+=1

#        logger.debug(u"Updating comic cache for " + ComicName)
#        cache.getThumb(ComicID=issue['issueid'])

#        logger.debug(u"Updating cache for: " + ComicName)
#        cache.getThumb(ComicIDcomicid)


    controlValueStat = {"ComicID":     gcomicid}
    newValueStat = {"Status":          "Active",
                    "LatestIssue":     latestiss,
                    "LatestDate":      latestdate,
                    "LastUpdated":     helpers.now()
                   }

    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO:
        if not os.path.exists(os.path.join(comlocation, "cvinfo")):
            with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))

    logger.info(u"Updating complete for: " + ComicName)

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid,comlocation,ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(gcomicid,ogcname)

    #check for existing files...
    updater.forceRescan(gcomicid)


    if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " + ComicName)
            updater.newpullcheck(comic['ComicName'], gcomicid)

        #here we grab issues that have been marked as wanted above...

        results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : "  + ComicName)

            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'], result['IssueID'])
        else: logger.info(u"No issues marked as wanted for " + ComicName)

        logger.info(u"Finished grabbing what I could.")
Code Example #19
File: rsscheckit.py Project: ChapeLu/mylar
    def run(self):

        with rss_lock:

            logger.info('RSS Feed Check was last run at : ' + str(mylar.RSS_LASTRUN))
            firstrun = "no"
            #check the last run of rss to make sure it's not hammering.
            if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0' or self.forcerss:
                logger.info('RSS Feed Check First Ever Run (or forced).')
                firstrun = "yes"
                mins = 0
            else:
                c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S")
                n_date = datetime.datetime.now()
                absdiff = abs(n_date - c_obj_date)
                mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0  #total seconds converted to minutes.

            if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL):
                logger.fdebug('RSS Check ran less than ' + str(mylar.RSS_CHECKINTERVAL) + ' minutes ago - not initiating at this time.')
                return

            mylar.RSS_LASTRUN = helpers.now()
            logger.fdebug('Updating RSS Run time to : ' + str(mylar.RSS_LASTRUN))
            mylar.config_write()

            #function for looping through nzbs/torrent feeds
            if mylar.ENABLE_TORRENT_SEARCH:
                logger.info('[RSS] Initiating Torrent RSS Check.')
                if mylar.ENABLE_KAT:
                    logger.info('[RSS] Initiating Torrent RSS Feed Check on KAT.')
                    rsscheck.torrents(pickfeed='3')
                    rsscheck.torrents(pickfeed='6')
                if mylar.ENABLE_32P:
                    logger.info('[RSS] Initiating Torrent RSS Feed Check on 32P.')
                    if mylar.MODE_32P == 0:
                        logger.fdebug('[RSS] 32P mode set to Legacy mode. Monitoring New Releases feed only.')
                        if any([mylar.PASSKEY_32P is None, mylar.PASSKEY_32P == '', mylar.RSSFEED_32P is None, mylar.RSSFEED_32P == '']):
                            logger.error('[RSS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.')
                        else:
                            rsscheck.torrents(pickfeed='1', feedinfo=mylar.KEYS_32P)
                    else:
                        logger.fdebug('[RSS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed')
                        if any([mylar.USERNAME_32P is None, mylar.USERNAME_32P == '', mylar.PASSWORD_32P is None]):
                            logger.error('[RSS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.')
                        else:
                            if mylar.KEYS_32P is None:
                                feed32p = auth32p.info32p()
                                feedinfo = feed32p.authenticate()
                                if feedinfo == "disable":
                                    mylar.ENABLE_32P = 0
                                    mylar.config_write()
                            else:
                                feedinfo = mylar.FEEDINFO_32P

                            if feedinfo is None or len(feedinfo) == 0 or feedinfo == "disable":
                                logger.error('[RSS] Unable to retrieve any information from 32P for RSS Feeds. Skipping for now.')
                            else:
                                rsscheck.torrents(pickfeed='1', feedinfo=feedinfo[0])
                                x = 0
                                #assign personal feeds for 32p > +8
                                for fi in feedinfo:
                                    x+=1
                                    pfeed_32p = str(7 + x)
                                    rsscheck.torrents(pickfeed=pfeed_32p, feedinfo=fi)

            logger.info('[RSS] Initiating RSS Feed Check for NZB Providers.')
            rsscheck.nzbs(forcerss=self.forcerss)
            logger.info('[RSS] RSS Feed Check/Update Complete')
            logger.info('[RSS] Watchlist Check for new Releases')
            mylar.search.searchforissue(rsscheck='yes')
            logger.info('[RSS] Watchlist Check complete.')
            if self.forcerss:
                logger.info('[RSS] Successfully ran a forced RSS Check.')
            return
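run() throttles itself by converting the gap between now and RSS_LASTRUN into minutes and bailing out when it is under RSS_CHECKINTERVAL. The same computation in isolation (the helper name is illustrative; the timestamp format matches how RSS_LASTRUN is written above):

import datetime

def rss_due(last_run, check_interval_mins):
    # last_run is a '%Y-%m-%d %H:%M:%S' string, the format RSS_LASTRUN is stored in.
    if not last_run or last_run == '0':
        return True                      # never run before - go ahead
    last = datetime.datetime.strptime(last_run, "%Y-%m-%d %H:%M:%S")
    absdiff = abs(datetime.datetime.now() - last)
    mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0
    return mins >= check_interval_mins

# rss_due('2013-01-01 12:00:00', 20) -> True (well past the interval)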
Code Example #20
File: weeklypull.py Project: mriutta/mylar
def pullitcheck(comic1off_name=None,comic1off_id=None,forcecheck=None):
    logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
    myDB = db.DBConnection()

    not_t = ['TP',
             'NA',
             'HC',
             'PI']

    not_c = ['PTG',
             'COMBO PACK',
             '(PP #']

    lines = []
    unlines = []
    llen = []
    ccname = []
    pubdate = []
    w = 0
    tot = 0
    chkout = []
    watchfnd = []
    watchfndiss = []
    watchfndextra = []

    #print ("----------WATCHLIST--------")
    a_list = []
    b_list = []
    comicid = []

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    con = sqlite3.connect(str(mylardb))

    with con:

        cur = con.cursor()
        # if it's a one-off check (during an add series), load the comicname here and ignore below.
        if comic1off_name:
            logger.fdebug("this is a one-off" + str(comic1off_name))
            lines.append(comic1off_name.strip())
            unlines.append(comic1off_name.strip())
            comicid.append(comic1off_id)
            w = 1            
        else:
            #let's read in the comic.watchlist from the db here
            cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished, LatestDate from comics")
            while True:
                watchd = cur.fetchone()
                #print ("watchd: " + str(watchd))
                if watchd is None:
                    break
                if 'Present' in watchd[4] or (helpers.now()[:4] in watchd[4]):
                    # this gets buggered up when series are named the same, and one ends in the current
                    # year, and the new series starts in the same year - ie. Avengers
                    # let's grab the latest issue date and see how far it is from current;
                    # anything > 55 days we'll assume is a false match ;)
                    #logger.fdebug("ComicName: " + watchd[1])
                    latestdate = watchd[5]
                    #logger.fdebug("latestdate:  " + str(latestdate))
                    c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),1)
                    n_date = datetime.date.today()
                    #logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
                    recentchk = (n_date - c_date).days
                    #logger.fdebug("recentchk: " + str(recentchk) + " days")
                    #logger.fdebug(" ----- ")
                    if recentchk < 55:
                        # recent enough - treat it as the current series and add it to the list.
                        a_list.append(watchd[1])
                        b_list.append(watchd[2])
                        comicid.append(watchd[0])
                        pubdate.append(watchd[4])
                        #print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
                        #if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
                        lines.append(a_list[w].strip())
                        unlines.append(a_list[w].strip())
                        llen.append(a_list[w].splitlines())
                        ccname.append(a_list[w].strip())
                        tmpwords = a_list[w].split(None)
                        ltmpwords = len(tmpwords)
                        ltmp = 1
                        w+=1
        cnt = int(w-1)
        cntback = int(w-1)
        kp = []
        ki = []
        kc = []
        otot = 0

        logger.fdebug("You are watching for: " + str(w) + " comics")
        #print ("----------THIS WEEK'S PUBLISHED COMICS------------")
        if w > 0:
            while (cnt > -1):
                lines[cnt] = lines[cnt].upper()
                #llen[cnt] = str(llen[cnt])
                #logger.fdebug("looking for : " + str(lines[cnt]))
                sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', ' ', lines[cnt])
                sqlsearch = re.sub(r'\s', '%', sqlsearch) 
                if 'THE' in sqlsearch: sqlsearch = re.sub('THE', '', sqlsearch)
                if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)
                #logger.fdebug("searchsql: " + str(sqlsearch))
                weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [sqlsearch])
                #cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
                for week in weekly:
                    if week is None:
                        break
                    for nono in not_t:
                        if nono in week['PUBLISHER']:
                            #logger.fdebug("nono present")
                            break
                        if nono in week['ISSUE']:
                            #logger.fdebug("graphic novel/tradeback detected..ignoring.")
                            break
                        for nothere in not_c:
                            if nothere in week['EXTRA']:
                                #logger.fdebug("nothere present")
                                break
                            else:
                                comicnm = week['COMIC']
                                #here's the tricky part, ie. BATMAN will match on
                                #every batman comic, not exact
                                #logger.fdebug("comparing" + str(comicnm) + "..to.." + str(unlines[cnt]).upper())

                                #-NEW-
                                # strip out all special characters and compare
                                watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', '', unlines[cnt])
                                comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', '', comicnm)
                                watchcomic = re.sub(r'\s', '', watchcomic)
                                comicnm = re.sub(r'\s', '', comicnm)
                                #logger.fdebug("Revised_Watch: " + watchcomic)
                                #logger.fdebug("ComicNM: " + comicnm)
                                if 'THE' in watchcomic.upper():
                                    modwatchcomic = re.sub('THE', '', watchcomic.upper())
                                    modcomicnm = re.sub('THE', '', comicnm)
                                else:
                                    modwatchcomic = watchcomic
                                    modcomicnm = comicnm
                                #thnx to A+X for this...
                                if '+' in watchcomic:
                                    logger.fdebug("+ detected...adjusting.")
                                    #logger.fdebug("comicnm:" + comicnm)
                                    #logger.fdebug("watchcomic:" + watchcomic)
                                    modwatchcomic = re.sub('\+', 'PLUS', modwatchcomic)
                                    #logger.fdebug("modcomicnm:" + modcomicnm)
                                    #logger.fdebug("modwatchcomic:" + modwatchcomic)
                                if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
                                    logger.fdebug("matched on:" + str(comicnm) + "..." + str(watchcomic).upper())
                                    pass
                                elif ("ANNUAL" in week['EXTRA']):
                                    pass
                                    #print ( row[3] + " matched on ANNUAL")
                                else:
                                    break
                                if ("NA" not in week['ISSUE']) and ("HC" not in week['ISSUE']):
                                    if ("COMBO PACK" not in week['EXTRA']) and ("2ND PTG" not in week['EXTRA']) and ("3RD PTG" not in week['EXTRA']):
                                        otot+=1
                                        dontadd = "no"
                                        if dontadd == "no":
                                            #print (row[0], row[1], row[2])
                                            tot+=1
                                            #kp.append(row[0])
                                            #ki.append(row[1])
                                            #kc.append(comicnm)
                                            if ("ANNUAL" in week['EXTRA']):
                                                watchfndextra.append("annual")
                                            else:
                                                watchfndextra.append("none")
                                            watchfnd.append(comicnm)
                                            watchfndiss.append(week['ISSUE'])
                                            ComicID = comicid[cnt]
                                            if not mylar.CV_ONLY:
                                                ComicIssue = str(watchfndiss[tot -1] + ".00")
                                            else:
                                                ComicIssue = str(watchfndiss[tot -1])
                                            ComicDate = str(week['SHIPDATE'])
                                            ComicName = str(unlines[cnt])
                                            logger.fdebug("Watchlist hit for : " + ComicName + " ISSUE: " + str(watchfndiss[tot -1]))
                                            # here we add to comics.latest
                                            updater.latest_update(ComicID=ComicID, LatestIssue=ComicIssue, LatestDate=ComicDate)
                                            # here we add to upcoming table...
                                            statusupdate = updater.upcoming_update(ComicID=ComicID, ComicName=ComicName, IssueNumber=ComicIssue, IssueDate=ComicDate, forcecheck=forcecheck)
                                            # here we update status of weekly table...
                                            if statusupdate is not None:
                                                cstatus = statusupdate['Status']
                                                cstatusid = statusupdate['ComicID']
                                            else:
                                                cstatus = None
                                                cstatusid = None
                                            updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=cstatus, CID=cstatusid)
                                            break
                                        break
                        break
                cnt-=1
        #print ("-------------------------")
        logger.fdebug("There are " + str(otot) + " comics this week to get!")
        #print ("However I've already grabbed " + str(btotal) )
        #print ("I need to get " + str(tot) + " comic(s)!" )
        logger.info(u"Finished checking for comics on my watchlist.")
    #con.close()
    return
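pullitcheck() decides a watchlist hit by normalizing both names before comparing: punctuation and whitespace are stripped, a leading 'THE' is dropped, and '+' becomes 'PLUS' (the A+X case). A condensed sketch of that comparison (the helper name is illustrative, and it anchors 'THE' at the start rather than substituting it anywhere, which is a deliberate simplification of the snippet's blanket re.sub):

import re

def names_match(watch, pull):
    # Normalize both names roughly the way the loop above does, then compare.
    def norm(name):
        name = re.sub(r"[_#,/:;.\-!$%&'?@]", '', name.upper())
        name = re.sub(r'\s', '', name)              # drop all whitespace
        if name.startswith('THE'):
            name = name[3:]
        return name.replace('+', 'PLUS')            # thanks to A+X
    return norm(watch) == norm(pull)

# names_match('The Avengers', 'AVENGERS') -> True
# names_match('A+X', 'A PLUS X')          -> True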
Code Example #21
File: Failed.py Project: DarkSir23/mylar
    def Process(self):
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        if self.nzb_name and self.nzb_folder:
            self._log('Failed download has been detected: ' + self.nzb_name + ' in ' + self.nzb_folder)

            #since this has already been passed through the search module, which holds the IssueID in the nzblog,
            #let's find the matching nzbname and pass it the IssueID in order to mark it as Failed and then return
            #to the search module and continue trucking along.

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                nzbname, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext)

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?\'\(\)]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))
            nzbname = re.sub('_', '.', str(nzbname))

            logger.fdebug(module + ' After conversions, nzbname is : ' + str(nzbname))
            self._log("nzbname: " + str(nzbname))

            nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.")
                logger.fdebug(module + ' Failure - could not locate nzbfile initially')
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname))
                logger.fdebug(module + ' Trying to locate nzbfile again with nzbname of : ' + str(nzbname))
                nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(module + ' Unable to locate downloaded file to rename. PostProcessing aborted.')
                    self._log('Unable to locate downloaded file to rename. PostProcessing aborted.')
                    self.valreturn.append({"self.log": self.log,
                                           "mode": 'stop'})

                    return self.queue.put(self.valreturn)
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug(module + ' Auto-corrected and found the nzb as : ' + str(nzbname))
                    issueid = nzbiss['IssueID']
            else:
                issueid = nzbiss['IssueID']
                logger.fdebug(module + ' Issueid: ' + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number

        else:
            issueid = self.issueid
            nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
            if nzbiss is None:
                logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
                self.valreturn.append({"self.log": self.log,
                                       "mode": 'stop'})
                return self.queue.put(self.valreturn)

            nzbname = nzbiss['NZBName']

        # find the provider.
        self.prov = nzbiss['PROVIDER']
        logger.info(module + ' Provider: ' + self.prov)

        # grab the id.
        self.id = nzbiss['ID']
        logger.info(module + ' ID: ' + self.id)
        annchk = "no"

        if 'annual' in nzbname.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
        else:
            issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

        if issuenzb is not None:
            logger.info(module + ' issuenzb found.')
            if helpers.is_number(issueid):
                sandwich = int(issuenzb['IssueID'])
        else:
            logger.info(module + ' issuenzb not found.')
            #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
            #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
            if 'S' in issueid:
                sandwich = issueid
            elif 'G' in issueid or '-' in issueid:
                sandwich = 1
        if helpers.is_number(sandwich):
            if sandwich < 900000:
                # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
                pass
        else:
            logger.info('Failed download handling for story-arcs and one-off\'s are not supported yet. Be patient!')
            self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})

            return self.queue.put(self.valreturn)

        comicid = issuenzb['ComicID']
        issuenumOG = issuenzb['Issue_Number']
        logger.info(module + ' Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' that was downloaded using ' + self.prov)
        self._log('Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' downloaded using ' + self.prov)

        logger.info(module + ' Marking as a Failed Download.')
        self._log('Marking as a Failed Download.')

        ctrlVal = {"IssueID": issueid}
        Vals = {"Status":    'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)

        ctrlVal = {"ID":       self.id,
                   "Provider": self.prov,
                   "NZBName":  nzbname}
        Vals = {"Status":       'Failed',
                "ComicName":    issuenzb['ComicName'],
                "Issue_Number": issuenzb['Issue_Number'],
                "IssueID":      issueid,
                "ComicID":      comicid,
                "DateFailed":   helpers.now()}
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
        self._log('Successfully marked as Failed.')

        if mylar.CONFIG.FAILED_AUTO:
            logger.info(module + ' Sending back to search to see if we can find something that will not fail.')
            self._log('Sending back to search to see if we can find something better that will not fail.')
            self.valreturn.append({"self.log":    self.log,
                                   "mode":        'retry',
                                   "issueid":     issueid,
                                   "comicid":     comicid,
                                   "comicname":   issuenzb['ComicName'],
                                   "issuenumber": issuenzb['Issue_Number'],
                                   "annchk":      annchk})

            return self.queue.put(self.valreturn)
        else:
            logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
            self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})
            return self.queue.put(self.valreturn)
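Process() has to rebuild the exact nzbname key stored in the nzblog table, so it strips any leaked .cbr/.cbz extension, converts spaces and underscores to dots, and drops certain punctuation. A minimal sketch of that normalization (the helper name is illustrative):

import os
import re

def normalize_nzbname(nzbname):
    # Same conversions the failed-download handler applies before the nzblog lookup.
    if nzbname.lower().endswith(('.cbr', '.cbz')):
        nzbname = os.path.splitext(nzbname)[0]      # strip a leaked extension
    nzbname = nzbname.replace(' ', '.').replace('_', '.').replace('&', 'and')
    return re.sub(r"[,:?'()]", '', nzbname)

# normalize_nzbname("Some Comic (2013) #01.cbz") -> 'Some.Comic.2013.#01'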
Code Example #22
File: opds.py Project: DarkSir23/mylar
    def _OneOffs(self, **kwargs):
        index = 0
        if 'index' in kwargs:
            index = int(kwargs['index'])
        links = []
        entries = []
        flist = []
        book = ''
        gbd = str(mylar.CONFIG.GRABBAG_DIR + '/*').encode('utf-8')
        flist = glob.glob(gbd)
        readlist = []
        for book in flist:
            issue = {}
            fileexists = True
            book = book.encode('utf-8')
            issue['Title'] = book
            issue['IssueID'] = book
            issue['fileloc'] = book
            issue['filename'] = book
            issue['image'] =  None
            issue['thumbnail'] = None
            issue['updated'] =  helpers.now()
            if not os.path.isfile(issue['fileloc']):
                fileexists = False
            if fileexists:
                readlist.append(issue)
        if len(readlist) > 0:
            if index <= len(readlist):
                subset = readlist[index:(index + self.PAGE_SIZE)]
                for issue in subset:
                    metainfo = None
                    metainfo = [{'writer': None,'summary': ''}]
                    entries.append(
                        {
                            'title': escape(issue['Title']),
                            'id': escape('comic:%s' % issue['IssueID']),
                            'updated': issue['updated'],
                            'content': escape('%s' % (metainfo[0]['summary'])),
                            'href': '%s?cmd=deliverFile&amp;file=%s&amp;filename=%s' % (self.opdsroot, quote_plus(issue['fileloc']), quote_plus(issue['filename'])),
                            'kind': 'acquisition',
                            'rel': 'file',
                            'author': metainfo[0]['writer'],
                            'image': issue['image'],
                            'thumbnail': issue['thumbnail'],
                        }
                    )

            feed = {}
            feed['title'] = 'Mylar OPDS - One-Offs'
            feed['id'] = escape('OneOffs')
            feed['updated'] = mylar.helpers.now()
            links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
            links.append(getLink(href='%s?cmd=OneOffs' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
            if len(readlist) > (index + self.PAGE_SIZE):
                links.append(
                    getLink(href='%s?cmd=OneOffs&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
            if index >= self.PAGE_SIZE:
                # previous page of the same OneOffs feed
                links.append(
                    getLink(href='%s?cmd=OneOffs&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))

            feed['links'] = links
            feed['entries'] = entries
            self.data = feed
            return
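_OneOffs() pages its entries through a plain index/PAGE_SIZE window and only emits rel='next'/'previous' links when items exist on that side. The slicing logic in isolation (the PAGE_SIZE value is illustrative; the handler reads self.PAGE_SIZE):

PAGE_SIZE = 30

def page_window(items, index, page_size=PAGE_SIZE):
    # Current page plus flags telling the feed whether to emit
    # rel='next' / rel='previous' links.
    subset = items[index:index + page_size]
    has_next = len(items) > (index + page_size)
    has_prev = index >= page_size
    return subset, has_next, has_prev

# page_window(list(range(75)), 30) -> (items 30..59, True, True)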
Code Example #23
File: logger.py Project: phairplay/mylar
    def emit(self, record):
        message = self.format(record)
        message = message.replace("\n", "<br />")
        mylar.LOG_LIST.insert(
            0, (helpers.now(), message, record.levelname, record.threadName))
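This emit() override mirrors every formatted record into mylar.LOG_LIST so the web UI can render recent log lines. A sketch of how such a handler attaches to the standard logging module (the class name, module-level list, and timestamp format are stand-ins for the mylar equivalents):

import logging
import time

LOG_LIST = []   # stand-in for mylar.LOG_LIST

class ListHandler(logging.Handler):
    # Mirror each formatted record into an in-memory list, newest first.
    def emit(self, record):
        message = self.format(record).replace("\n", "<br />")
        LOG_LIST.insert(0, (time.strftime("%Y-%m-%d %H:%M:%S"),
                            message, record.levelname, record.threadName))

logger = logging.getLogger('mylar')
logger.addHandler(ListHandler())
logger.warning('example entry')   # now sits at LOG_LIST[0]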
Code Example #24
File: importer.py Project: ndorman21/mylar
def GCDimport(gcomicid):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)

    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID": gcdcomicid}

    comic = myDB.action(
        "SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation FROM comics WHERE ComicID=?",
        [gcomicid],
    ).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    comlocation = comic[5]
    # ComicImage = comic[4]
    # print ("Comic:" + str(ComicName))

    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    # comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid), "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    if ComicName.startswith("The "):
        sortname = ComicName[4:]
    else:
        sortname = ComicName

    logger.info(u"Now adding/updating: " + ComicName)
    # --Now that we know ComicName, let's try some scraping
    # --Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(
        comseries=None,
        resultURL=resultURL,
        vari_loop=0,
        ComicID=gcdcomicid,
        TotalIssues=ComicIssues,
        issvariation=None,
        resultPublished=None,
    )
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    # --End

    ComicImage = gcdinfo["ComicImage"]

    # comic book location on machine
    # setup default location here
    if comlocation is None:
        if ":" in ComicName or "/" in ComicName or "," in ComicName:
            comicdir = ComicName
            if ":" in comicdir:
                comicdir = comicdir.replace(":", "")
            if "/" in comicdir:
                comicdir = comicdir.replace("/", "-")
            if "," in comicdir:
                comicdir = comicdir.replace(",", "")
        else:
            comicdir = ComicName
        comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR)
        # if it doesn't exist - create it (otherwise will bugger up later on)
        if os.path.isdir(str(comlocation)):
            logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
        else:
            # print ("Directory doesn't exist!")
            try:
                os.makedirs(str(comlocation))
                logger.info(u"Directory successfully created at: " + str(comlocation))
            except OSError:
                logger.error(u"Could not create comicdir : " + str(comlocation))

    comicIssues = gcdinfo["totalissues"]

    # let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        # let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))

        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))

    coverfile = mylar.CACHE_DIR + "/" + str(gcomicid) + ".jpg"

    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = "cache/" + str(gcomicid) + ".jpg"
            logger.info(u"Sucessfully retrieved cover for " + str(ComicName))
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {
        "ComicName": ComicName,
        "ComicSortName": sortname,
        "ComicYear": ComicYear,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        "ComicImage": ComicImage,
        # "ComicPublisher":   comic['ComicPublisher'],
        # "ComicPublished":   comicPublished,
        "DateAdded": helpers.today(),
        "Status": "Loading",
    }

    myDB.upsert("comics", newValueDict, controlValueDict)

    logger.info(u"Sucessfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    # print ("total issues:" + str(iscnt))
    # ---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while bb <= iscnt:
        # ---NEW.code
        try:
            gcdval = gcdinfo["gcdchoice"][bb]
            # print ("gcdval: " + str(gcdval))
        except IndexError:
            # account for gcd variation here
            if gcdinfo["gcdvariation"] == "gcd":
                # print ("gcd-variation accounted for.")
                issdate = "0000-00-00"
                int_issnum = int(issis / 1000)
            break
        if "nn" in str(gcdval["GCDIssue"]):
            # no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif "." in str(gcdval["GCDIssue"]):
            issst = str(gcdval["GCDIssue"]).find(".")
            issb4dec = str(gcdval["GCDIssue"])[:issst]
            # if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval["GCDIssue"])[issst + 1 :]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval["GCDIssue"])) * 1000
            gcd_issue = str(gcdval["GCDIssue"])
        # get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval["GCDDate"])
        issid = "G" + str(gcdval["IssueID"])
        if gcdval["GCDDate"] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval["GCDDate"])
        # print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        # ---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()

        # adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": gcomicid,
            "ComicName": ComicName,
            "Issue_Number": gcd_issue,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum,
        }

        # Only change the status & add DateAdded if the issue is not already in the database
        # (must be set after newValueDict is rebuilt above, or the assignment is discarded)
        if iss_exists is None:
            newValueDict["DateAdded"] = helpers.today()

        # print ("issueid:" + str(controlValueDict))
        # print ("values:" + str(newValueDict))

        if mylar.AUTOWANT_ALL:
            newValueDict["Status"] = "Wanted"
            # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
            #    newValueDict['Status'] = "Wanted"
        else:
            newValueDict["Status"] = "Skipped"

        if iss_exists:
            # print ("Existing status : " + str(iss_exists['Status']))
            newValueDict["Status"] = iss_exists["Status"]

        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1

    #        logger.debug(u"Updating comic cache for " + ComicName)
    #        cache.getThumb(ComicID=issue['issueid'])

    #        logger.debug(u"Updating cache for: " + ComicName)
    #        cache.getThumb(ComicIDcomicid)

    # check for existing files...
    updater.forceRescan(gcomicid)

    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now(),
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    logger.info(u"Updating complete for: " + ComicName)

    # let's check the pullist for anything at this time as well since we're here.
    if mylar.AUTOWANT_UPCOMING:
        logger.info(u"Checking this week's pullist for new issues of " + str(ComicName))
        updater.newpullcheck()

    # here we grab issues that have been marked as wanted above...

    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + ComicName)

        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result["IssueID"])
                if foundNZB == "yes":
                    updater.foundsearch(result["ComicID"], result["IssueID"])
    else:
        logger.info(u"No issues marked as wanted for " + ComicName)

    logger.info(u"Finished grabbing what I could.")
Code Example #25
File: updater.py Project: wraslor/mylar
def foundsearch(ComicID, IssueID, mode=None, down=None, provider=None, SARC=None, IssueArcID=None):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.

    # this is all redundant code that forceRescan already does.
    # should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue it just downloaded and
    # changes the status to Snatched accordingly. The have count is not to be
    # incremented at this stage as nothing has been downloaded yet - just the .nzb has been snatched and sent to SAB.
    myDB = db.DBConnection()

    logger.info('comicid: ' + str(ComicID))
    logger.info('issueid: ' + str(IssueID))
    if mode != 'story_arc':
        comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
        ComicName = comic['ComicName']
        if mode == 'want_ann':
            issue = myDB.action('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
        else:
            issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
        CYear = issue['IssueDate'][:4]

    else:
        issue = myDB.action('SELECT * FROM readinglist WHERE IssueArcID=?', [IssueArcID]).fetchone()
        ComicName = issue['ComicName']
        CYear = issue['IssueYEAR']

    if down is None:
        # update the status to Snatched (so it won't keep on re-downloading!)
        logger.fdebug('updating status to snatched')
        logger.fdebug('provider is ' + str(provider))
        newValue = {"Status":    "Snatched"}
        if mode == 'story_arc':
            cValue = {"IssueArcID": IssueArcID}
            snatchedupdate = {"IssueArcID": IssueArcID}
            myDB.upsert("readinglist", newValue, cValue)
            # update the snatched DB
            snatchedupdate = {"IssueID":     IssueArcID,
                              "Status":      "Snatched",
                              "Provider":    provider
                              }

        else:
            if mode == 'want_ann':
                controlValue = {"IssueID":   IssueID}
                myDB.upsert("annuals", newValue, controlValue)
            else:
                controlValue = {"IssueID":   IssueID}
                myDB.upsert("issues", newValue, controlValue)

            # update the snatched DB
            snatchedupdate = {"IssueID":     IssueID,
                              "Status":      "Snatched",
                              "Provider":    provider
                              }

        if mode == 'story_arc':
            IssueNum = issue['IssueNumber']
            newsnatchValues = {"ComicName":       ComicName,
                               "ComicID":         'None',
                               "Issue_Number":    IssueNum,
                               "DateAdded":       helpers.now(),
                               "Status":          "Snatched"
                               }
        else:
            if mode == 'want_ann':
                IssueNum = "Annual " + issue['Issue_Number']
            else:
                IssueNum = issue['Issue_Number']

            newsnatchValues = {"ComicName":       ComicName,
                               "ComicID":         ComicID,
                               "Issue_Number":    IssueNum,
                               "DateAdded":       helpers.now(),
                               "Status":          "Snatched"
                               }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
        logger.info("updated the snatched.")
    else:
        logger.info("updating the downloaded.")
        if mode == 'want_ann':
            IssueNum = "Annual " + issue['Issue_Number']
        elif mode == 'story_arc':
            IssueNum = issue['IssueNumber']
            IssueID = IssueArcID
        else:
            IssueNum = issue['Issue_Number']

        snatchedupdate = {"IssueID":     IssueID,
                          "Status":      "Downloaded",
                          "Provider":    provider
                          }
        newsnatchValues = {"ComicName":       ComicName,
                           "ComicID":         ComicID,
                           "Issue_Number":    IssueNum,
                           "DateAdded":       helpers.now(),
                           "Status":          "Downloaded"
                           }
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)

        if mode == 'story_arc':
            cValue = {"IssueArcID":   IssueArcID}
            nValue = {"Status":       "Downloaded"}
            myDB.upsert("readinglist", nValue, cValue)

        else:
            controlValue = {"IssueID":   IssueID}
            newValue = {"Status":    "Downloaded"}

            myDB.upsert("issues", newValue, controlValue)

    #print ("finished updating snatched db.")
    logger.info('Updating now complete for ' + ComicName + ' issue: ' + str(IssueNum))
    return
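foundsearch() writes everything through myDB.upsert(table, newValueDict, controlValueDict): the control dict identifies the row, the value dict carries the columns to set. mylar's actual implementation isn't shown here, but a plausible sqlite3 sketch of that control/value pattern looks like this (the schema and helper are assumptions for illustration, not mylar's code):

import sqlite3

def upsert(conn, table, new_value_dict, control_value_dict):
    # UPDATE the row matched by the control dict; INSERT if nothing matched.
    # Table/column names must be trusted - only the values are parameterized.
    where = ' AND '.join('%s=?' % k for k in control_value_dict)
    sets = ', '.join('%s=?' % k for k in new_value_dict)
    args = list(new_value_dict.values()) + list(control_value_dict.values())
    cur = conn.execute('UPDATE %s SET %s WHERE %s' % (table, sets, where), args)
    if cur.rowcount == 0:
        cols = list(control_value_dict) + list(new_value_dict)
        vals = list(control_value_dict.values()) + list(new_value_dict.values())
        conn.execute('INSERT INTO %s (%s) VALUES (%s)'
                     % (table, ', '.join(cols), ', '.join('?' * len(cols))), vals)

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE snatched (IssueID TEXT, Status TEXT)')
upsert(conn, 'snatched', {'Status': 'Snatched'}, {'IssueID': '1234'})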
Code Example #26
File: opds.py Project: sukhysall/mylar3
    def _OneOffs(self, **kwargs):
        index = 0
        if 'index' in kwargs:
            index = int(kwargs['index'])
        links = []
        entries = []
        flist = []
        book = ''
        gbd = str(mylar.CONFIG.GRABBAG_DIR + '/*')
        flist = glob.glob(gbd)
        readlist = []
        for book in flist:
            issue = {}
            fileexists = True
            issue['Title'] = book
            issue['IssueID'] = book
            issue['fileloc'] = book
            issue['filename'] = book
            issue['image'] =  None
            issue['thumbnail'] = None
            issue['updated'] =  helpers.now()
            if not os.path.isfile(issue['fileloc']):
                fileexists = False
            if fileexists:
                readlist.append(issue)
        if len(readlist) > 0:
            if index <= len(readlist):
                subset = readlist[index:(index + self.PAGE_SIZE)]
                for issue in subset:
                    metainfo = None
                    metainfo = [{'writer': None,'summary': ''}]
                    entries.append(
                        {
                            'title': escape(issue['Title']),
                            'id': escape('comic:%s' % issue['IssueID']),
                            'updated': issue['updated'],
                            'content': escape('%s' % (metainfo[0]['summary'])),
                            'href': '%s?cmd=deliverFile&amp;file=%s&amp;filename=%s' % (self.opdsroot, quote_plus(issue['fileloc']), quote_plus(issue['filename'])),
                            'kind': 'acquisition',
                            'rel': 'file',
                            'author': metainfo[0]['writer'],
                            'image': issue['image'],
                            'thumbnail': issue['thumbnail'],
                        }
                    )

            feed = {}
            feed['title'] = 'Mylar OPDS - One-Offs'
            feed['id'] = escape('OneOffs')
            feed['updated'] = mylar.helpers.now()
            links.append(getLink(href=self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
            links.append(getLink(href='%s?cmd=OneOffs' % self.opdsroot,type='application/atom+xml; profile=opds-catalog; kind=navigation',rel='self'))
            if len(readlist) > (index + self.PAGE_SIZE):
                links.append(
                    getLink(href='%s?cmd=OneOffs&amp;index=%s' % (self.opdsroot, index+self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
            if index >= self.PAGE_SIZE:
                # previous page of the same OneOffs feed
                links.append(
                    getLink(href='%s?cmd=OneOffs&amp;index=%s' % (self.opdsroot, index-self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))

            feed['links'] = links
            feed['entries'] = entries
            self.data = feed
            return
Code Example #27
File: importer.py Project: mriutta/mylar
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)

    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID": gcdcomicid}

    comic = myDB.action(
        'SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?',
        [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    #print ("Comic:" + str(ComicName))

    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    #comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        if dbcomic is None:
            newValueDict = {
                "ComicName":
                "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                "Status": "Active"
            }
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)

    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName

    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(comseries=None,
                                 resultURL=resultURL,
                                 vari_loop=0,
                                 ComicID=gcdcomicid,
                                 TotalIssues=ComicIssues,
                                 issvariation=None,
                                 resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" +
                    ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    #--End

    ComicImage = gcdinfo['ComicImage']

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
        else:
            comicdir = u_comicname

        series = comicdir
        publisher = ComicPublisher
        year = ComicYear

        #do work to generate folder path
        values = {
            '$Series': series,
            '$Publisher': publisher,
            '$Year': year,
            '$series': series.lower(),
            '$publisher': publisher.lower(),
            '$Volume': year
        }

        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic[
                'ComicYear'] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(
                mylar.FOLDER_FORMAT, values)

        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(
                u"There is no general directory specified - please specify in Config/Post-Processing."
            )
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) +
                    ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)

    comicIssues = gcdinfo['totalissues']

    #let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " +
                        str(mylar.CACHE_DIR))

        except OSError:
            logger.error(u"Could not create cache dir : " +
                         str(mylar.CACHE_DIR))

    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")

    #try:
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")

            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comi$

            logger.info(u"Sucessfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation) + "/cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    #if comic['ComicVersion'].isdigit():
    #    comicVol = "v" + comic['ComicVersion']
    #else:
    #    comicVol = None

    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {
        "ComicName": ComicName,
        "ComicSortName": sortname,
        "ComicYear": ComicYear,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        #"ComicVersion":     comicVol,
        "ComicImage": ComicImage,
        #"ComicPublisher":   comic['ComicPublisher'],
        #"ComicPublished":   comicPublished,
        "DateAdded": helpers.today(),
        "Status": "Loading"
    }

    myDB.upsert("comics", newValueDict, controlValueDict)

    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')

    logger.info(u"Sucessfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        #---NEW.code
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("gcdval: " + str(gcdval))
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                #print ("gcd-variation accounted for.")
                issdate = '0000-00-00'
                int_issnum = int(issis / 1000)
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(
                u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time."
            )
            updater.no_searchresults(gcomicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst + 1:][:2]
            if len(decis) == 1:
                decisval = int(decis) * 10
            else:
                decisval = int(decis)
            issaftdec = str(decisval)
            if int(issaftdec) == 0: issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])
        #get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        #print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        #---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?',
                                 [issid]).fetchone()

        # Only change the status & add DateAdded if the issue is not already in the database
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()

        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": gcomicid,
            "ComicName": ComicName,
            "Issue_Number": gcd_issue,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum
        }

        #print ("issueid:" + str(controlValueDict))
        #print ("values:" + str(newValueDict))

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        if iss_exists:
            #print ("Existing status : " + str(iss_exists['Status']))
            newValueDict['Status'] = iss_exists['Status']

        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1

#        logger.debug(u"Updating comic cache for " + ComicName)
#        cache.getThumb(ComicID=issue['issueid'])

#        logger.debug(u"Updating cache for: " + ComicName)
#        cache.getThumb(ComicIDcomicid)

    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now()
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO:
        if not os.path.exists(comlocation + "/cvinfo"):
            with open(comlocation + "/cvinfo", "w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" +
                                str(comicid))

    logger.info(u"Updating complete for: " + ComicName)

    #move the files...if imported is set (meaning it came through the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid, comlocation, ogcname)
        else:
            logger.info(
                "Mass import - Moving not Enabled. Setting Archived Status for import."
            )
            moveit.archivefiles(gcomicid, ogcname)

    #check for existing files...
    updater.forceRescan(gcomicid)

    if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " +
                        ComicName)
            updater.newpullcheck(comic['ComicName'], gcomicid)

        #here we grab issues that have been marked as wanted above...

        results = myDB.select(
            "SELECT * FROM issues where ComicID=? AND Status='Wanted'",
            [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : " + ComicName)

            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL
                        or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'],
                                            result['IssueID'])
        else:
            logger.info(u"No issues marked as wanted for " + ComicName)

        logger.info(u"Finished grabbing what I could.")
Code example #28
    def Process(self):
        module = '[FAILED-DOWNLOAD]'

        myDB = db.DBConnection()

        if self.nzb_name and self.nzb_folder:
            self._log('Failed download has been detected: ' + self.nzb_name +
                      ' in ' + self.nzb_folder)

            #since this has already been passed through the search module, which holds the IssueID in the nzblog,
            #let's find the matching nzbname and pass it the IssueID in order to mark it as Failed and then return
            #to the search module and continue trucking along.

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext)
                #splitext already yields the name minus the extension; using it
                #avoids re.sub treating the extension's '.' as a regex wildcard.
                nzbname = fd

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?\'\(\)]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))
            nzbname = re.sub('_', '.', str(nzbname))

            logger.fdebug(module + ' After conversions, nzbname is : ' +
                          str(nzbname))
            self._log("nzbname: " + str(nzbname))

            nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?",
                                    [nzbname]).fetchone()

            if nzbiss is None:
                self._log(
                    "Failure - could not initially locate nzbfile in my database to rename."
                )
                logger.fdebug(module +
                              ' Failure - could not locate nzbfile initially')
                # retry once more with underscores converted to dots (a no-op
                # if that conversion already happened above).
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname))
                logger.fdebug(
                    module +
                    ' Trying to locate nzbfile again with nzbname of : ' +
                    str(nzbname))
                nzbiss = myDB.selectone("SELECT * from nzblog WHERE nzbname=?",
                                        [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(
                        module +
                        ' Unable to locate downloaded file to rename. PostProcessing aborted.'
                    )
                    self._log(
                        'Unable to locate downloaded file to rename. PostProcessing aborted.'
                    )
                    self.valreturn.append({
                        "self.log": self.log,
                        "mode": 'stop'
                    })

                    return self.queue.put(self.valreturn)
                else:
                    self._log("I corrected and found the nzb as : " +
                              str(nzbname))
                    logger.fdebug(module +
                                  ' Auto-corrected and found the nzb as : ' +
                                  str(nzbname))
                    issueid = nzbiss['IssueID']
            else:
                issueid = nzbiss['IssueID']
                logger.fdebug(module + ' Issueid: ' + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number

        else:
            issueid = self.issueid
            nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?",
                                    [issueid]).fetchone()
            if nzbiss is None:
                logger.info(
                    module +
                    ' Cannot locate corresponding record in download history. This will be implemented soon.'
                )
                self.valreturn.append({"self.log": self.log, "mode": 'stop'})
                return self.queue.put(self.valreturn)

            nzbname = nzbiss['NZBName']

        # find the provider.
        self.prov = nzbiss['PROVIDER']
        logger.info(module + ' Provider: ' + self.prov)

        # grab the id.
        self.id = nzbiss['ID']
        logger.info(module + ' ID: ' + self.id)
        annchk = "no"

        if 'annual' in nzbname.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone(
                "SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL",
                [issueid]).fetchone()
        else:
            issuenzb = myDB.selectone(
                "SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL",
                [issueid]).fetchone()

        sandwich = None
        if issuenzb is not None:
            logger.info(module + ' issuenzb found.')
            if helpers.is_number(issueid):
                sandwich = int(issuenzb['IssueID'])
        else:
            logger.info(module + ' issuenzb not found.')
            #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
            #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
            if 'S' in issueid:
                sandwich = issueid
            elif 'G' in issueid or '-' in issueid:
                sandwich = 1
        if sandwich is not None and helpers.is_number(sandwich):
            if sandwich < 900000:
                # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
                pass
        else:
            logger.info(
                'Failed download handling for story-arcs and one-offs is not supported yet. Be patient!'
            )
            self._log(
                'Failed download handling for story-arcs and one-offs is not supported yet. PostProcessing aborted.'
            )
            self.valreturn.append({"self.log": self.log, "mode": 'stop'})

            return self.queue.put(self.valreturn)

        comicid = issuenzb['ComicID']
        issuenumOG = issuenzb['Issue_Number']
        logger.info(module + ' Successfully detected as : ' +
                    issuenzb['ComicName'] + ' issue: ' +
                    str(issuenzb['Issue_Number']) +
                    ' that was downloaded using ' + self.prov)
        self._log('Successfully detected as : ' + issuenzb['ComicName'] +
                  ' issue: ' + str(issuenzb['Issue_Number']) +
                  ' downloaded using ' + self.prov)

        logger.info(module + ' Marking as a Failed Download.')
        self._log('Marking as a Failed Download.')

        ctrlVal = {"IssueID": issueid}
        Vals = {"Status": 'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)

        ctrlVal = {"ID": self.id, "Provider": self.prov, "NZBName": nzbname}
        Vals = {
            "Status": 'Failed',
            "ComicName": issuenzb['ComicName'],
            "Issue_Number": issuenzb['Issue_Number'],
            "IssueID": issueid,
            "ComicID": comicid,
            "DateFailed": helpers.now()
        }
        myDB.upsert("failed", Vals, ctrlVal)

        logger.info(module + ' Successfully marked as Failed.')
        self._log('Successfully marked as Failed.')

        if mylar.CONFIG.FAILED_AUTO:
            logger.info(
                module +
                ' Sending back to search to see if we can find something that will not fail.'
            )
            self._log(
                'Sending back to search to see if we can find something better that will not fail.'
            )
            self.valreturn.append({
                "self.log": self.log,
                "mode": 'retry',
                "issueid": issueid,
                "comicid": comicid,
                "comicname": issuenzb['ComicName'],
                "issuenumber": issuenzb['Issue_Number'],
                "annchk": annchk
            })

            return self.queue.put(self.valreturn)
        else:
            logger.info(
                module +
                ' Stopping search here as automatic handling of failed downloads is not enabled *hint*'
            )
            self._log(
                'Stopping search here as automatic handling of failed downloads is not enabled *hint*'
            )
            self.valreturn.append({"self.log": self.log, "mode": 'stop'})
            return self.queue.put(self.valreturn)
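
The string surgery above reduces an nzb name to the dotted form that was stored in the nzblog table at search time. A condensed sketch of that normalization, as a hypothetical helper inferred from the substitutions used in Process():

import re


def normalize_nzbname(nzbname):
    # strip cbr/cbz extensions that slipped through
    for ext in ('.cbr', '.cbz'):
        if nzbname.lower().endswith(ext):
            nzbname = nzbname[:-len(ext)]
    nzbname = re.sub(' ', '.', nzbname)          # spaces -> dots
    nzbname = re.sub(r"[,:?'()]", '', nzbname)   # drop punctuation
    nzbname = re.sub('&', 'and', nzbname)        # & -> and
    nzbname = re.sub('_', '.', nzbname)          # underscores -> dots
    return nzbname


# e.g. normalize_nzbname('Saga 015 (2013) (Digital).cbz') -> 'Saga.015.2013.Digital'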
Code example #29
    def run(self):

        with rss_lock:

            logger.info('RSS Feed Check was last run at : ' +
                        str(mylar.RSS_LASTRUN))
            firstrun = "no"
            #check the last run of rss to make sure it's not hammering.
            if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0' or self.forcerss == True:
                logger.info('RSS Feed Check First Ever Run.')
                firstrun = "yes"
                mins = 0
            else:
                c_obj_date = datetime.datetime.strptime(
                    mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S")
                n_date = datetime.datetime.now()
                absdiff = abs(n_date - c_obj_date)
                mins = (absdiff.days * 24 * 60 * 60 +
                        absdiff.seconds) / 60.0  #elapsed seconds converted to minutes

            if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL):
                logger.fdebug(
                    'RSS Check has taken place less than the threshold - not initiating at this time.'
                )
                return

            mylar.RSS_LASTRUN = helpers.now()
            logger.fdebug('Updating RSS Run time to : ' +
                          str(mylar.RSS_LASTRUN))
            mylar.config_write()

            #function for looping through nzbs/torrent feeds
            if mylar.ENABLE_TORRENT_SEARCH:
                logger.info('[RSS] Initiating Torrent RSS Check.')
                if mylar.ENABLE_KAT:
                    logger.info(
                        '[RSS] Initiating Torrent RSS Feed Check on KAT.')
                    rsscheck.torrents(pickfeed='3')
                    rsscheck.torrents(pickfeed='6')
                if mylar.ENABLE_32P:
                    logger.info(
                        '[RSS] Initiating Torrent RSS Feed Check on 32P.')
                    if mylar.MODE_32P == 0:
                        logger.fdebug(
                            '[RSS] 32P mode set to Legacy mode. Monitoring New Releases feed only.'
                        )
                        if any([
                                mylar.PASSKEY_32P is None,
                                mylar.PASSKEY_32P == '',
                                mylar.RSSFEED_32P is None,
                                mylar.RSSFEED_32P == ''
                        ]):
                            logger.error(
                                '[RSS] Unable to validate information from provided RSS Feed. Verify that the feed provided is a current one.'
                            )
                        else:
                            rsscheck.torrents(pickfeed='1',
                                              feedinfo=mylar.KEYS_32P)
                    else:
                        logger.fdebug(
                            '[RSS] 32P mode set to Auth mode. Monitoring all personal notification feeds & New Releases feed'
                        )
                        if any([
                                mylar.USERNAME_32P is None,
                                mylar.USERNAME_32P == '',
                                mylar.PASSWORD_32P is None
                        ]):
                            logger.error(
                                '[RSS] Unable to sign-on to 32P to validate settings. Please enter/check your username password in the configuration.'
                            )
                        else:
                            if mylar.KEYS_32P is None:
                                feed32p = auth32p.info32p()
                                feedinfo = feed32p.authenticate()
                            else:
                                feedinfo = mylar.FEEDINFO_32P

                            if feedinfo is None or len(feedinfo) == 0:
                                logger.error(
                                    '[RSS] Unable to retrieve any information from 32P for RSS Feeds. Skipping for now.'
                                )
                            else:
                                rsscheck.torrents(pickfeed='1',
                                                  feedinfo=feedinfo[0])
                                x = 0
                                #assign personal feeds for 32p > +8
                                for fi in feedinfo:
                                    x += 1
                                    pfeed_32p = str(7 + x)
                                    rsscheck.torrents(pickfeed=pfeed_32p,
                                                      feedinfo=fi)

            logger.info('[RSS] Initiating RSS Feed Check for NZB Providers.')
            rsscheck.nzbs(forcerss=self.forcerss)
            logger.info('[RSS] RSS Feed Check/Update Complete')
            logger.info('[RSS] Watchlist Check for new Releases')
            mylar.search.searchforissue(rsscheck='yes')
            logger.info('[RSS] Watchlist Check complete.')
            if self.forcerss:
                logger.info('[RSS] Successfully ran a forced RSS Check.')
            return
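
The guard at the top of run() is just an elapsed-minutes check against RSS_CHECKINTERVAL. A minimal sketch of that throttle, assuming timestamps in the same '%Y-%m-%d %H:%M:%S' format mylar writes:

import datetime


def rss_check_due(lastrun, interval_minutes, forced=False):
    # A forced check, or one that has never run, always proceeds.
    if forced or not lastrun or lastrun == '0':
        return True
    last = datetime.datetime.strptime(lastrun, "%Y-%m-%d %H:%M:%S")
    elapsed = abs(datetime.datetime.now() - last)
    mins = (elapsed.days * 24 * 60 * 60 + elapsed.seconds) / 60.0
    return mins >= interval_minutes


# e.g. rss_check_due('2014-01-01 12:00:00', 20) is True once 20 minutes have elapsed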
Code example #30
File: importer.py Project: ndorman21/mylar
def addComictoDB(comicid, mismatch=None):
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    from mylar import cache

    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID": comicid}

    dbcomic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName": "Comic ID: %s" % (comicid), "Status": "Loading"}
        comlocation = None
    else:
        newValueDict = {"Status": "Loading"}
        comlocation = dbcomic["ComicLocation"]

    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    comic = cv.getComic(comicid, "comic")
    # comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if not comic:
        logger.warn("Error fetching comic. ID for : " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid), "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    if comic["ComicName"].startswith("The "):
        sortname = comic["ComicName"][4:]
    else:
        sortname = comic["ComicName"]

    logger.info(u"Now adding/updating: " + comic["ComicName"])
    # --Now that we know ComicName, let's try some scraping
    # --Start
    # gcd will return issue details (most importantly publishing date)
    if mismatch == "no" or mismatch is None:
        gcdinfo = parseit.GCDScraper(comic["ComicName"], comic["ComicYear"], comic["ComicIssues"], comicid)
        mismatch_com = "no"
        if gcdinfo == "No Match":
            updater.no_searchresults(comicid)
            nomatch = "true"
            logger.info(
                u"There was an error when trying to add " + comic["ComicName"] + " (" + comic["ComicYear"] + ")"
            )
            return nomatch
        else:
            mismatch_com = "yes"
            # print ("gcdinfo:" + str(gcdinfo))

    elif mismatch == "yes":
        CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
        if CV_EXcomicid["variloop"] is None:
            pass
        else:
            vari_loop = CV_EXcomicid["variloop"]
            NewComicID = CV_EXcomicid["NewComicID"]
            gcomicid = CV_EXcomicid["GComicID"]
            resultURL = "/series/" + str(NewComicID) + "/"
            # print ("variloop" + str(CV_EXcomicid['variloop']))
            # if vari_loop == '99':
            gcdinfo = parseit.GCDdetails(
                comseries=None,
                resultURL=resultURL,
                vari_loop=0,
                ComicID=comicid,
                TotalIssues=0,
                issvariation="no",
                resultPublished=None,
            )

    logger.info(u"Sucessfully retrieved details for " + comic["ComicName"])
    # print ("Series Published" + parseit.resultPublished)

    # comic book location on machine
    # setup default location here

    if comlocation is None:
        if ":" in comic["ComicName"] or "/" in comic["ComicName"] or "," in comic["ComicName"]:
            comicdir = comic["ComicName"]
            if ":" in comicdir:
                comicdir = comicdir.replace(":", "")
            if "/" in comicdir:
                comicdir = comicdir.replace("/", "-")
            if "," in comicdir:
                comicdir = comicdir.replace(",", "")
        else:
            comicdir = comic["ComicName"]

        series = comicdir
        publisher = comic["ComicPublisher"]
        year = comic["ComicYear"]

        # do work to generate folder path

        values = {"$Series": series, "$Publisher": publisher, "$Year": year}

        # print mylar.FOLDER_FORMAT
        # print 'working dir:'
        # print helpers.replace_all(mylar.FOLDER_FORMAT, values)

        if mylar.FOLDER_FORMAT == "":
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic["ComicYear"] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)

        # comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR)
        # if it doesn't exist - create it (otherwise will bugger up later on)
        if os.path.isdir(str(comlocation)):
            logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
        else:
            # print ("Directory doesn't exist!")
            try:
                os.makedirs(str(comlocation))
                logger.info(u"Directory successfully created at: " + str(comlocation))
            except OSError:
                logger.error(u"Could not create comicdir : " + str(comlocation))

    # try to account for CV not updating new issues as fast as GCD
    # seems CV doesn't update total counts
    # comicIssues = gcdinfo['totalissues']
    if gcdinfo["gcdvariation"] == "cv":
        comicIssues = str(int(comic["ComicIssues"]) + 1)
    else:
        comicIssues = comic["ComicIssues"]

    # let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        # let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))

        except OSError:
            logger.error("Could not create cache dir. Check permissions of cache dir: " + str(mylar.CACHE_DIR))

    coverfile = mylar.CACHE_DIR + "/" + str(comicid) + ".jpg"

    # try:
    urllib.urlretrieve(str(comic["ComicImage"]), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = "cache/" + str(comicid) + ".jpg"
            logger.info(u"Sucessfully retrieved cover for " + str(comic["ComicName"]))
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    controlValueDict = {"ComicID": comicid}
    newValueDict = {
        "ComicName": comic["ComicName"],
        "ComicSortName": sortname,
        "ComicYear": comic["ComicYear"],
        "ComicImage": ComicImage,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        "ComicPublisher": comic["ComicPublisher"],
        "ComicPublished": gcdinfo["resultPublished"],
        "DateAdded": helpers.today(),
        "Status": "Loading",
    }

    myDB.upsert("comics", newValueDict, controlValueDict)

    issued = cv.getComic(comicid, "issue")
    logger.info(u"Sucessfully retrieved issue details for " + comic["ComicName"])
    n = 0
    iscnt = int(comicIssues)
    issid = []
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    # print ("total issues:" + str(iscnt))
    # ---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + comic["ComicName"])

    # file check to see if issue exists
    logger.info(u"Checking directory for existing issues.")
    # fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName'])
    # havefiles = 0

    # fccnt = int(fc['comiccount'])
    # logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
    # fcnew = []

    while n <= iscnt:
        # ---NEW.code
        try:
            firstval = issued["issuechoice"][n]
        except IndexError:
            break
        cleanname = helpers.cleanName(firstval["Issue_Name"])
        issid = str(firstval["Issue_ID"])
        issnum = str(firstval["Issue_Number"])
        issname = cleanname
        if "." in str(issnum):
            issn_st = str(issnum).find(".")
            issn_b4dec = str(issnum)[:issn_st]
            # if the length of decimal is only 1 digit, assume it's a tenth
            dec_is = str(issnum)[issn_st + 1 :][:2]
            if len(dec_is) == 1:
                dec_nisval = int(dec_is) * 10
            else:
                dec_nisval = int(dec_is)
            iss_naftdec = str(dec_nisval)
            iss_issue = issn_b4dec + "." + iss_naftdec
            issis = (int(issn_b4dec) * 1000) + dec_nisval
        else:
            issis = int(issnum) * 1000

        bb = 0
        while bb <= iscnt:
            try:
                gcdval = gcdinfo["gcdchoice"][bb]
            except IndexError:
                # account for gcd variation here
                if gcdinfo["gcdvariation"] == "gcd":
                    # print ("gcd-variation accounted for.")
                    issdate = "0000-00-00"
                    int_issnum = int(issis / 1000)
                break
            if "nn" in str(gcdval["GCDIssue"]):
                # no number detected - GN, TP or the like
                logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
                updater.no_searchresults(comicid)
                return
            elif "." in str(gcdval["GCDIssue"]):
                # print ("g-issue:" + str(gcdval['GCDIssue']))
                issst = str(gcdval["GCDIssue"]).find(".")
                # print ("issst:" + str(issst))
                issb4dec = str(gcdval["GCDIssue"])[:issst]
                # print ("issb4dec:" + str(issb4dec))
                # if the length of decimal is only 1 digit, assume it's a tenth
                decis = str(gcdval["GCDIssue"])[issst + 1 :][:2]
                # print ("decis:" + str(decis))
                if len(decis) == 1:
                    decisval = int(decis) * 10
                else:
                    decisval = int(decis)
                issaftdec = str(decisval)
                gcd_issue = issb4dec + "." + issaftdec
                # print ("gcd_issue:" + str(gcd_issue))
                gcdis = (int(issb4dec) * 1000) + decisval
            else:
                gcdis = int(str(gcdval["GCDIssue"])) * 1000
            if gcdis == issis:
                issdate = str(gcdval["GCDDate"])
                int_issnum = int(gcdis / 1000)
                # get the latest issue / date using the date.
                if gcdval["GCDDate"] > latestdate:
                    latestiss = str(issnum)
                    latestdate = str(gcdval["GCDDate"])
                    break
                # bb = iscnt
            bb += 1
        # print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
        # ---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()

        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": comicid,
            "ComicName": comic["ComicName"],
            "IssueName": issname,
            "Issue_Number": issnum,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum,
        }

        # Only set DateAdded if the issue is not already in the database; set it
        # after rebuilding newValueDict above so the value isn't discarded.
        if iss_exists is None:
            newValueDict["DateAdded"] = helpers.today()
        if mylar.AUTOWANT_ALL:
            newValueDict["Status"] = "Wanted"
            # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
            #    newValueDict['Status'] = "Wanted"
        else:
            newValueDict["Status"] = "Skipped"

        if iss_exists:
            # print ("Existing status : " + str(iss_exists['Status']))
            newValueDict["Status"] = iss_exists["Status"]

        myDB.upsert("issues", newValueDict, controlValueDict)
        n += 1

    #        logger.debug(u"Updating comic cache for " + comic['ComicName'])
    #        cache.getThumb(ComicID=issue['issueid'])

    #        logger.debug(u"Updating cache for: " + comic['ComicName'])
    #        cache.getThumb(ComicIDcomicid)

    # check for existing files...
    updater.forceRescan(comicid)

    controlValueStat = {"ComicID": comicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now(),
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    logger.info(u"Updating complete for: " + comic["ComicName"])

    # let's check the pullist for anything at this time as well since we're here.
    if mylar.AUTOWANT_UPCOMING:
        logger.info(u"Checking this week's pullist for new issues of " + str(comic["ComicName"]))
        updater.newpullcheck()

    # here we grab issues that have been marked as wanted above...

    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + comic["ComicName"])

        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result["IssueID"])
                if foundNZB == "yes":
                    updater.foundsearch(result["ComicID"], result["IssueID"])
    else:
        logger.info(u"No issues marked as wanted for " + comic["ComicName"])

    logger.info(u"Finished grabbing what I could.")
Code example #31
File: weeklypull.py Project: pyntel/mylar
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None):
    logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
    myDB = db.DBConnection()

    not_t = ["TP", "NA", "HC", "PI"]

    not_c = ["PTG", "COMBO PACK", "(PP #"]

    lines = []
    unlines = []
    llen = []
    ccname = []
    pubdate = []
    w = 0
    tot = 0
    chkout = []
    watchfnd = []
    watchfndiss = []
    watchfndextra = []

    # print ("----------WATCHLIST--------")
    a_list = []
    b_list = []
    comicid = []

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    con = sqlite3.connect(str(mylardb))

    with con:

        cur = con.cursor()
        # if it's a one-off check (during an add series), load the comicname here and ignore below.
        if comic1off_name:
            lines.append(comic1off_name.strip())
            unlines.append(comic1off_name.strip())
            comicid.append(comic1off_id)
            w = 1
        else:
            # let's read in the comic.watchlist from the db here
            cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished from comics")
            while True:
                watchd = cur.fetchone()
                # print ("watchd: " + str(watchd))
                if watchd is None:
                    break
                if "Present" in watchd[4] or (helpers.now()[:4] in watchd[4]):
                    # let's not even bother with comics that are in the Present.
                    a_list.append(watchd[1])
                    b_list.append(watchd[2])
                    comicid.append(watchd[0])
                    pubdate.append(watchd[4])
                    # print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
                    # if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
                    lines.append(a_list[w].strip())
                    unlines.append(a_list[w].strip())
                    llen.append(a_list[w].splitlines())
                    ccname.append(a_list[w].strip())
                    tmpwords = a_list[w].split(None)
                    ltmpwords = len(tmpwords)
                    ltmp = 1
                    w += 1
        cnt = int(w - 1)
        cntback = int(w - 1)
        kp = []
        ki = []
        kc = []
        otot = 0

        logger.fdebug("You are watching for: " + str(w) + " comics")
        # print ("----------THIS WEEK'S PUBLISHED COMICS------------")
        if w > 0:
            while cnt > -1:
                lines[cnt] = lines[cnt].upper()
                # llen[cnt] = str(llen[cnt])
                logger.fdebug("looking for : " + str(lines[cnt]))
                sqlsearch = re.sub("[\_\#\,\/\:\;\.\-\!\$\%\&'\?\@]", " ", lines[cnt])
                sqlsearch = re.sub(r"\s", "%", sqlsearch)
                if "THE" in sqlsearch:
                    sqlsearch = re.sub("THE", "", sqlsearch)
                if "+" in sqlsearch:
                    sqlsearch = re.sub("\+", "%PLUS%", sqlsearch)
                logger.fdebug("searchsql: " + str(sqlsearch))
                weekly = myDB.select(
                    "SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)", [sqlsearch]
                )
                # cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
                for week in weekly:
                    if week is None:
                        break
                    for nono in not_t:
                        if nono in week["PUBLISHER"]:
                            logger.fdebug("nono present")
                            break
                        if nono in week["ISSUE"]:
                            # logger.fdebug("graphic novel/tradeback detected..ignoring.")
                            break
                        for nothere in not_c:
                            if nothere in week["EXTRA"]:
                                # logger.fdebug("nothere present")
                                break
                            else:
                                comicnm = week["COMIC"]
                                # here's the tricky part, ie. BATMAN will match on
                                # every batman comic, not exact
                                # logger.fdebug("comparing" + str(comicnm) + "..to.." + str(unlines[cnt]).upper())

                                # -NEW-
                                # strip out all special characters and compare
                                watchcomic = re.sub("[\_\#\,\/\:\;\.\-\!\$\%\&'\?\@]", "", unlines[cnt])
                                comicnm = re.sub("[\_\#\,\/\:\;\.\-\!\$\%\&'\?\@]", "", comicnm)
                                watchcomic = re.sub(r"\s", "", watchcomic)
                                comicnm = re.sub(r"\s", "", comicnm)
                                # logger.fdebug("Revised_Watch: " + watchcomic)
                                # logger.fdebug("ComicNM: " + comicnm)
                                if "THE" in watchcomic.upper():
                                    modwatchcomic = re.sub("THE", "", watchcomic.upper())
                                    modcomicnm = re.sub("THE", "", comicnm)
                                else:
                                    modwatchcomic = watchcomic
                                    modcomicnm = comicnm
                                # thnx to A+X for this...
                                if "+" in watchcomic:
                                    logger.fdebug("+ detected...adjusting.")
                                    logger.fdebug("comicnm:" + comicnm)
                                    logger.fdebug("watchcomic:" + watchcomic)
                                    modwatchcomic = re.sub("\+", "PLUS", modwatchcomic)
                                    logger.fdebug("modcomicnm:" + modcomicnm)
                                    logger.fdebug("modwatchcomic:" + modwatchcomic)
                                if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
                                    logger.fdebug("matched on:" + str(comicnm) + "..." + str(watchcomic).upper())
                                    pass
                                elif "ANNUAL" in week["EXTRA"]:
                                    pass
                                    # print ( row[3] + " matched on ANNUAL")
                                else:
                                    break
                                if ("NA" not in week["ISSUE"]) and ("HC" not in week["ISSUE"]):
                                    if (
                                        ("COMBO PACK" not in week["EXTRA"])
                                        and ("2ND PTG" not in week["EXTRA"])
                                        and ("3RD PTG" not in week["EXTRA"])
                                    ):
                                        otot += 1
                                        dontadd = "no"
                                        if dontadd == "no":
                                            # print (row[0], row[1], row[2])
                                            tot += 1
                                            # kp.append(row[0])
                                            # ki.append(row[1])
                                            # kc.append(comicnm)
                                            if "ANNUAL" in week["EXTRA"]:
                                                watchfndextra.append("annual")
                                            else:
                                                watchfndextra.append("none")
                                            watchfnd.append(comicnm)
                                            watchfndiss.append(week["ISSUE"])
                                            ComicID = comicid[cnt]
                                            if not mylar.CV_ONLY:
                                                ComicIssue = str(watchfndiss[tot - 1] + ".00")
                                            else:
                                                ComicIssue = str(watchfndiss[tot - 1])
                                            ComicDate = str(week["SHIPDATE"])
                                            ComicName = str(unlines[cnt])
                                            logger.fdebug(
                                                "Watchlist hit for : "
                                                + ComicName
                                                + " ISSUE: "
                                                + str(watchfndiss[tot - 1])
                                            )
                                            # here we add to comics.latest
                                            updater.latest_update(
                                                ComicID=ComicID, LatestIssue=ComicIssue, LatestDate=ComicDate
                                            )
                                            # here we add to upcoming table...
                                            statusupdate = updater.upcoming_update(
                                                ComicID=ComicID,
                                                ComicName=ComicName,
                                                IssueNumber=ComicIssue,
                                                IssueDate=ComicDate,
                                                forcecheck=forcecheck,
                                            )
                                            # here we update status of weekly table...
                                            updater.weekly_update(
                                                ComicName=week["COMIC"], IssueNumber=ComicIssue, CStatus=statusupdate
                                            )
                                            break
                                        break
                        break
                cnt -= 1
        # print ("-------------------------")
        logger.fdebug("There are " + str(otot) + " comics this week to get!")
        # print ("However I've already grabbed " + str(btotal) )
        # print ("I need to get " + str(tot) + " comic(s)!" )
        logger.info(u"Finished checking for comics on my watchlist.")
    # con.close()
    return
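
The match logic buried in the nested loops above normalizes both names before an exact comparison. A condensed sketch of that comparison, as a hypothetical helper mirroring the substitutions used in pullitcheck():

import re


def names_match(watchcomic, pullcomic):
    def norm(name):
        name = re.sub(r"[_#,/:;.\-!$%&'?@]", '', name.upper())  # drop punctuation
        name = re.sub(r'\s', '', name)                          # drop whitespace
        name = name.replace('THE', '')                          # strip THE, as the compare above does
        return name.replace('+', 'PLUS')                        # thnx to A+X for this...
    return norm(watchcomic) == norm(pullcomic)


assert names_match('A+X', 'A + X')
assert names_match('The Walking Dead', 'WALKING DEAD')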