Example #1
0
def loaditup(comicname, comicid, issue, chktype):
    myDB = db.DBConnection()
    issue_number = helpers.issuedigits(issue)
    if chktype == 'annual':
        typedisplay = 'annual issue'
        logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparative issue analysis for pull-list')
        issueload = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
    else:
        typedisplay = 'issue'
        logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparative issue analysis for pull-list')
        issueload = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()

    if issueload is None:
        logger.fdebug('No results matched for Issue number - either this is a NEW issue with no data yet, or something is wrong')
        return 'no results'

    dataissue = []    
    releasedate = issueload['ReleaseDate']
    storedate = issueload['IssueDate']
    status = issueload['Status']

    if releasedate == '0000-00-00':
        logger.fdebug('Store date of 0000-00-00 returned for ' + str(typedisplay) + ' # ' + str(issue) + '. Refreshing series to see if valid date present')
        mismatch = 'no'
        #issuerecheck = mylar.importer.addComictoDB(comicid,mismatch,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
        issuerecheck = mylar.importer.updateissuedata(comicid,comicname,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
        if issuerecheck is not None:
            for il in issuerecheck:
                #this is only one record..
                releasedate = il['IssueDate']
                storedate = il['ReleaseDate']
                #status = il['Status']
            logger.fdebug('issue-recheck releasedate is : ' + str(releasedate))
            logger.fdebug('issue-recheck storedate of : ' + str(storedate))

    if releasedate is not None and releasedate != "None" and releasedate != "":
        logger.fdebug('Returning Release Date for ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(releasedate))
        thedate = re.sub("[^0-9]", "", releasedate)  #convert date to numerics only (should be in yyyymmdd)
        #return releasedate
    else:
        if storedate is None or storedate == "None" or storedate == "":
            logger.fdebug('no issue data available - both release date & store date. Returning no results')
            return 'no results'
        logger.fdebug('Returning Publication Date for ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(storedate))
        thedate = re.sub("[^0-9]", "", storedate)  #convert date to numerics only (should be in yyyymmdd)
        #return storedate

    dataissue.append({"issuedate":  thedate,
                      "status":     status})

    return dataissue
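
The date handling above strips whichever date it settles on down to a digits-only yyyymmdd string so that callers can compare dates directly. A minimal standalone sketch of that normalization (illustrative only, not part of the module):

import re

def to_yyyymmdd(datestr):
    # strip every non-digit so '2014-02-05' becomes the comparable string '20140205'
    return re.sub("[^0-9]", "", datestr)

print(to_yyyymmdd('2014-02-05'))                 # 20140205
print(to_yyyymmdd('2014-02-05') >= '20140101')   # True - yyyymmdd strings compare correctly
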
Example #2
0
def loaditup(comicname, comicid, issue, chktype):
    myDB = db.DBConnection()
    issue_number = helpers.issuedigits(issue)
    if chktype == 'annual':
        typedisplay = 'annual issue'
        logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparative issue analysis for pull-list')
        issueload = myDB.action('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
    else:
        typedisplay = 'issue'
        logger.fdebug('[' + comicname + '] trying to locate ' + str(typedisplay) + ' ' + str(issue) + ' to do comparative issue analysis for pull-list')
        issueload = myDB.action('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()

    if issueload is None:
        logger.fdebug('No results matched for Issue number - either this is a NEW issue with no data yet, or something is wrong')
        return 'no results'

    dataissue = []    
    releasedate = issueload['ReleaseDate']
    storedate = issueload['IssueDate']
    status = issueload['Status']

    if releasedate == '0000-00-00':
        logger.fdebug('Store date of 0000-00-00 returned for ' + str(typedisplay) + ' # ' + str(issue) + '. Refreshing series to see if valid date present')
        mismatch = 'no'
        issuerecheck = mylar.importer.addComictoDB(comicid,mismatch,calledfrom='weekly',issuechk=issue_number,issuetype=chktype)
        if issuerecheck is not None:
            for il in issuerecheck:
                #this is only one record..
                releasedate = il['IssueDate']
                storedate = il['ReleaseDate']
                status = il['Status']
            logger.fdebug('issue-recheck releasedate is : ' + str(releasedate))
            logger.fdebug('issue-recheck storedate of : ' + str(storedate))

    if releasedate is not None and releasedate != "None" and releasedate != "":
        logger.fdebug('Returning Release Date for ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(releasedate))
        thedate = re.sub("[^0-9]", "", releasedate)  #convert date to numerics only (should be in yyyymmdd)
        #return releasedate
    else:
        if storedate is None or storedate == "None" or storedate == "":
            logger.fdebug('no issue data available - both release date & store date. Returning no results')
            return 'no results'
        logger.fdebug('Returning Publication Date for ' + str(typedisplay) + ' # ' + str(issue) + ' of ' + str(storedate))
        thedate = re.sub("[^0-9]", "", storedate)  #convert date to numerics only (should be in yyyymmdd)
        #return storedate

    dataissue.append({"issuedate":  thedate,
                      "status":     status})

    return dataissue
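
Callers of loaditup() have to branch on its two return shapes: the literal string 'no results', or a one-element list holding an issuedate/status dict (this is how the pull-list matching in Example #5 consumes it). A minimal consumption sketch, with loaditup() stubbed out for illustration (loaditup_stub is hypothetical):

def loaditup_stub(comicname, comicid, issue, chktype):
    # stand-in with the same two return shapes as loaditup() above
    if issue == '999':
        return 'no results'
    return [{"issuedate": "20140205", "status": "Wanted"}]

datevalues = loaditup_stub('Some Series', '1234', '1', 'series')
if datevalues == 'no results':
    print('no issue data yet - skip the date comparison')
else:
    print(datevalues[0]['issuedate'] + ' / ' + datevalues[0]['status'])
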
Example #3
0
def loaditup(comicname, comicid, issue):
    myDB = db.DBConnection()
    issue_number = helpers.issuedigits(issue)
    logger.fdebug('[' + comicname + '] trying to locate issue ' + str(issue) + ' to do comparative issue analysis for pull-list')
    issueload = myDB.action('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [comicid, issue_number]).fetchone()
    if issueload is None:
        logger.fdebug('No results matched for Issue number - either this is a NEW series with no data yet, or something is wrong')
        return 'no results'
    if issueload['ReleaseDate'] is not None and issueload['ReleaseDate'] != 'None':
        logger.fdebug('Returning Release Date for issue # ' + str(issue) + ' of ' + str(issueload['ReleaseDate']))
        return issueload['ReleaseDate']
    else:
        logger.fdebug('Returning Publication Date for issue # ' + str(issue) + ' of ' + str(issueload['PublicationDate']))
        return issueload['PublicationDate']
Example #4
0
def loaditup(comicname, comicid, issue):
    myDB = db.DBConnection()
    issue_number = helpers.issuedigits(issue)
    logger.fdebug('[' + comicname + '] trying to locate issue ' + str(issue) +
                  ' to do comparative issue analysis for pull-list')
    issueload = myDB.action(
        'SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?',
        [comicid, issue_number]).fetchone()
    if issueload is None:
        logger.fdebug(
            'No results matched for Issue number - either this is a NEW series with no data yet, or something is wrong'
        )
        return 'no results'
    if issueload['ReleaseDate'] is not None and issueload[
            'ReleaseDate'] != 'None':
        logger.fdebug('Returning Release Date for issue # ' + str(issue) +
                      ' of ' + str(issueload['ReleaseDate']))
        return issueload['ReleaseDate']
    else:
        logger.fdebug('Returning Publication Date for issue # ' + str(issue) +
                      ' of ' + str(issueload['PublicationDate']))
        return issueload['PublicationDate']
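
All of the variants above key their lookups on helpers.issuedigits(issue), which maps a raw issue string to the integer stored in Int_IssueNumber. The real helper is not shown in these examples; the stand-in below is hypothetical and only illustrates the role it plays - turning strings like '12' or '12.1' into one comparable integer - and may not match Mylar's actual scheme:

import re

def issuedigits_sketch(issue):
    # hypothetical stand-in, NOT the real helpers.issuedigits
    match = re.match(r'(\d+)(?:\.(\d+))?', str(issue))
    if not match:
        return 0
    whole = int(match.group(1))
    frac = int(match.group(2) or 0)
    # scale whole issues so decimal issues sort between them
    return whole * 1000 + frac

print(issuedigits_sketch('12'))     # 12000
print(issuedigits_sketch('12.1'))   # 12001
print(issuedigits_sketch('0'))      # 0
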
Example #5
0
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None, futurepull=None, issue=None):
    if futurepull is None:
        logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
    else:
        logger.info('Checking the Future Releases list for upcoming comics I am watching for...')
    myDB = db.DBConnection()

    not_t = ['TP',
             'NA',
             'HC',
             'PI']

    not_c = ['PTG',
             'COMBO PACK',
             '(PP #']

    lines = []
    unlines = []
    llen = []
    ccname = []
    pubdate = []
    latestissue = []
    w = 0
    wc = 0
    tot = 0
    chkout = []
    watchfnd = []
    watchfndiss = []
    watchfndextra = []
    alternate = []

    #print ("----------WATCHLIST--------")
    a_list = []
    b_list = []
    comicid = []

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")

    con = sqlite3.connect(str(mylardb))

    with con:

        cur = con.cursor()
        # if it's a one-off check (during an add series), load the comicname here and ignore below.
        if comic1off_name:
            logger.fdebug("this is a one-off" + comic1off_name)
            lines.append(comic1off_name.strip())
            unlines.append(comic1off_name.strip())
            comicid.append(comic1off_id)
            latestissue.append(issue)
            w = 1            
        else:
            #let's read in the comic.watchlist from the db here
            cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished, LatestDate, ForceContinuing, AlternateSearch, LatestIssue from comics")
            while True:
                watchd = cur.fetchone()
                #print ("watchd: " + str(watchd))
                if watchd is None:
                    break
                if 'Present' in watchd[4] or (helpers.now()[:4] in watchd[4]) or watchd[6] == 1:
                 # this gets buggered up when series are named the same, and one ends in the current
                 # year, and the new series starts in the same year - ie. Avengers
                 # lets' grab the latest issue date and see how far it is from current
                 # anything > 45 days we'll assume it's a false match ;)
                    logger.fdebug("ComicName: " + watchd[1])
                    latestdate = watchd[5]
                    logger.fdebug("latestdate:  " + str(latestdate))
                    if latestdate[8:] == '':
                        logger.fdebug("invalid date " + str(latestdate) + " appending 01 for day for continuation.")
                        latest_day = '01'
                    else:
                        latest_day = latestdate[8:]
                    c_date = datetime.date(int(latestdate[:4]),int(latestdate[5:7]),int(latest_day))
                    n_date = datetime.date.today()
                    logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
                    recentchk = (n_date - c_date).days
                    logger.fdebug("recentchk: " + str(recentchk) + " days")
                    chklimit = helpers.checkthepub(watchd[0])
                    logger.fdebug("Check date limit set to : " + str(chklimit))
                    logger.fdebug(" ----- ")
                    if recentchk < int(chklimit) or watchd[6] == 1:
                        if watchd[6] == 1:
                            logger.fdebug('Forcing Continuing Series enabled for series...')
                        # let's not even bother with comics that are not in the Present.
                        a_list.append(watchd[1])
                        b_list.append(watchd[2])
                        comicid.append(watchd[0])
                        pubdate.append(watchd[4])
                        latestissue.append(watchd[8])
                        lines.append(a_list[w].strip())
                        unlines.append(a_list[w].strip())
                        w+=1   # we need to increment the count here, so we don't count the same comics twice (albeit with alternate names)

                        #here we load in the alternate search names for a series and assign them the comicid and
                        #alternate names
                        Altload = helpers.LoadAlternateSearchNames(watchd[7], watchd[0])
                        if Altload == 'no results':
                            pass
                        else:
                            wc = 0 
                            alt_cid = Altload['ComicID']
                            n = 0
                            iscnt = Altload['Count']
                            while (n <= iscnt):
                                try:
                                    altval = Altload['AlternateName'][n]
                                except IndexError:
                                    break
                                cleanedname = altval['AlternateName']
                                a_list.append(altval['AlternateName'])
                                b_list.append(watchd[2])
                                comicid.append(alt_cid)
                                pubdate.append(watchd[4])
                                latestissue.append(watchd[8])
                                lines.append(a_list[w+wc].strip())
                                unlines.append(a_list[w+wc].strip())
                                logger.fdebug('loading in Alternate name for ' + str(cleanedname))
                                n+=1
                                wc+=1
                            w+=wc

                #-- to be removed - 
                        #print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
                        #if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
                        #lines.append(a_list[w].strip())
                        #unlines.append(a_list[w].strip())
                        #llen.append(a_list[w].splitlines())
                        #ccname.append(a_list[w].strip())
                        #tmpwords = a_list[w].split(None)
                        #ltmpwords = len(tmpwords)
                        #ltmp = 1
                #-- end to be removed
                    else:
                        logger.fdebug("Determined to not be a Continuing series at this time.")    
        cnt = int(w-1)
        cntback = int(w-1)
        kp = []
        ki = []
        kc = []
        otot = 0

        logger.fdebug("You are watching for: " + str(w) + " comics")
        #print ("----------THIS WEEK'S PUBLISHED COMICS------------")
        if w > 0:
            while (cnt > -1):
                latestiss = latestissue[cnt]
                lines[cnt] = lines[cnt].upper()
                #llen[cnt] = str(llen[cnt])
                logger.fdebug("looking for : " + lines[cnt])
                sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', ' ', lines[cnt])
                sqlsearch = re.sub("\&", '%', sqlsearch)
                sqlsearch = re.sub("\\bAND\\b", '%', sqlsearch)
                sqlsearch = re.sub("\\bTHE\\b", '', sqlsearch)
                if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)
                sqlsearch = re.sub(r'\s', '%', sqlsearch)
                sqlsearch = sqlsearch + '%'
                #logger.fdebug("searchsql: " + sqlsearch)
                if futurepull is None:
                    weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [sqlsearch])
                else:
                    weekly = myDB.select('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM future WHERE COMIC LIKE (?)', [sqlsearch])
                #cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
                for week in weekly:
                    if week is None:
                        break
                    for nono in not_t:
                        if nono in week['PUBLISHER']:
                            #logger.fdebug("nono present")
                            break
                        if nono in week['ISSUE']:
                            #logger.fdebug("graphic novel/tradeback detected..ignoring.")
                            break
                        for nothere in not_c:
                            if nothere in week['EXTRA']:
                                #logger.fdebug("nothere present")
                                break
                            else:
                                comicnm = week['COMIC']
                                #here's the tricky part, ie. BATMAN will match on
                                #every batman comic, not exact
                                logger.fdebug("comparing" + comicnm + "..to.." + unlines[cnt].upper())

                                #-NEW-
                                # strip out all special characters and compare
                                watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', '', unlines[cnt])
                                comicnm = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]', '', comicnm)
                                if "THE" in watchcomic.upper() or "THE" in comicnm.upper():
                                    modwatchcomic = re.sub("\\bTHE\\b", "", watchcomic.upper())
                                    modcomicnm = re.sub("\\bTHE\\b", "", comicnm)
                                else:
                                    modwatchcomic = watchcomic
                                    modcomicnm = comicnm
                                if '&' in watchcomic.upper():
                                    modwatchcomic = re.sub('\&', 'AND', modwatchcomic.upper())
                                    modcomicnm = re.sub('\&', 'AND', modcomicnm)
                                if '&' in comicnm:
                                    modwatchcomic = re.sub('\&', 'AND', modwatchcomic.upper())
                                    modcomicnm = re.sub('\&', 'AND', modcomicnm)
                                #thnx to A+X for this...
                                if '+' in watchcomic:
                                    logger.fdebug("+ detected...adjusting.")
                                    #logger.fdebug("comicnm:" + comicnm)
                                    #logger.fdebug("watchcomic:" + watchcomic)
                                    modwatchcomic = re.sub('\+', 'PLUS', modwatchcomic)
                                    #logger.fdebug("modcomicnm:" + modcomicnm)
                                    #logger.fdebug("modwatchcomic:" + modwatchcomic)

                                #annuals!
                                if 'ANNUAL' in comicnm.upper(): 
                                    modcomicnm = re.sub("\\bANNUAL\\b", "", modcomicnm.upper())

                                watchcomic = re.sub(r'\s', '', watchcomic)
                                comicnm = re.sub(r'\s', '', comicnm)
                                modwatchcomic = re.sub(r'\s', '', modwatchcomic)
                                modcomicnm = re.sub(r'\s', '', modcomicnm)
                                logger.fdebug("watchcomic : " + str(watchcomic) + " / mod :" + str(modwatchcomic))
                                logger.fdebug("comicnm : " + str(comicnm) + " / mod :" + str(modcomicnm))

                                if comicnm == watchcomic.upper() or modcomicnm == modwatchcomic.upper():
                                    logger.fdebug("matched on:" + comicnm + "..." + watchcomic.upper())
                                    pass
#                                elif ("ANNUAL" in week['EXTRA']):
#                                    pass
#                                    print ( row[3] + " matched on ANNUAL")
                                else:
                                    break


                                if ("NA" not in week['ISSUE']) and ("HC" not in week['ISSUE']):
                                    if ("COMBO PACK" not in week['EXTRA']) and ("2ND PTG" not in week['EXTRA']) and ("3RD PTG" not in week['EXTRA']):

                                    #this all needs to get redone, so the ability to compare issue dates can be done systematically.
                                    #Everything below should be in it's own function - at least the callable sections - in doing so, we can
                                    #then do comparisons when two titles of the same name exist and are by definition 'current'. Issue date comparisons
                                    #would identify the difference between two #1 titles within the same series year, but have different publishing dates.
                                    #Wolverine (2013) & Wolverine (2014) are good examples of this situation.
                                    #of course initially, the issue data for the newer series wouldn't have any issue data associated with it so it would be
                                    #a null value, but given that the 2013 series (as an example) would be from 2013-05-01, it obviously wouldn't be a match to
                                    #the current date & year (2014). Throwing out that, we could just assume that the 2014 would match the #1.

                                    #get the issue number of the 'weeklypull' series.
                                    #load in the actual series issue number's store-date (not publishing date)
                                    #---use a function to check db, then return the results in a tuple/list to avoid db locks.
                                    #if the store-date is >= weeklypull-list date then continue processing below.
                                    #if the store-date is <= weeklypull-list date then break.
                                    ### week['ISSUE']  #issue # from pullist
                                    ### week['SHIPDATE']  #weeklypull-list date
                                    ### comicid[cnt] #comicid of matched series                                                                

                                    ## if it's a futurepull, the dates get mixed up when two titles exist of the same name
                                    ## ie. Wolverine-2011 & Wolverine-2014
                                    ## we need to set the compare date to today's date ( Now() ) in this case.
                                        if futurepull:
                                            usedate = datetime.datetime.now().strftime('%Y%m%d')  #convert to yyyymmdd
                                        else:
                                            usedate = re.sub("[^0-9]", "", week['SHIPDATE'])

                                        if 'ANNUAL' in comicnm.upper():
                                            chktype = 'annual'
                                        else:
                                            chktype = 'series' 
                             
                                        datevalues = loaditup(watchcomic, comicid[cnt], week['ISSUE'], chktype)

                                        date_downloaded = None
                                        altissuenum = None

                                        if datevalues == 'no results':
                                        #if a series is a .NOW on the pullist, it won't match up against anything (probably) on CV
                                        #let's grab the digit from the .NOW, poll it against CV to see if there's any data
                                        #if there is, check the store date to make sure it's a 'new' release.
                                        #if it is a new release that has the same store date as the .NOW, then we assume
                                        #it's the same, and assign it the AltIssueNumber to do extra searches.
                                            if not week['ISSUE'].isdigit() and '.' not in week['ISSUE']:
                                                altissuenum = re.sub("[^0-9]", "", week['ISSUE'])  # carry this through to get added to db later if matches
                                                logger.fdebug('altissuenum is: ' + str(altissuenum))
                                                altvalues = loaditup(watchcomic, comicid[cnt], altissuenum, chktype)
                                                if altvalues == 'no results':
                                                    logger.fdebug('No alternate Issue numbering - something is probably wrong somewhere.')
                                                else:
                                                    validcheck = checkthis(altvalues[0]['issuedate'], altvalues[0]['status'], usedate)
                                                    if validcheck == False:
                                                        if date_downloaded is None:
                                                            break
                                            if chktype == 'series': 
                                                latest_int = helpers.issuedigits(latestiss)
                                                weekiss_int = helpers.issuedigits(week['ISSUE'])
                                                logger.fdebug('comparing ' + str(latest_int) + ' to ' + str(weekiss_int))
                                                if (latest_int > weekiss_int) or (latest_int == 0 or weekiss_int == 0):
                                                    logger.fdebug(str(week['ISSUE']) + ' should not be the next issue in THIS volume of the series.')
                                                    logger.fdebug('it should be either greater than ' + str(latestiss) + ' or an issue #0')
                                                    break

                                        else:
                                            #logger.fdebug('issuedate:' + str(datevalues[0]['issuedate']))
                                            #logger.fdebug('status:' + str(datevalues[0]['status']))
                                            datestatus = datevalues[0]['status']
                                            validcheck = checkthis(datevalues[0]['issuedate'], datestatus, usedate)
                                            if validcheck == True:
                                                if datestatus != 'Downloaded' and datestatus != 'Archived':
                                                    pass
                                                else:
                                                    logger.fdebug('Issue #' + str(week['ISSUE']) + ' already downloaded.')
                                                    date_downloaded = datestatus
                                            else:
                                                if date_downloaded is None:
                                                    break

                                        otot+=1
                                        dontadd = "no"
                                        if dontadd == "no":
                                            #print (row[0], row[1], row[2])
                                            tot+=1
                                            #kp.append(row[0])
                                            #ki.append(row[1])
                                            #kc.append(comicnm)
                                            if "ANNUAL" in comicnm.upper():
                                                watchfndextra.append("annual")
                                                ComicName = str(unlines[cnt]) + " Annual"
                                            else:
                                                ComicName = str(unlines[cnt])
                                                watchfndextra.append("none")
                                            watchfnd.append(comicnm)
                                            watchfndiss.append(week['ISSUE'])
                                            ComicID = comicid[cnt]
                                            if not mylar.CV_ONLY:
                                                ComicIssue = str(watchfndiss[tot -1] + ".00")
                                            else:
                                                ComicIssue = str(watchfndiss[tot -1])
                                            ComicDate = str(week['SHIPDATE'])
                                            #ComicName = str(unlines[cnt])
                                            logger.fdebug("Watchlist hit for : " + ComicName + " ISSUE: " + str(watchfndiss[tot -1]))

                                            if futurepull is None:
                                               # here we add to comics.latest
                                                updater.latest_update(ComicID=ComicID, LatestIssue=ComicIssue, LatestDate=ComicDate)
                                                # here we add to upcoming table...
                                                statusupdate = updater.upcoming_update(ComicID=ComicID, ComicName=ComicName, IssueNumber=ComicIssue, IssueDate=ComicDate, forcecheck=forcecheck)
                                            else:
                                                # here we add to upcoming table...
                                                statusupdate = updater.upcoming_update(ComicID=ComicID, ComicName=ComicName, IssueNumber=ComicIssue, IssueDate=ComicDate, forcecheck=forcecheck, futurepull='yes', altissuenumber=altissuenum)

                                            # here we update status of weekly table...
                                            if statusupdate is not None:
                                                cstatus = statusupdate['Status']
                                                cstatusid = statusupdate['ComicID']
                                            else:
                                                cstatus = None
                                                cstatusid = None
                                            #set the variable fp to denote updating the futurepull list ONLY
                                            if futurepull is None: 
                                                fp = None
                                            else: 
                                                cstatusid = ComicID
                                                fp = "yes"

                                            if date_downloaded is None:
                                                updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=cstatus, CID=cstatusid, futurepull=fp, altissuenumber=altissuenum)
                                            else:
                                                updater.weekly_update(ComicName=week['COMIC'], IssueNumber=ComicIssue, CStatus=date_downloaded, CID=cstatusid, futurepull=fp, altissuenumber=altissuenum)
                                            break
                                        break
                        break
                cnt-=1
        #print ("-------------------------")
        logger.fdebug("There are " + str(otot) + " comics this week to get!")
        #print ("However I've already grabbed " + str(btotal) )
        #print ("I need to get " + str(tot) + " comic(s)!" )
        logger.info(u"Finished checking for comics on my watchlist.")
    #con.close()
    return
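
The matching logic in pullitcheck() reduces to normalizing the watchlist title and the pull-list COMIC field the same way before comparing: strip punctuation, drop THE, map & to AND and + to PLUS (the A+X case), remove whitespace, and compare case-insensitively. A condensed standalone sketch of that rule (illustrative only, not the project code):

import re

def normalize_title(name):
    # strip punctuation, drop THE, map & -> AND and + -> PLUS, remove whitespace
    name = re.sub(r"[\_\#\,\/\:\;\.\-\!\$\%\'\?\@]", '', name)
    name = re.sub(r"\bTHE\b", '', name.upper())
    name = name.replace('&', 'AND').replace('+', 'PLUS')
    return re.sub(r'\s', '', name)

print(normalize_title('A+X') == normalize_title('A + X'))                                   # True
print(normalize_title('The Avengers') == normalize_title('AVENGERS'))                       # True
print(normalize_title('Wolverine & The X-Men') == normalize_title('WOLVERINE AND X-MEN'))   # True
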
Example #6
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name))
            self._log("nzb folder: " + str(self.nzb_folder))
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            if mylar.USE_SABNZBD==0:
                logger.fdebug("Not using SABnzbd")
            elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run':
                logger.fdebug('Not using SABnzbd : Manual Run')
            else:
                # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(mylar.SAB_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
                    logger.fdebug('SABnzbd Download folder option enabled. Directory set to : ' + self.nzb_folder)

      # -- start. not used.
                #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
                #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
                #querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                #logger.info("querysab_string:" + str(querysab))
                #file = urllib2.urlopen(querysab)
                #data = file.read()
                #file.close()
                #dom = parseString(data)

                #try:
                #    sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
                #except:
                #    errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
                #    logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
                #    return
                #sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
                #logger.fdebug("SAB Replace Spaces: " + str(sabreps))
                #logger.fdebug("SAB Replace Dots: " + str(sabrepd))
         # -- end. not used.

            if mylar.USE_NZBGET==1:
                if self.nzb_name != 'Manual Run': 
                    logger.fdebug("Using NZBGET")
                logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
                # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname.
                if self.nzb_name == 'Manual Run':
                    logger.fdebug('Manual Run Post-Processing enabled.')
                elif mylar.NZBGET_DIRECTORY is not None and mylar.NZBGET_DIRECTORY != 'None' and len(mylar.NZBGET_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.NZBGET_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
                    logger.fdebug('NZBGET Download folder option enabled. Directory set to : ' + self.nzb_folder)
            myDB = db.DBConnection()

            if self.nzb_name == 'Manual Run':
                logger.fdebug ("manual run initiated")
                #Manual postprocessing on a folder.
                #use the nzb_folder to determine every file
                #walk the dir,
                #once a series name and issue are matched,
                #write the series/issue/filename to a tuple
                #when all done, iterate over the tuple until completion...
                comicseries = myDB.select("SELECT * FROM comics")
                manual_list = []
                if comicseries is None: 
                    logger.error(u"No Series in Watchlist - aborting Manual Post Processing. Maybe you should be running Import?")
                    return
                else:
                    ccnt=0
                    nm=0
                    watchvals = {}
                    for cs in comicseries:
                        watchvals = {"SeriesYear":   cs['ComicYear'],
                                     "LatestDate":   cs['LatestDate'],
                                     "ComicVersion": cs['ComicVersion'],
                                     "Publisher":    cs['ComicPublisher'],
                                     "Total":        cs['Total']}
                        watchmatch = filechecker.listFiles(self.nzb_folder,cs['ComicName'],cs['ComicPublisher'],cs['AlternateSearch'], manual=watchvals)
                        if watchmatch['comiccount'] == 0: # is None:
                            nm+=1
                            continue
                        else:
                            fn = 0
                            fccnt = int(watchmatch['comiccount'])
                            if len(watchmatch) == 1: continue
                            while (fn < fccnt):
                                try:
                                    tmpfc = watchmatch['comiclist'][fn]
                                except (IndexError, KeyError):
                                    break
                                temploc= tmpfc['JusttheDigits'].replace('_', ' ')
                                temploc = re.sub('[\#\']', '', temploc)

                                if 'annual' in temploc.lower():
                                    logger.info("annual detected.")
                                    annchk = "yes"
                                    fcdigit = helpers.issuedigits(re.sub('annual', '', str(temploc.lower())).strip())
                                    issuechk = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
                                else:
                                    fcdigit = helpers.issuedigits(temploc)
                                    issuechk = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()

                                if issuechk is None:
                                    logger.fdebug("No corresponding issue # found for " + str(cs['ComicID']))
                                else:
                                    datematch = "True"
                                    if len(watchmatch) >= 1 and tmpfc['ComicYear'] is not None:
                                        #if the # of matches is more than 1, we need to make sure we get the right series
                                        #compare the ReleaseDate for the issue, to the found issue date in the filename.
                                        #if ReleaseDate doesn't exist, use IssueDate
                                        #if no issue date was found, then ignore.
                                        issyr = None
                                        #logger.fdebug('issuedate:' + str(issuechk['IssueDate']))
                                        #logger.fdebug('issuechk: ' + str(issuechk['IssueDate'][5:7]))

                                        #logger.info('ReleaseDate: ' + str(issuechk['ReleaseDate']))
                                        #logger.info('IssueDate: ' + str(issuechk['IssueDate']))
                                        if issuechk['ReleaseDate'] is not None and issuechk['ReleaseDate'] != '0000-00-00':
                                            monthval = issuechk['ReleaseDate']
                                            if int(issuechk['ReleaseDate'][:4]) < int(tmpfc['ComicYear']):
                                                logger.fdebug(str(issuechk['ReleaseDate']) + ' is before the issue year of ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
                                                datematch = "False"
                                                 
                                        else:
                                            monthval = issuechk['IssueDate']
                                            if int(issuechk['IssueDate'][:4]) < int(tmpfc['ComicYear']):
                                                logger.fdebug(str(issuechk['IssueDate']) + ' is before the issue year ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
                                                datematch = "False"

                                        if int(monthval[5:7]) == 11 or int(monthval[5:7]) == 12:
                                            issyr = int(monthval[:4]) + 1
                                            logger.fdebug('issyr is ' + str(issyr))
                                        elif int(monthval[5:7]) == 1 or int(monthval[5:7]) == 2:
                                            issyr = int(monthval[:4]) - 1



                                        if datematch == "False" and issyr is not None:
                                            logger.fdebug(str(issyr) + ' comparing to ' + str(tmpfc['ComicYear']) + ' : rechecking by month-check versus year.')
                                            datematch = "True"
                                            if int(issyr) != int(tmpfc['ComicYear']):
                                                logger.fdebug('[fail] Issue is before the modified issue year of ' + str(issyr))
                                                datematch = "False"
                                          
                                    else:
                                        logger.info("Found matching issue # " + str(fcdigit) + " for ComicID: " + str(cs['ComicID']) + " / IssueID: " + str(issuechk['IssueID']))
                                            
                                    if datematch == "True":
                                        manual_list.append({"ComicLocation":   tmpfc['ComicLocation'],
                                                            "ComicID":         cs['ComicID'],
                                                            "IssueID":         issuechk['IssueID'],
                                                            "IssueNumber":     issuechk['Issue_Number'],
                                                            "ComicName":       cs['ComicName']})
                                    else:
                                        logger.fdebug('Incorrect series - not populating..continuing post-processing')
                                    #ccnt+=1

                                fn+=1
                    logger.fdebug("There are " + str(len(manual_list)) + " files found that match on your watchlist, " + str(nm) + " do not match anything and will be ignored.")    
Example #7
0
def forceRescan(ComicID,archive=None):
    myDB = db.DBConnection()
    # file check to see if issue exists
    rescan = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    logger.info('Now checking files for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') in ' + rescan['ComicLocation'] )
    if archive is None:
        fc = filechecker.listFiles(dir=rescan['ComicLocation'], watchcomic=rescan['ComicName'], AlternateSearch=rescan['AlternateSearch'])
    else:
        fc = filechecker.listFiles(dir=archive, watchcomic=rescan['ComicName'], AlternateSearch=rescan['AlternateSearch'])
    iscnt = rescan['Total']
    havefiles = 0
    if mylar.ANNUALS_ON:
        an_cnt = myDB.action("SELECT COUNT(*) FROM annuals WHERE ComicID=?", [ComicID]).fetchall()
        anncnt = an_cnt[0][0]
    else:
        anncnt = 0
    fccnt = int(fc['comiccount'])
    issnum = 1
    fcnew = []
    fn = 0
    issuedupechk = []
    annualdupechk = []
    issueexceptdupechk = []
    reissues = myDB.action('SELECT * FROM issues WHERE ComicID=?', [ComicID]).fetchall()
    issID_to_ignore = []
    issID_to_ignore.append(str(ComicID))
    while (fn < fccnt):  
        haveissue = "no"
        issuedupe = "no"
        try:
            tmpfc = fc['comiclist'][fn]
        except IndexError:
            logger.fdebug('Unable to properly retrieve a file listing for the given series.')
            logger.fdebug('Probably because the filenames being scanned are not in a parseable format')
            if fn == 0: 
                return
            else:
                break
        temploc= tmpfc['JusttheDigits'].replace('_', ' ')

#        temploc = tmpfc['ComicFilename'].replace('_', ' ')
        temploc = re.sub('[\#\']', '', temploc)
        logger.fdebug('temploc: ' + str(temploc))
        if 'annual' not in temploc.lower():
            #remove the extension here
            extensions = ('.cbr','.cbz')
            if temploc.lower().endswith(extensions):
                logger.fdebug('removed extension for issue: ' + str(temploc))
                temploc = temploc[:-4]
#            deccnt = str(temploc).count('.')
#            if deccnt > 1:
                #logger.fdebug('decimal counts are :' + str(deccnt))
                #if the file is formatted with '.' in place of spaces we need to adjust.
                #before replacing - check to see if digits on either side of decimal and if yes, DON'T REMOVE
#                occur=1
#                prevstart = 0
#                digitfound = "no"
#                decimalfound = "no"
#                tempreconstruct = ''
#                while (occur <= deccnt):
#                    n = occur
#                    start = temploc.find('.')
#                    while start >=0 and n > 1:
#                        start = temploc.find('.', start+len('.'))
#                        n-=1
#                    #logger.fdebug('occurance ' + str(occur) + ' of . at position: ' + str(start))
#                    if temploc[prevstart:start].isdigit():
#                        if digitfound == "yes":
#                            #logger.fdebug('this is a decimal, assuming decimal issue.')
#                            decimalfound = "yes"
#                            reconst = "." + temploc[prevstart:start] + " "
#                        else:
#                            #logger.fdebug('digit detected.')
#                            digitfound = "yes"
#                            reconst = temploc[prevstart:start]
#                    else:
#                        reconst = temploc[prevstart:start] + " "
#                    #logger.fdebug('word: ' + reconst)
#                    tempreconstruct = tempreconstruct + reconst 
#                    #logger.fdebug('tempreconstruct is : ' + tempreconstruct)
#                    prevstart = (start+1)
#                    occur+=1
#                #logger.fdebug('word: ' + temploc[prevstart:])
#                tempreconstruct = tempreconstruct + " " + temploc[prevstart:]
#                #logger.fdebug('final filename to use is : ' + str(tempreconstruct))
#                temploc = tempreconstruct            
            #logger.fdebug("checking " + str(temploc))
            #fcnew_b4 = shlex.split(str(temploc))            
            fcnew_af = re.findall('[^\()]+', temploc)
            fcnew = shlex.split(fcnew_af[0])

            fcn = len(fcnew)
            n = 0
            while (n <= iscnt):
                som = 0
                try:
                    reiss = reissues[n]
                except IndexError:
                    break
#                int_iss, iss_except = helpers.decimal_issue(reiss['Issue_Number'])
                int_iss = helpers.issuedigits(reiss['Issue_Number'])
                issyear = reiss['IssueDate'][:4]
                old_status = reiss['Status']
                issname = reiss['IssueName']
                #logger.fdebug('integer_issue:' + str(int_iss) + ' ... status: ' + str(old_status))

                #if comic in format of "SomeSeries 5(c2c)(2013).cbr" whatever...it'll die.
                #can't distinguish the 5(c2c) to tell it's the issue #...
                fnd_iss_except = 'None'
                #print ("Issue, int_iss, iss_except: " + str(reiss['Issue_Number']) + "," + str(int_iss) + "," + str(iss_except))


                while (som < fcn):
                    #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
                    #logger.fdebug('checking word - ' + str(fcnew[som]))
                    if ".cbr" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbr", "")
                    elif ".cbz" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbz", "")
                    if "(c2c)" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace("(c2c)", " ")
                        get_issue = shlex.split(str(fcnew[som]))
                        if fcnew[som] != " ":
                            fcnew[som] = get_issue[0]


                    if som+1 < len(fcnew) and len(fcnew[som+1]) == 2:
                        #print "fcnew[som+1]: " + str(fcnew[som+1])
                        #print "fcnew[som]: " + str(fcnew[som])
                        if 'au' in fcnew[som+1].lower():
                            #if the 'AU' is in 005AU vs 005 AU it will yield different results.
                            fcnew[som] = fcnew[som] + 'AU'
                            fcnew[som+1] = '93939999919190933'
                            logger.info('AU Detected separate from issue - combining and continuing')
                        elif 'ai' in fcnew[som+1].lower():
                            #if the 'AI' is in 005AI vs 005 AI it will yield different results.
                            fcnew[som] = fcnew[som] + 'AI'
                            fcnew[som+1] = '93939999919190933'
                            logger.info('AI Detected separate from issue - combining and continuing')

                    #sometimes scanners refuse to use spaces between () and lump the issue right at the start
                    #mylar assumes it's all one word in this case..let's dump the brackets.
                    
                    fcdigit = helpers.issuedigits(fcnew[som])

                    #logger.fdebug("fcdigit: " + str(fcdigit))
                    #logger.fdebug("int_iss: " + str(int_iss))

                    if int(fcdigit) == int_iss:
                        logger.fdebug('issue match - fcdigit: ' + str(fcdigit) + ' ... int_iss: ' + str(int_iss))

                        if '-' in temploc and temploc.find(reiss['Issue_Number']) > temploc.find('-'):
                            logger.fdebug('I have detected a possible Title in the filename')
                            logger.fdebug('the issue # has occurred after the -, so I assume that it is part of the Title')
                            break
                        for d in issuedupechk:
                            if int(d['fcdigit']) == int(fcdigit):
                                logger.fdebug('duplicate issue detected - not counting this: ' + str(tmpfc['ComicFilename']))
                                logger.fdebug('is a duplicate of ' + d['filename'])
                                logger.fdebug('fcdigit:' + str(fcdigit) + ' === dupedigit: ' + str(d['fcdigit']))
                                issuedupe = "yes"
                                break
                        if issuedupe == "no":
                            logger.fdebug('matched...issue: ' + rescan['ComicName'] + '#' + str(reiss['Issue_Number']) + ' --- ' + str(int_iss))
                            havefiles+=1
                            haveissue = "yes"
                            isslocation = str(tmpfc['ComicFilename'])
                            issSize = str(tmpfc['ComicSize'])
                            logger.fdebug('.......filename: ' + str(isslocation))
                            logger.fdebug('.......filesize: ' + str(tmpfc['ComicSize'])) 
                            # to avoid duplicate issues which screws up the count...let's store the filename issues then 
                            # compare earlier...
                            issuedupechk.append({'fcdigit': int(fcdigit),
                                                 'filename': tmpfc['ComicFilename']})
                        break
                        #else:
                        # if the issue # matches, but there is no year present - still match.
                        # determine a way to match on year if present, or no year (currently).

                    if issuedupe == "yes":
                        logger.fdebug('I should break out here because of a dupe.')
                        break
                    som+=1
                if haveissue == "yes" or issuedupe == "yes": break
                n+=1
        else:
            # annual inclusion here.
            #logger.fdebug("checking " + str(temploc))
            reannuals = myDB.action('SELECT * FROM annuals WHERE ComicID=?', [ComicID]).fetchall()
            fcnew = shlex.split(str(temploc))
            fcn = len(fcnew)
            n = 0
            while (n < anncnt):
                som = 0
                try:
                    reann = reannuals[n]
                except IndexError:
                    break
                int_iss, iss_except = helpers.decimal_issue(reann['Issue_Number'])
                issyear = reann['IssueDate'][:4]
                old_status = reann['Status']            
                while (som < fcn):
                    #counts get buggered up when the issue is the last field in the filename - ie. '50$
                    #logger.fdebug('checking word - ' + str(fcnew[som]))
                    if ".cbr" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbr", "")
                    elif ".cbz" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace(".cbz", "")
                    if "(c2c)" in fcnew[som].lower():
                        fcnew[som] = fcnew[som].replace("(c2c)", " ")
                        get_issue = shlex.split(str(fcnew[som]))
                        if fcnew[som] != " ":
                            fcnew[som] = get_issue[0]
                    if fcnew[som].lower() == 'annual':
                        logger.fdebug('Annual detected.')
                        if fcnew[som+1].isdigit():
                            ann_iss = fcnew[som+1]
                            logger.fdebug('Annual # ' + str(ann_iss) + ' detected.')
                            fcdigit = helpers.issuedigits(ann_iss)
                    logger.fdebug('fcdigit:' + str(fcdigit))
                    logger.fdebug('int_iss:' + str(int_iss))
                    if int(fcdigit) == int_iss:
                        logger.fdebug('annual match - issue : ' + str(int_iss))
                        for d in annualdupechk:
                            if int(d['fcdigit']) == int(fcdigit):
                                logger.fdebug('duplicate annual issue detected - not counting this: ' + str(tmpfc['ComicFilename']))
                                issuedupe = "yes"
                                break
                        if issuedupe == "no":
                            logger.fdebug('matched...annual issue: ' + rescan['ComicName'] + '#' + str(reann['Issue_Number']) + ' --- ' + str(int_iss))
                            havefiles+=1
                            haveissue = "yes"
                            isslocation = str(tmpfc['ComicFilename'])
                            issSize = str(tmpfc['ComicSize'])
                            logger.fdebug('.......filename: ' + str(isslocation))
                            logger.fdebug('.......filesize: ' + str(tmpfc['ComicSize']))
                            # to avoid duplicate issues which screws up the count...let's store the filename issues then
                            # compare earlier...
                            annualdupechk.append({'fcdigit': int(fcdigit)})
                        break
                    som+=1
                if haveissue == "yes": break
                n+=1

        if issuedupe == "yes": pass
        else:
            #we have the # of comics, now let's update the db.
            #even if we couldn't find the physical issue, check the status.
            #-- if annuals aren't enabled, this will bugger out.
            writeit = True
            if mylar.ANNUALS_ON:
                if 'annual' in temploc.lower():
                    iss_id = reann['IssueID']
                else:
                    iss_id = reiss['IssueID']
            else:
                if 'annual' in temploc.lower():
                    logger.fdebug('Annual support not enabled, but annual issue present within directory. Ignoring annual.')
                    writeit = False
                else:
                    iss_id = reiss['IssueID']

            if writeit == True:
                logger.fdebug('issueID to write to db:' + str(iss_id))
                controlValueDict = {"IssueID": iss_id}

                #if Archived, increase the 'Have' count.
                #if archive:
                #    issStatus = "Archived"
  
                if haveissue == "yes":
                    issStatus = "Downloaded"
                    newValueDict = {"Location":           isslocation,
                                    "ComicSize":          issSize,
                                    "Status":             issStatus
                                    }

                    issID_to_ignore.append(str(iss_id))
   
                    if 'annual' in temploc.lower():
                        myDB.upsert("annuals", newValueDict, controlValueDict)
                    else:
                        myDB.upsert("issues", newValueDict, controlValueDict)
        fn+=1

    logger.fdebug('IssueID to ignore: ' + str(issID_to_ignore))

    #here we need to change the status of the ones we DIDN'T FIND above since the loop only hits on FOUND issues.
    update_iss = []
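    # the IN(...) clause below gets len(issID_to_ignore)-1 placeholders; together with ComicID=? that
    # matches the full parameter list, so issID_to_ignore is expected to start with the ComicID (presumably seeded earlier)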
    tmpsql = "SELECT * FROM issues WHERE ComicID=? AND IssueID not in ({seq})".format(seq=','.join(['?']*(len(issID_to_ignore)-1)))
    chkthis = myDB.action(tmpsql, issID_to_ignore).fetchall()
#    chkthis = None
    if chkthis is None: 
        pass
    else:
        for chk in chkthis:
            old_status = chk['Status']
            logger.fdebug('old_status:' + str(old_status))
            if old_status == "Skipped":
                if mylar.AUTOWANT_ALL:
                    issStatus = "Wanted"
                else:
                    issStatus = "Skipped"
            elif old_status == "Archived":
                issStatus = "Archived"
            elif old_status == "Downloaded":
                issStatus = "Archived"
            elif old_status == "Wanted":
                issStatus = "Wanted"
            elif old_status == "Ignored":
                issStatus = "Ignored"
            elif old_status == "Snatched":   #this is needed for torrents, or else it'll keep on queuing..
                issStatus = "Snatched"
            else:
                issStatus = "Skipped"

            logger.fdebug("new status: " + str(issStatus))

            update_iss.append({"IssueID": chk['IssueID'],
                               "Status":  issStatus})
    
    if len(update_iss) > 0:
        i = 0
        #do it like this to avoid DB locks...
        for ui in update_iss:
            controlValueDict = {"IssueID": ui['IssueID']}
            newStatusValue = {"Status": ui['Status']}
            myDB.upsert("issues", newStatusValue, controlValueDict)
            i+=1
        logger.info('Updated the status of ' + str(i) + ' issues for ' + rescan['ComicName'] + ' (' + str(rescan['ComicYear']) + ') that were not found.')

    logger.info('Total files located: ' + str(havefiles))
    foundcount = havefiles
    arcfiles = 0
    # if filechecker returns 0 files (it doesn't find any), but some issues have a status of 'Archived'
    # the loop below won't work...let's adjust :)
    arcissues = myDB.action("SELECT count(*) FROM issues WHERE ComicID=? and Status='Archived'", [ComicID]).fetchall()
    if int(arcissues[0][0]) > 0:
        arcfiles = arcissues[0][0]
        havefiles = havefiles + arcfiles
        logger.fdebug('Adjusting have total to ' + str(havefiles) + ' because of this many archive files: ' + str(arcfiles))

    ignorecount = 0
    if mylar.IGNORE_HAVETOTAL:   # if this is enabled, will increase Have total as if in Archived Status
        ignores = myDB.action("SELECT count(*) FROM issues WHERE ComicID=? AND Status='Ignored'", [ComicID]).fetchall()
        if int(ignores[0][0]) > 0:
            ignorecount = ignores[0][0]
            havefiles = havefiles + ignorecount
            logger.fdebug('Adjusting have total to ' + str(havefiles) + ' because of this many Ignored files: ' + str(ignorecount))

    #now that we are finished...
    #adjust for issues that have been marked as Downloaded, but aren't found/don't exist.
    #do it here, because above loop only cycles though found comics using filechecker.
    downissues = myDB.select("SELECT * FROM issues WHERE ComicID=? and Status='Downloaded'", [ComicID])
    if downissues is None:
        pass
    else:
        archivedissues = 0 #set this to 0 so it tallies correctly.
        for down in downissues:
            #print "downlocation:" + str(down['Location'])
            #remove special characters from 
            #temploc = rescan['ComicLocation'].replace('_', ' ')
            #temploc = re.sub('[\#\'\/\.]', '', temploc)
            #print ("comiclocation: " + str(rescan['ComicLocation']))
            #print ("downlocation: " + str(down['Location']))
            if down['Location'] is None:
                logger.fdebug('location does not exist, which means the file was not downloaded successfully or was moved.')
                controlValue = {"IssueID":  down['IssueID']}
                newValue = {"Status":    "Archived"}
                myDB.upsert("issues", newValue, controlValue)
                archivedissues+=1
                pass
            else:
                comicpath = os.path.join(rescan['ComicLocation'], down['Location'])
                if os.path.exists(comicpath):
                    pass
                    #print "Issue exists - no need to change status."
                else:
                    #print "Changing status from Downloaded to Archived - cannot locate file"
                    controlValue = {"IssueID":   down['IssueID']}
                    newValue = {"Status":    "Archived"}
                    myDB.upsert("issues", newValue, controlValue)
                    archivedissues+=1 
        totalarc = arcfiles + archivedissues
        havefiles = havefiles + archivedissues  #arcfiles already tallied in havefiles in above segment
        logger.fdebug('I have changed the status of ' + str(archivedissues) + ' issues to a status of Archived, as I now cannot locate them in the series directory.')

        
    #let's update the total count of comics that was found.
    controlValueStat = {"ComicID":     rescan['ComicID']}
    newValueStat = {"Have":            havefiles
                   }

    combined_total = rescan['Total'] + anncnt

    myDB.upsert("comics", newValueStat, controlValueStat)
    logger.info('I have physically found ' + str(foundcount) + ' issues, ignored ' + str(ignorecount) + ' issues, and accounted for ' + str(totalarc) + ' in an Archived state. Total Issue Count: ' + str(havefiles) + ' / ' + str(combined_total))

    return
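
# --- Hedged sketch (editor's illustration, not part of the Mylar source) ---
# The rescan above builds its exclusion query by joining one '?' placeholder per
# entry and binding ComicID plus the ignore list as a single parameter sequence.
# A minimal standalone demo of that placeholder pattern with sqlite3:
import sqlite3

def select_not_ignored(conn, comicid, ignore_ids):
    # one placeholder per ignored id; ComicID consumes the first bound value
    seq = ','.join(['?'] * len(ignore_ids))
    sql = "SELECT IssueID, Status FROM issues WHERE ComicID=? AND IssueID NOT IN ({seq})".format(seq=seq)
    return conn.execute(sql, [comicid] + list(ignore_ids)).fetchall()

if __name__ == '__main__':
    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE issues (ComicID TEXT, IssueID TEXT, Status TEXT)")
    conn.executemany("INSERT INTO issues VALUES (?,?,?)",
                     [('100', '1', 'Downloaded'), ('100', '2', 'Skipped')])
    print(select_not_ignored(conn, '100', ['1']))   # -> only IssueID '2' comes back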
Example #8
0
def upcoming_update(ComicID, ComicName, IssueNumber, IssueDate, forcecheck=None, futurepull=None, altissuenumber=None):
    # here we add to upcoming table...
    myDB = db.DBConnection()
    dspComicName = ComicName #to make sure that the word 'annual' will be displayed on screen
    if 'annual' in ComicName.lower(): 
        adjComicName = re.sub("\\bannual\\b", "", ComicName.lower()) # for use with comparisons.
        logger.fdebug('annual detected - adjusting name to : ' + adjComicName)
    else:
        adjComicName = ComicName
    controlValue = {"ComicID":      ComicID}
    newValue = {"ComicName":        adjComicName,
                "IssueNumber":      str(IssueNumber),
                "DisplayComicName": dspComicName,
                "IssueDate":        str(IssueDate)}

    #let's refresh the series here just to make sure if an issue is available/not.
    mismatch = "no"
    CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [ComicID]).fetchone()
    if CV_EXcomicid is None: pass
    else:
        if CV_EXcomicid['variloop'] == '99':
            mismatch = "yes"
    lastupdatechk = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
    if lastupdatechk is None:
        pullupd = "yes"
    else:
        c_date = lastupdatechk['LastUpdated']
        if c_date is None:
            logger.error(lastupdatechk['ComicName'] + ' failed during a previous add/refresh. Please either delete and re-add the series, or try refreshing it.')
            return
        c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
        # no need to hammer the refresh 
        # let's check it every 5 hours (or more)
        #pullupd = "yes"
    if 'annual' in ComicName.lower():
        if mylar.ANNUALS_ON:
            issuechk = myDB.action("SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?", [ComicID, IssueNumber]).fetchone()
        else:
            logger.fdebug('Annual detected, but annuals not enabled. Ignoring result.')
            return
    else:
        issuechk = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [ComicID, IssueNumber]).fetchone()

    if issuechk is None and altissuenumber is not None:
        logger.info('altissuenumber is : ' + str(altissuenumber))
        issuechk = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?", [ComicID, helpers.issuedigits(altissuenumber)]).fetchone()
    if issuechk is None:
        if futurepull is None:
            logger.fdebug(adjComicName + ' Issue: ' + str(IssueNumber) + ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.')
            # we need to either decrease the total issue count, OR indicate that an issue is upcoming.
            upco_results = myDB.action("SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",[ComicID]).fetchall()
            upco_iss = upco_results[0][0]
            #logger.info("upco_iss: " + str(upco_iss))
            if int(upco_iss) > 0:
                #logger.info("There is " + str(upco_iss) + " of " + str(ComicName) + " that's not accounted for")
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": str(upco_iss)}
                myDB.upsert("comics", newVal, newKey)
            elif int(upco_iss) <= 0 and lastupdatechk['not_updated_db']:
                #not_updated_db still has a value but nothing is outstanding anymore - zero it back out since it's updated now.
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": ""}
                myDB.upsert("comics", newVal, newKey)

            if hours > 5 or forcecheck == 'yes':
                pullupd = "yes"
                logger.fdebug('Now Refreshing comic ' + ComicName + ' to make sure it is up-to-date')
                if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd)
                else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd)
            else:
                logger.fdebug('It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.')
                return
        else:
            # if futurepull is not None, let's just update the status and ComicID
            # NOTE: THIS IS CREATING EMPTY ENTRIES IN THE FUTURE TABLE. ???
            nKey = {"ComicID": ComicID}
            nVal = {"Status": "Wanted"}
            myDB.upsert("future", nVal, nKey)

    if issuechk is not None:
        if issuechk['Issue_Number'] == IssueNumber or issuechk['Issue_Number'] == altissuenumber:
            logger.fdebug('Comic series already up-to-date ... no need to refresh at this time.')
            logger.fdebug('Available to be marked for download - checking...' + adjComicName + ' Issue: ' + str(issuechk['Issue_Number']))
            logger.fdebug('...Existing status: ' + str(issuechk['Status']))
            control = {"IssueID":   issuechk['IssueID']}
            newValue['IssueID'] = issuechk['IssueID']
            if issuechk['Status'] == "Snatched":
                values = { "Status":   "Snatched"}
                newValue['Status'] = "Snatched"
            elif issuechk['Status'] == "Downloaded":
                values = { "Status":    "Downloaded"}
                newValue['Status'] = "Downloaded"
                #if the status is Downloaded and it's on the pullist - let's mark it so everyone can bask in the glory

            elif issuechk['Status'] == "Wanted":
                values = { "Status":    "Wanted"}
                newValue['Status'] = "Wanted"            
            elif issuechk['Status'] == "Archived":
                values = { "Status":    "Archived"}
                newValue['Status'] = "Archived"
            else:
                values = { "Status":    "Skipped"}
                newValue['Status'] = "Skipped"
            #was in wrong place :(
        else:
            logger.fdebug('Issues do not match for some reason...weekly new issue: ' + str(IssueNumber))
            return

    if mylar.AUTOWANT_UPCOMING:
        #for issues not in db - to be added to Upcoming table.
        if issuechk is None:
            newValue['Status'] = "Wanted"
            logger.fdebug('...Changing Status to Wanted and throwing it in the Upcoming section since it is not published yet.')
        #this works for issues existing in DB...        
        elif issuechk['Status'] == "Skipped":
            newValue['Status'] = "Wanted"
            values = {"Status":  "Wanted"}
            logger.fdebug('...New status of Wanted')
        elif issuechk['Status'] == "Wanted":
            logger.fdebug('...Status already Wanted .. not changing.')
        else:
            logger.fdebug('...Already have issue - keeping existing status of : ' + str(issuechk['Status']))

    if issuechk is None:
        myDB.upsert("upcoming", newValue, controlValue)
    else:
        logger.fdebug('--attempt to find errant adds to Wanted list')
        logger.fdebug('UpcomingNewValue: ' + str(newValue))
        logger.fdebug('UpcomingcontrolValue: ' + str(controlValue))
        if issuechk['IssueDate'] == '0000-00-00' and newValue['IssueDate'] != '0000-00-00':
            logger.fdebug('Found a 0000-00-00 issue - force updating series to try and get it proper.')
            dateVal = {"IssueDate":        newValue['IssueDate'],
                       "ComicName":        issuechk['ComicName'],
                       "Status":           newValue['Status'],
                       "IssueNumber":      issuechk['Issue_Number']}
            logger.fdebug('updating date in upcoming table to : ' + str(newValue['IssueDate']))
            logger.fdebug('ComicID:' + str(controlValue))
            myDB.upsert("upcoming", dateVal, controlValue)
            logger.fdebug('Temporarily putting the Issue Date for ' + str(issuechk['Issue_Number']) + ' to ' + str(newValue['IssueDate']))
            values = {"IssueDate":  newValue['IssueDate']}
            #if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd='yes')
            #else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd='yes')

        if 'annual' in ComicName.lower():
            myDB.upsert("annuals", values, control)
        else:
            myDB.upsert("issues", values, control)

        if issuechk['Status'] == 'Downloaded' or issuechk['Status'] == 'Archived' or issuechk['Status'] == 'Snatched': 
            logger.fdebug('updating Pull-list to reflect status.')
            downstats = {"Status":  issuechk['Status'],
                         "ComicID": issuechk['ComicID']}
            return downstats
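
# --- Hedged sketch (editor's illustration, not part of the Mylar source) ---
# upcoming_update() above only refreshes a series when LastUpdated is more than
# 5 hours old (or when forcecheck == 'yes'). The elapsed-hours check, isolated
# as a standalone helper under the same timestamp-format assumption:
import datetime

def needs_refresh(last_updated, forcecheck=None, min_hours=5):
    if last_updated is None:
        return True
    last = datetime.datetime.strptime(last_updated, "%Y-%m-%d %H:%M:%S")
    absdiff = abs(datetime.datetime.now() - last)
    hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
    return hours > min_hours or forcecheck == 'yes'

# e.g. needs_refresh('2014-01-01 12:00:00') is True once 5+ hours have passed,
# and needs_refresh(last_updated, forcecheck='yes') always triggers a refresh.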
Example #9
0
    def walk_the_walk(self):
        folder_location = mylar.CONFIG.FOLDER_CACHE_LOCATION
        if folder_location is None:
            return {'status': False}

        logger.info('checking locally...')
        filelist = None

        logger.info('check_folder_cache: %s' % (mylar.CHECK_FOLDER_CACHE))
        if mylar.CHECK_FOLDER_CACHE is not None:
            rd = mylar.CHECK_FOLDER_CACHE #datetime.datetime.utcfromtimestamp(mylar.CHECK_FOLDER_CACHE)
            rd_mins = rd + datetime.timedelta(seconds = 600)  #10 minute cache retention
            rd_now = datetime.datetime.utcfromtimestamp(time.time())
            if calendar.timegm(rd_mins.utctimetuple()) > calendar.timegm(rd_now.utctimetuple()):
                # if < 10 minutes since last check, use cached listing
                logger.info('using cached folder listing since < 10 minutes since last file check.')
                filelist = mylar.FOLDER_CACHE

        if filelist is None:
            logger.info('generating new directory listing for folder_cache')
            flc = filechecker.FileChecker(folder_location, justparse=True, pp_mode=True)
            mylar.FOLDER_CACHE = flc.listFiles()
            mylar.CHECK_FOLDER_CACHE = datetime.datetime.utcfromtimestamp(helpers.utctimestamp())

        local_status = False
        filepath = None
        filename = None
        for fl in mylar.FOLDER_CACHE['comiclist']:
            logger.info('fl: %s' % (fl,))
            if self.arc is not None:
                comicname = self.arc['ComicName']
                corrected_type = None
                alternatesearch = None
                booktype = self.arc['Type']
                publisher = self.arc['Publisher']
                issuenumber = self.arc['IssueNumber']
                issuedate = self.arc['IssueDate']
                issuename = self.arc['IssueName']
                issuestatus = self.arc['Status']
            elif self.comic is not None:
                comicname = self.comic['ComicName']
                booktype = self.comic['Type']
                corrected_type = self.comic['Corrected_Type']
                alternatesearch = self.comic['AlternateSearch']
                publisher = self.comic['ComicPublisher']
                issuenumber = self.issue['Issue_Number']
                issuedate = self.issue['IssueDate']
                issuename = self.issue['IssueName']
                issuestatus = self.issue['Status']
            else:
                # weekly - (one/off)
                comicname = self.weekly['COMIC']
                booktype = self.weekly['format']
                corrected_type = None
                alternatesearch = None
                publisher = self.weekly['PUBLISHER']
                issuenumber = self.weekly['ISSUE']
                issuedate = self.weekly['SHIPDATE']
                issuename = None
                issuestatus = self.weekly['STATUS']

            if booktype is not None:
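                # collapse non-print booktypes (and corrected types) down to One-Shot / GN / HC / TPB
                # so a bare issue '1' can stand in for the whole volume further down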
                if (all([booktype != 'Print', booktype != 'Digital', booktype != 'None', booktype is not None]) and corrected_type != 'Print') or any([corrected_type == 'TPB', corrected_type == 'GN', corrected_type == 'HC']):
                    if booktype == 'One-Shot' and corrected_type is None:
                        booktype = 'One-Shot'
                    else:
                        if booktype == 'GN' and corrected_type is None:
                            booktype = 'GN'
                        elif booktype == 'HC' and corrected_type is None:
                            booktype = 'HC'
                        else:
                            booktype = 'TPB'

            wm = filechecker.FileChecker(watchcomic=comicname, Publisher=publisher, AlternateSearch=alternatesearch)
            watchmatch = wm.matchIT(fl)

            logger.info('watchmatch: %s' % (watchmatch,))

            # this is all for a really general type of match - if passed, the post-processing checks will do the real brunt work
            if watchmatch['process_status'] == 'fail':
                continue

            if watchmatch['justthedigits'] is not None:
                temploc= watchmatch['justthedigits'].replace('_', ' ')
                if "Director's Cut" not in temploc:
                    temploc = re.sub('[\#\']', '', temploc)
            else:
                if any([booktype == 'TPB', booktype =='GN', booktype == 'HC', booktype == 'One-Shot']):
                    temploc = '1'
                else:
                    temploc = None
                    continue

            int_iss = helpers.issuedigits(issuenumber)
            issyear = issuedate[:4]
            old_status = issuestatus
            issname = issuename


            if temploc is not None:
                fcdigit = helpers.issuedigits(temploc)
            elif any([booktype == 'TPB', booktype == 'GN', booktype == 'GC', booktype == 'One-Shot']) and temploc is None:
                fcdigit = helpers.issuedigits('1')

            if int(fcdigit) == int_iss:
                logger.fdebug('[%s] Issue match - #%s' % (self.issueid, issuenumber))
                local_status = True
                if watchmatch['sub'] is None:
                    filepath = watchmatch['comiclocation']
                    filename = watchmatch['comicfilename']
                else:
                    filepath = os.path.join(watchmatch['comiclocation'], watchmatch['sub'])
                    filename = watchmatch['comicfilename']
                break


        #if local_status is True:
            #try:
            #    copied_folder = os.path.join(mylar.CONFIG.CACHE_DIR, 'tmp_filer')
            #    if os.path.exists(copied_folder):
            #        shutil.rmtree(copied_folder)
            #    os.mkdir(copied_folder)
            #    logger.info('created temp directory: %s' % copied_folder)
            #    shutil.copy(os.path.join(filepath, filename), copied_folder)

            #except Exception as e:
            #    logger.error('[%s] error: %s' % (e, filepath))
            #    filepath = None
            #    local_status = False
            #else:
            #filepath = os.path.join(copied_folder, filename)
            #logger.info('Successfully copied file : %s' % filepath)

        return {'status': local_status,
                'filename': filename,
                'filepath': filepath}
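
# --- Hedged sketch (editor's illustration, not part of the Mylar source) ---
# walk_the_walk() above reuses mylar.FOLDER_CACHE whenever the previous scan
# happened less than 10 minutes ago. The retention test, pulled out into a
# standalone helper with the same UTC / 600-second assumptions:
import calendar
import datetime
import time

def folder_cache_is_fresh(last_check, retention_seconds=600):
    # last_check: UTC datetime of the previous listing, or None if never run
    if last_check is None:
        return False
    expiry = last_check + datetime.timedelta(seconds=retention_seconds)
    now = datetime.datetime.utcfromtimestamp(time.time())
    return calendar.timegm(expiry.utctimetuple()) > calendar.timegm(now.utctimetuple())

# e.g. folder_cache_is_fresh(datetime.datetime.utcnow() - datetime.timedelta(minutes=3)) -> True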
Example #10
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
            self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            if mylar.USE_SABNZBD==0:
                logger.fdebug("Not using SABnzbd")
            elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run':
                logger.fdebug('Not using SABnzbd : Manual Run')
            else:
                # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(mylar.SAB_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
    
                #lookup nzb_name in nzblog table to get issueid
    
                #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
                #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
                querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                #logger.info("querysab_string:" + str(querysab))
                file = urllib2.urlopen(querysab)
                data = file.read()
                file.close()
                dom = parseString(data)

                try:
                    sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
                except:
                    errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
                    logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
                    return
                sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
                logger.fdebug("SAB Replace Spaces: " + str(sabreps))
                logger.fdebug("SAB Replace Dots: " + str(sabrepd))
            if mylar.USE_NZBGET==1:
                logger.fdebug("Using NZBGET")
                logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            myDB = db.DBConnection()

            if self.nzb_name == 'Manual Run':
                print ("manual run initiated")
                #Manual postprocessing on a folder.
                #use the nzb_folder to determine every file
                #walk the dir,
                #once a series name and issue are matched,
                #write the series/issue/filename to a tuple
                #when all done, iterate over the tuple until completion...
                comicseries = myDB.action("SELECT * FROM comics")
                manual_list = []
                if comicseries is None: 
                    logger.error(u"No Series in Watchlist - aborting Manual Post Processing. Maybe you should be running Import?")
                    return
                else:
                    ccnt=0
                    nm=0
                    watchvals = {}
                    for cs in comicseries:
                        watchvals = {"SeriesYear":   cs['ComicYear'],
                                     "LatestDate":   cs['LatestDate'],
                                     "ComicVersion": cs['ComicVersion'],
                                     "Total":        cs['Total']}
                        watchmatch = filechecker.listFiles(self.nzb_folder,cs['ComicName'],cs['AlternateSearch'], manual=watchvals)
                        if watchmatch is None:
                            nm+=1
                            continue
                        else:
                            fn = 0
                            fccnt = int(watchmatch['comiccount'])
                            if len(watchmatch) == 1: continue
                            while (fn < fccnt):
                                try:
                                    tmpfc = watchmatch['comiclist'][fn]
                                except (IndexError, KeyError):
                                    break
                                temploc= tmpfc['JusttheDigits'].replace('_', ' ')
                                temploc = re.sub('[\#\']', '', temploc)
                                logger.fdebug("temploc: " + str(temploc))

                                ww = shlex.split(temploc)
                                lnw = len(ww)
                                wdc = 0
                                while (wdc < lnw):
                                    #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
                                    if ".cbr" in ww[wdc].lower():
                                        ww[wdc] = ww[wdc].replace(".cbr", "")
                                    elif ".cbz" in ww[wdc].lower():
                                        ww[wdc] = ww[wdc].replace(".cbz", "")
                                    if "(c2c)" in ww[wdc].lower():
                                        ww[wdc] = ww[wdc].replace("(c2c)", " ")
                                        get_issue = shlex.split(str(ww[wdc]))
                                        if ww[wdc] != " ":
                                            ww[wdc] = get_issue[0]

                                    if '.' in ww[wdc]:
                                    #logger.fdebug("decimal detected...adjusting.")
                                        try:
                                            i = float(ww[wdc])
                                        except (ValueError, TypeError):
                                        #not numeric
                                        #logger.fdebug("NOT NUMERIC - new word: " + str(ww[wdc]))
                                            ww[wdc] = ww[wdc].replace(".", "")
                                    else:
                                        #numeric
                                        pass

                                    if ww[wdc].isdigit():
                                        if int(ww[wdc]) > 0:
                                            if wdc+1 < len(ww) and 'au' in ww[wdc+1].lower():
                                                if len(ww[wdc+1]) == 2:
                                                #if the 'AU' is in 005AU vs 005 AU it will yield different results.
                                                    ww[wdc] = ww[wdc] + 'AU'
                                                    ww[wdc+1] = '93939999919190933'
                                                    logger.info("AU Detected seperate from issue - combining and continuing")

                                    fcdigit = helpers.issuedigits(ww[wdc])
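                                    # fcdigit is the filename's issue number normalized to the same integer form
                                    # stored as Int_IssueNumber, so '5', '05' and '5.0' all compare equal below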
                                    if 'annual' in self.nzb_name.lower():
                                        logger.info("annual detected.")
                                        annchk = "yes"
                                        issuechk = myDB.action("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()
                                    else:
                                        issuechk = myDB.action("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'],fcdigit]).fetchone()

                                    if issuechk is None:
                                        logger.info("No corresponding issue # found for " + str(cs['ComicID']))
                                    else:
                                        logger.info("Found matching issue # " + str(fcdigit) + " for ComicID: " + str(cs['ComicID']) + " / IssueID: " + str(issuechk['IssueID']))
                                        manual_list.append({"ComicLocation":   tmpfc['ComicLocation'],
                                                            "ComicID":         cs['ComicID'],
                                                            "IssueID":         issuechk['IssueID'],
                                                            "IssueNumber":     issuechk['Issue_Number'],
                                                            "ComicName":       cs['ComicName']})
                                        ccnt+=1
                                        print manual_list
                                    wdc+=1
                                fn+=1
                    print("There are " + str(len(manual_list)) + " files found that match on your watchlist, " + str(nm) + " do not match anything and will be ignored.")    
Example #11
0
def upcoming_update(ComicID,
                    ComicName,
                    IssueNumber,
                    IssueDate,
                    forcecheck=None,
                    futurepull=None,
                    altissuenumber=None):
    # here we add to upcoming table...
    myDB = db.DBConnection()
    dspComicName = ComicName  #to make sure that the word 'annual' will be displayed on screen
    if 'annual' in ComicName.lower():
        adjComicName = re.sub("\\bannual\\b", "",
                              ComicName.lower())  # for use with comparisons.
        logger.fdebug('annual detected - adjusting name to : ' + adjComicName)
    else:
        adjComicName = ComicName
    controlValue = {"ComicID": ComicID}
    newValue = {
        "ComicName": adjComicName,
        "IssueNumber": str(IssueNumber),
        "DisplayComicName": dspComicName,
        "IssueDate": str(IssueDate)
    }

    #let's refresh the series here just to make sure if an issue is available/not.
    mismatch = "no"
    CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?",
                               [ComicID]).fetchone()
    if CV_EXcomicid is None: pass
    else:
        if CV_EXcomicid['variloop'] == '99':
            mismatch = "yes"
    lastupdatechk = myDB.action("SELECT * FROM comics WHERE ComicID=?",
                                [ComicID]).fetchone()
    if lastupdatechk is None:
        pullupd = "yes"
    else:
        c_date = lastupdatechk['LastUpdated']
        if c_date is None:
            logger.error(
                lastupdatechk['ComicName'] +
                ' failed during a previous add/refresh. Please either delete and re-add the series, or try refreshing it.'
            )
            return
        c_obj_date = datetime.datetime.strptime(c_date, "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
        # no need to hammer the refresh
        # let's check it every 5 hours (or more)
        #pullupd = "yes"
    if 'annual' in ComicName.lower():
        if mylar.ANNUALS_ON:
            issuechk = myDB.action(
                "SELECT * FROM annuals WHERE ComicID=? AND Issue_Number=?",
                [ComicID, IssueNumber]).fetchone()
        else:
            logger.fdebug(
                'Annual detected, but annuals not enabled. Ignoring result.')
            return
    else:
        issuechk = myDB.action(
            "SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?",
            [ComicID, IssueNumber]).fetchone()

    if issuechk is None and altissuenumber is not None:
        logger.info('altissuenumber is : ' + str(altissuenumber))
        issuechk = myDB.action(
            "SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?",
            [ComicID, helpers.issuedigits(altissuenumber)]).fetchone()
    if issuechk is None:
        if futurepull is None:
            logger.fdebug(
                adjComicName + ' Issue: ' + str(IssueNumber) +
                ' not present in listings to mark for download...updating comic and adding to Upcoming Wanted Releases.'
            )
            # we need to either decrease the total issue count, OR indicate that an issue is upcoming.
            upco_results = myDB.action(
                "SELECT COUNT(*) FROM UPCOMING WHERE ComicID=?",
                [ComicID]).fetchall()
            upco_iss = upco_results[0][0]
            #logger.info("upco_iss: " + str(upco_iss))
            if int(upco_iss) > 0:
                #logger.info("There is " + str(upco_iss) + " of " + str(ComicName) + " that's not accounted for")
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": str(upco_iss)}
                myDB.upsert("comics", newVal, newKey)
            elif int(upco_iss) <= 0 and lastupdatechk['not_updated_db']:
                #not_updated_db still has a value but nothing is outstanding anymore - zero it back out since it's updated now.
                newKey = {"ComicID": ComicID}
                newVal = {"not_updated_db": ""}
                myDB.upsert("comics", newVal, newKey)

            if hours > 5 or forcecheck == 'yes':
                pullupd = "yes"
                logger.fdebug('Now Refreshing comic ' + ComicName +
                              ' to make sure it is up-to-date')
                if ComicID[:1] == "G":
                    mylar.importer.GCDimport(ComicID, pullupd)
                else:
                    mylar.importer.addComictoDB(ComicID, mismatch, pullupd)
            else:
                logger.fdebug(
                    'It has not been longer than 5 hours since we last did this...we will wait so we do not hammer things.'
                )
                return
        else:
            # if futurepull is not None, let's just update the status and ComicID
            # NOTE: THIS IS CREATING EMPTY ENTRIES IN THE FUTURE TABLE. ???
            nKey = {"ComicID": ComicID}
            nVal = {"Status": "Wanted"}
            myDB.upsert("future", nVal, nKey)

    if issuechk is not None:
        if issuechk['Issue_Number'] == IssueNumber or issuechk[
                'Issue_Number'] == altissuenumber:
            logger.fdebug(
                'Comic series already up-to-date ... no need to refresh at this time.'
            )
            logger.fdebug('Available to be marked for download - checking...' +
                          adjComicName + ' Issue: ' +
                          str(issuechk['Issue_Number']))
            logger.fdebug('...Existing status: ' + str(issuechk['Status']))
            control = {"IssueID": issuechk['IssueID']}
            newValue['IssueID'] = issuechk['IssueID']
            if issuechk['Status'] == "Snatched":
                values = {"Status": "Snatched"}
                newValue['Status'] = "Snatched"
            elif issuechk['Status'] == "Downloaded":
                values = {"Status": "Downloaded"}
                newValue['Status'] = "Downloaded"
                #if the status is Downloaded and it's on the pullist - let's mark it so everyone can bask in the glory

            elif issuechk['Status'] == "Wanted":
                values = {"Status": "Wanted"}
                newValue['Status'] = "Wanted"
            elif issuechk['Status'] == "Archived":
                values = {"Status": "Archived"}
                newValue['Status'] = "Archived"
            else:
                values = {"Status": "Skipped"}
                newValue['Status'] = "Skipped"
            #was in wrong place :(
        else:
            logger.fdebug(
                'Issues do not match for some reason...weekly new issue: ' +
                str(IssueNumber))
            return

    if mylar.AUTOWANT_UPCOMING:
        #for issues not in db - to be added to Upcoming table.
        if issuechk is None:
            newValue['Status'] = "Wanted"
            logger.fdebug(
                '...Changing Status to Wanted and throwing it in the Upcoming section since it is not published yet.'
            )
        #this works for issues existing in DB...
        elif issuechk['Status'] == "Skipped":
            newValue['Status'] = "Wanted"
            values = {"Status": "Wanted"}
            logger.fdebug('...New status of Wanted')
        elif issuechk['Status'] == "Wanted":
            logger.fdebug('...Status already Wanted .. not changing.')
        else:
            logger.fdebug(
                '...Already have issue - keeping existing status of : ' +
                str(issuechk['Status']))

    if issuechk is None:
        myDB.upsert("upcoming", newValue, controlValue)
    else:
        logger.fdebug('--attempt to find errant adds to Wanted list')
        logger.fdebug('UpcomingNewValue: ' + str(newValue))
        logger.fdebug('UpcomingcontrolValue: ' + str(controlValue))
        if issuechk['IssueDate'] == '0000-00-00' and newValue[
                'IssueDate'] != '0000-00-00':
            logger.fdebug(
                'Found a 0000-00-00 issue - force updating series to try and get it proper.'
            )
            dateVal = {
                "IssueDate": newValue['IssueDate'],
                "ComicName": issuechk['ComicName'],
                "Status": newValue['Status'],
                "IssueNumber": issuechk['Issue_Number']
            }
            logger.fdebug('updating date in upcoming table to : ' +
                          str(newValue['IssueDate']))
            logger.fdebug('ComicID:' + str(controlValue))
            myDB.upsert("upcoming", dateVal, controlValue)
            logger.fdebug('Temporarily putting the Issue Date for ' +
                          str(issuechk['Issue_Number']) + ' to ' +
                          str(newValue['IssueDate']))
            values = {"IssueDate": newValue['IssueDate']}
            #if ComicID[:1] == "G": mylar.importer.GCDimport(ComicID,pullupd='yes')
            #else: mylar.importer.addComictoDB(ComicID,mismatch,pullupd='yes')

        if 'annual' in ComicName.lower():
            myDB.upsert("annuals", values, control)
        else:
            myDB.upsert("issues", values, control)

        if issuechk['Status'] == 'Downloaded' or issuechk[
                'Status'] == 'Archived' or issuechk['Status'] == 'Snatched':
            logger.fdebug('updating Pull-list to reflect status.')
            downstats = {
                "Status": issuechk['Status'],
                "ComicID": issuechk['ComicID']
            }
            return downstats
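
# --- Hedged sketch (editor's illustration, not part of the Mylar source) ---
# The status branch chain above (Snatched / Downloaded / Wanted / Archived /
# else) simply carries a recognized status through unchanged and resets
# anything unrecognized to 'Skipped'. The same decision as a table lookup:
KNOWN_STATUSES = ('Snatched', 'Downloaded', 'Wanted', 'Archived')

def carry_status(existing_status):
    # preserve recognized states; anything unrecognized falls back to 'Skipped'
    if existing_status in KNOWN_STATUSES:
        return existing_status
    return 'Skipped'

# e.g. carry_status('Downloaded') -> 'Downloaded'; carry_status(None) -> 'Skipped'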
Example #12
0
    def Process(self):
        self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
        self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
        logger.fdebug("nzb name: " + str(self.nzb_name))
        logger.fdebug("nzb folder: " + str(self.nzb_folder))
        if mylar.USE_SABNZBD == 0:
            logger.fdebug("Not using SABnzbd")
        elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run':
            logger.fdebug('Not using SABnzbd : Manual Run')
        else:
            # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
            if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(
                    mylar.SAB_DIRECTORY) > 4:
                self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY,
                                               self.nzb_name).encode(
                                                   mylar.SYS_ENCODING)
                logger.fdebug(
                    'SABnzbd Download folder option enabled. Directory set to : '
                    + self.nzb_folder)

    # -- start. not used.
    #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
    #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
    #querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
    #logger.info("querysab_string:" + str(querysab))
    #file = urllib2.urlopen(querysab)
    #data = file.read()
    #file.close()
    #dom = parseString(data)

    #try:
    #    sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
    #except:
    #    errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
    #    logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
    #    return
    #sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
    #logger.fdebug("SAB Replace Spaces: " + str(sabreps))
    #logger.fdebug("SAB Replace Dots: " + str(sabrepd))
    # -- end. not used.

        if mylar.USE_NZBGET == 1:
            logger.fdebug("Using NZBGET")
            logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname.
            if mylar.NZBGET_DIRECTORY is not None and mylar.NZBGET_DIRECTORY != 'None' and len(
                    mylar.NZBGET_DIRECTORY) > 4:
                self.nzb_folder = os.path.join(mylar.NZBGET_DIRECTORY,
                                               self.nzb_name).encode(
                                                   mylar.SYS_ENCODING)
                logger.fdebug(
                    'NZBGET Download folder option enabled. Directory set to : '
                    + self.nzb_folder)
        myDB = db.DBConnection()

        if self.nzb_name == 'Manual Run':
            logger.fdebug("manual run initiated")
            #Manual postprocessing on a folder.
            #use the nzb_folder to determine every file
            #walk the dir,
            #once a series name and issue are matched,
            #write the series/issue/filename to a tuple
            #when all done, iterate over the tuple until completion...
            comicseries = myDB.action("SELECT * FROM comics")
            manual_list = []
            if comicseries is None:
                logger.error(
                    u"No Series in Watchlist - aborting Manual Post Processing. Maybe you should be running Import?"
                )
                return
            else:
                ccnt = 0
                nm = 0
                watchvals = {}
                for cs in comicseries:
                    watchvals = {
                        "SeriesYear": cs['ComicYear'],
                        "LatestDate": cs['LatestDate'],
                        "ComicVersion": cs['ComicVersion'],
                        "Publisher": cs['ComicPublisher'],
                        "Total": cs['Total']
                    }
                    watchmatch = filechecker.listFiles(self.nzb_folder,
                                                       cs['ComicName'],
                                                       cs['ComicPublisher'],
                                                       cs['AlternateSearch'],
                                                       manual=watchvals)
                    if watchmatch['comiccount'] == 0:  # is None:
                        nm += 1
                        continue
                    else:
                        fn = 0
                        fccnt = int(watchmatch['comiccount'])
                        if len(watchmatch) == 1: continue
                        while (fn < fccnt):
                            try:
                                tmpfc = watchmatch['comiclist'][fn]
                            except (IndexError, KeyError):
                                break
                            temploc = tmpfc['JusttheDigits'].replace('_', ' ')
                            temploc = re.sub('[\#\']', '', temploc)

                            if 'annual' in temploc.lower():
                                logger.info("annual detected.")
                                annchk = "yes"
                                fcdigit = helpers.issuedigits(
                                    re.sub('annual', '',
                                           str(temploc.lower())).strip())
                                issuechk = myDB.action(
                                    "SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?",
                                    [cs['ComicID'], fcdigit]).fetchone()
                            else:
                                fcdigit = helpers.issuedigits(temploc)
                                issuechk = myDB.action(
                                    "SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?",
                                    [cs['ComicID'], fcdigit]).fetchone()

                            if issuechk is None:
                                logger.info(
                                    "No corresponding issue # found for " +
                                    str(cs['ComicID']))
                            else:
                                datematch = "True"
                                if len(watchmatch) > 1 and tmpfc[
                                        'ComicYear'] is not None:
                                    #if the # of matches is more than 1, we need to make sure we get the right series
                                    #compare the ReleaseDate for the issue, to the found issue date in the filename.
                                    #if ReleaseDate doesn't exist, use IssueDate
                                    #if no issue date was found, then ignore.
                                    issyr = None
                                    if int(issuechk['IssueDate']
                                           [5:7]) == 11 or int(issuechk[
                                               'IssueDate'][5:7]) == 12:
                                        issyr = int(issuechk['IssueDate'][:4])
                                    elif int(
                                            issuechk['IssueDate']
                                        [5:7]) == 1 or int(
                                            issuechk['IssueDate'][5:7]) == 2:
                                        issyr = int(issuechk['IssueDate'][:4])

                                    if issuechk['ReleaseDate'] is not None:
                                        if int(issuechk['ReleaseDate']
                                               [:4]) < int(tmpfc['ComicYear']):
                                            logger.fdebug(
                                                str(issuechk['ReleaseDate']) +
                                                ' is before the issue year of '
                                                + str(tmpfc['ComicYear']) +
                                                ' that was discovered in the filename'
                                            )
                                            datematch = "False"

                                    else:
                                        if int(issuechk['IssueDate']
                                               [:4]) < int(tmpfc['ComicYear']):
                                            logger.fdebug(
                                                str(issuechk['IssueDate']) +
                                                ' is before the issue year ' +
                                                str(tmpfc['ComicYear']) +
                                                ' that was discovered in the filename'
                                            )
                                            datematch = "False"

                                    if datematch == "False" and issyr is not None:
                                        logger.fdebug(
                                            str(issyr) + ' comparing to ' +
                                            str(tmpfc['ComicYear']) +
                                            ' - rechecking by month-check versus year.'
                                        )
                                        datematch == "True"
                                        if int(issyr) != int(
                                                tmpfc['ComicYear']):
                                            logger.fdebug(
                                                '[fail] Issue is before the modified issue year of '
                                                + str(issyr))
                                            datematch = "False"

                                else:
                                    logger.info("Found matching issue # " +
                                                str(fcdigit) +
                                                " for ComicID: " +
                                                str(cs['ComicID']) +
                                                " / IssueID: " +
                                                str(issuechk['IssueID']))

                                if datematch == "True":
                                    manual_list.append({
                                        "ComicLocation":
                                        tmpfc['ComicLocation'],
                                        "ComicID":
                                        cs['ComicID'],
                                        "IssueID":
                                        issuechk['IssueID'],
                                        "IssueNumber":
                                        issuechk['Issue_Number'],
                                        "ComicName":
                                        cs['ComicName']
                                    })
                                else:
                                    logger.fdebug(
                                        'Incorrect series - not populating..continuing post-processing'
                                    )
                                #ccnt+=1

                            fn += 1
                logger.fdebug("There are " + str(len(manual_list)) +
                              " files found that match on your watchlist, " +
                              str(nm) +
                              " do not match anything and will be ignored.")
Example #13
0
    def Process(self):
        self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
        self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
        logger.fdebug("nzb name: " + str(self.nzb_name))
        logger.fdebug("nzb folder: " + str(self.nzb_folder))
        if mylar.USE_SABNZBD == 0:
            logger.fdebug("Not using SABnzbd")
        elif mylar.USE_SABNZBD != 0 and self.nzb_name == 'Manual Run':
            logger.fdebug('Not using SABnzbd : Manual Run')
        else:
            # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
            if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(
                    mylar.SAB_DIRECTORY) > 4:
                self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY,
                                               self.nzb_name).encode(
                                                   mylar.SYS_ENCODING)
                logger.fdebug(
                    'SABnzbd Download folder option enabled. Directory set to : '
                    + self.nzb_folder)

            #lookup nzb_name in nzblog table to get issueid

            #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
            #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
            querysab = str(
                mylar.SAB_HOST
            ) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(
                mylar.SAB_APIKEY)
            #logger.info("querysab_string:" + str(querysab))
            file = urllib2.urlopen(querysab)
            data = file.read()
            file.close()
            dom = parseString(data)

            try:
                sabreps = dom.getElementsByTagName(
                    'replace_spaces')[0].firstChild.wholeText
            except:
                errorm = dom.getElementsByTagName(
                    'error')[0].firstChild.wholeText
                logger.error(
                    u"Error detected attempting to retrieve SAB data : " +
                    errorm)
                return
            sabrepd = dom.getElementsByTagName(
                'replace_dots')[0].firstChild.wholeText
            logger.fdebug("SAB Replace Spaces: " + str(sabreps))
            logger.fdebug("SAB Replace Dots: " + str(sabrepd))
        if mylar.USE_NZBGET == 1:
            logger.fdebug("Using NZBGET")
            logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            # if the NZBGet Directory option is enabled, let's use that folder name and append the jobname.
            if mylar.NZBGET_DIRECTORY is not None and mylar.NZBGET_DIRECTORY != 'None' and len(
                    mylar.NZBGET_DIRECTORY) > 4:
                self.nzb_folder = os.path.join(mylar.NZBGET_DIRECTORY,
                                               self.nzb_name).encode(
                                                   mylar.SYS_ENCODING)
                logger.fdebug(
                    'NZBGET Download folder option enabled. Directory set to : '
                    + self.nzb_folder)
        myDB = db.DBConnection()

        if self.nzb_name == 'Manual Run':
            logger.fdebug("manual run initiated")
            #Manual postprocessing on a folder.
            #use the nzb_folder to determine every file
            #walk the dir,
            #once a series name and issue are matched,
            #write the series/issue/filename to a tuple
            #when all done, iterate over the tuple until completion...
            comicseries = myDB.action("SELECT * FROM comics")
            manual_list = []
            if comicseries is None:
                logger.error(
                    u"No Series in Watchlist - aborting Manual Post Processing. Maybe you should be running Import?"
                )
                return
            else:
                ccnt = 0
                nm = 0
                watchvals = {}
                for cs in comicseries:
                    watchvals = {
                        "SeriesYear": cs['ComicYear'],
                        "LatestDate": cs['LatestDate'],
                        "ComicVersion": cs['ComicVersion'],
                        "Publisher": cs['ComicPublisher'],
                        "Total": cs['Total']
                    }
                    watchmatch = filechecker.listFiles(self.nzb_folder,
                                                       cs['ComicName'],
                                                       cs['ComicPublisher'],
                                                       cs['AlternateSearch'],
                                                       manual=watchvals)
                    if watchmatch['comiccount'] == 0:  # is None:
                        nm += 1
                        continue
                    else:
                        fn = 0
                        fccnt = int(watchmatch['comiccount'])
                        if len(watchmatch) == 1: continue
                        while (fn < fccnt):
                            try:
                                tmpfc = watchmatch['comiclist'][fn]
                            except (IndexError, KeyError):
                                break
                            temploc = tmpfc['JusttheDigits'].replace('_', ' ')
                            temploc = re.sub('[\#\']', '', temploc)
                            #logger.fdebug("temploc: " + str(temploc))

                            ww = shlex.split(temploc)
                            lnw = len(ww)
                            wdc = 0
                            while (wdc < lnw):
                                #counts get buggered up when the issue is the last field in the filename - ie. '50.cbr'
                                if ".cbr" in ww[wdc].lower():
                                    ww[wdc] = ww[wdc].replace(".cbr", "")
                                elif ".cbz" in ww[wdc].lower():
                                    ww[wdc] = ww[wdc].replace(".cbz", "")
                                if "(c2c)" in ww[wdc].lower():
                                    ww[wdc] = ww[wdc].replace("(c2c)", " ")
                                    get_issue = shlex.split(str(ww[wdc]))
                                    if ww[wdc] != " ":
                                        ww[wdc] = get_issue[0]
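                                #a token containing '.' is either a decimal issue number (left alone)
                                #or just punctuation within a word, in which case the dots get stripped.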

                                if '.' in ww[wdc]:
                                    #logger.fdebug("decimal detected...adjusting.")
                                    try:
                                        i = float(ww[wdc])
                                    except (ValueError, TypeError):
                                        #not numeric
                                        #logger.fdebug("NOT NUMERIC - new word: " + str(ww[wdc]))
                                        ww[wdc] = ww[wdc].replace(".", "")
                                else:
                                    #numeric
                                    pass

                                if ww[wdc].isdigit():
                                    if int(ww[wdc]) > 0:
                                        if wdc + 1 < len(ww) and 'au' in ww[wdc + 1].lower():
                                            if len(ww[wdc + 1]) == 2:
                                                #if the 'AU' is in 005AU vs 005 AU it will yield different results.
                                                ww[wdc] = ww[wdc] + 'AU'
                                                ww[wdc + 1] = '93939999919190933'  #overwrite the consumed 'AU' token with a throwaway numeric placeholder
                                                logger.info("AU Detected separate from issue - combining and continuing")

                                fcdigit = helpers.issuedigits(ww[wdc])
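                                #fcdigit is the candidate issue number normalized to the same integer form
                                #stored as Int_IssueNumber, so it can be matched directly in the lookup below.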
                                if 'annual' in self.nzb_name.lower():
                                    logger.info("annual detected.")
                                    annchk = "yes"
                                    issuechk = myDB.action("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]).fetchone()
                                else:
                                    issuechk = myDB.action("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [cs['ComicID'], fcdigit]).fetchone()

                                if issuechk is None:
                                    logger.info("No corresponding issue # found for " + str(cs['ComicID']))
                                else:
                                    datematch = "True"
                                    if len(watchmatch) > 1:
                                        #if the # of matches is more than 1, we need to make sure we get the right series
                                        #compare the ReleaseDate for the issue, to the found issue date in the filename.
                                        #if ReleaseDate doesn't exist, use IssueDate
                                        #if no issue date was found, then ignore.
                                        if issuechk['ReleaseDate'] is not None:
                                            if int(issuechk['ReleaseDate'][:4]) < int(tmpfc['ComicYear']):
                                                logger.fdebug(str(issuechk['ReleaseDate']) + ' is before the issue year of ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
                                                datematch = "False"
                                        else:
                                            if int(issuechk['IssueDate'][:4]) < int(tmpfc['ComicYear']):
                                                logger.fdebug(str(issuechk['IssueDate']) + ' is before the issue year ' + str(tmpfc['ComicYear']) + ' that was discovered in the filename')
                                                datematch = "False"

                                    else:
                                        logger.info("Found matching issue # " + str(fcdigit) + " for ComicID: " + str(cs['ComicID']) + " / IssueID: " + str(issuechk['IssueID']))

                                    if datematch == "True":
                                        manual_list.append({"ComicLocation": tmpfc['ComicLocation'],
                                                            "ComicID": cs['ComicID'],
                                                            "IssueID": issuechk['IssueID'],
                                                            "IssueNumber": issuechk['Issue_Number'],
                                                            "ComicName": cs['ComicName']})
                                    else:
                                        logger.fdebug('Incorrect series - not populating..continuing post-processing')
                                    ccnt += 1
                                    #print manual_list
                                wdc += 1
                            fn += 1
                logger.fdebug("There are " + str(len(manual_list)) +
                              " files found that match on your watchlist, " +
                              str(nm) +
                              " do not match anything and will be ignored.")