Example #1
    def markissues(self, action=None, **args):
        myDB = db.DBConnection()
        issuesToAdd = []
        issuestoArchive = []
        if action == 'WantedNew':
            newaction = 'Wanted'
        else:
            newaction = action
        for IssueID in args:
            if IssueID is None: continue
            else:
                mi = myDB.action("SELECT * FROM issues WHERE IssueID=?",[IssueID]).fetchone()
                miyr = myDB.action("SELECT ComicYear FROM comics WHERE ComicID=?", [mi['ComicID']]).fetchone()
                if action == 'Downloaded':
                    if mi['Status'] == "Skipped" or mi['Status'] == "Wanted":
                        logger.info(u"Cannot change status to %s as comic is not Snatched or Downloaded" % (newaction))
                        continue
                elif action == 'Archived':
                    logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
                    #updater.forceRescan(mi['ComicID'])
                    issuestoArchive.append(IssueID)
                elif action == 'Wanted':
                    logger.info(u"Marking %s %s as %s" % (mi['ComicName'], mi['Issue_Number'], newaction))
                    issuesToAdd.append(IssueID)

                controlValueDict = {"IssueID": IssueID}
                newValueDict = {"Status": newaction}
                myDB.upsert("issues", newValueDict, controlValueDict)
        if len(issuestoArchive) > 0:
            updater.forceRescan(mi['ComicID'])
        if len(issuesToAdd) > 0:
            logger.debug("Marking issues: %s as Wanted" % issuesToAdd)
            threading.Thread(target=search.searchIssueIDList, args=[issuesToAdd]).start()
        #if IssueID:
        raise cherrypy.HTTPRedirect("artistPage?ComicID=%s" % mi['ComicID'])
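Each branch above only stages work (archive lists, search lists); the status itself is written through Mylar's myDB.upsert("issues", newValueDict, controlValueDict), an update-or-insert keyed on the control dictionary. Below is a minimal sketch of that pattern over plain sqlite3; the helper name, table layout, and sample values are illustrative only, and Mylar's own db.DBConnection is the real implementation.

import sqlite3

def upsert(conn, table, new_value_dict, control_value_dict):
    # Update the row matching the control keys, or insert a new one if no row matches.
    where = ' AND '.join('%s=?' % k for k in control_value_dict)
    params = list(control_value_dict.values())
    if conn.execute('SELECT COUNT(*) FROM %s WHERE %s' % (table, where), params).fetchone()[0]:
        sets = ', '.join('%s=?' % k for k in new_value_dict)
        conn.execute('UPDATE %s SET %s WHERE %s' % (table, sets, where),
                     list(new_value_dict.values()) + params)
    else:
        cols = list(control_value_dict) + list(new_value_dict)
        vals = params + list(new_value_dict.values())
        conn.execute('INSERT INTO %s (%s) VALUES (%s)'
                     % (table, ', '.join(cols), ', '.join('?' * len(vals))), vals)
    conn.commit()

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE issues (IssueID TEXT, Status TEXT)')
upsert(conn, 'issues', {'Status': 'Wanted'}, {'IssueID': '1234'})    # inserts a new row
upsert(conn, 'issues', {'Status': 'Archived'}, {'IssueID': '1234'})  # updates the same row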
Example #2
def archivefiles(comicid, ogdir, ogcname):
    myDB = db.DBConnection()
    # if move files isn't enabled, let's set all found comics to Archive status :)
    result = myDB.select("SELECT * FROM importresults WHERE ComicName=?",
                         [ogcname])
    if result is None:
        pass
    else:
        scandir = []
        for res in result:
            if any(
                [os.path.dirname(res['ComicLocation']) in x for x in scandir]):
                pass
            else:
                scandir.append(os.path.dirname(res['ComicLocation']))

        for sdir in scandir:
            logger.info(
                'Updating issue information and setting status to Archived for location: '
                + sdir)
            updater.forceRescan(
                comicid,
                archive=sdir)  #send to rescanner with archive mode turned on

        logger.info('Now scanning in files.')
        updater.forceRescan(comicid)

    return
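The loop above collects each file's parent directory once before calling updater.forceRescan(comicid, archive=sdir) per location. The substring test (any([... in x for x in scandir])) can be expressed more directly as an exact-match check against a set; a small sketch of that alternative with made-up paths:

import os

# Hypothetical import rows; only the directory portion matters here.
rows = [
    {'ComicLocation': '/comics/import/Series A/issue1.cbz'},
    {'ComicLocation': '/comics/import/Series A/issue2.cbz'},
    {'ComicLocation': '/comics/import/Series B/issue1.cbr'},
]

scandir = []
seen = set()
for res in rows:
    parent = os.path.dirname(res['ComicLocation'])
    if parent not in seen:  # exact-match de-duplication instead of substring checks
        seen.add(parent)
        scandir.append(parent)

print(scandir)  # ['/comics/import/Series A', '/comics/import/Series B']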
Example #3
def archivefiles(comicid, comlocation, imported):
    myDB = db.DBConnection()
    # if move files isn't enabled, let's set all found comics to Archive status :)
    try:
        imported = ast.literal_eval(imported)
    except Exception as e:
        logger.warn('[%s] Error encountered converting import data' % e)

    ComicName = imported['ComicName']
    impres = imported['filelisting']

    if impres is not None:
        scandir = []
        for impr in impres:
            srcimp = impr['comiclocation']
            orig_filename = impr['comicfilename']

            if not any([
                    os.path.abspath(os.path.join(srcimp, os.pardir)) == x
                    for x in scandir
            ]):
                scandir.append(os.path.abspath(os.path.join(srcimp,
                                                            os.pardir)))

        for sdir in scandir:
            logger.info(
                'Updating issue information and setting status to Archived for location: '
                + sdir)
            updater.forceRescan(
                comicid,
                archive=sdir)  #send to rescanner with archive mode turned on

        logger.info('Now scanning in files.')
        updater.forceRescan(comicid)

        for result in impres:
            try:
                res = result['import_id']
            except KeyError:
                #if it's an 'older' import that wasn't imported, just make it a basic match so things can move and update properly.
                controlValue = {
                    "ComicFilename": result['comicfilename'],
                    "SRID": imported['srid']
                }
                newValue = {"Status": "Imported", "ComicID": comicid}
            else:
                controlValue = {
                    "impID": result['import_id'],
                    "ComicFilename": result['comicfilename']
                }
                newValue = {
                    "Status": "Imported",
                    "SRID": imported['srid'],
                    "ComicID": comicid
                }
            myDB.upsert("importresults", newValue, controlValue)

    return
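Unlike the previous examples, this variant receives imported as a string (a repr of the import payload passed through the web layer) and turns it back into a dict with ast.literal_eval, which evaluates Python literals only and never runs arbitrary code. A standalone sketch with made-up field values:

import ast

# Hypothetical serialized payload, roughly the shape archivefiles() expects.
imported = ("{'ComicName': 'Example Series', 'srid': '94000000', "
            "'filelisting': [{'comiclocation': '/comics/import/Example Series', "
            "'comicfilename': 'Example Series 001.cbz'}]}")

data = ast.literal_eval(imported)  # raises ValueError/SyntaxError on non-literal input
print(data['ComicName'])                        # Example Series
print(data['filelisting'][0]['comicfilename'])  # Example Series 001.cbz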
Example #4
def archivefiles(comicid, comlocation, imported):
    myDB = db.DBConnection()
    # if move files isn't enabled, let's set all found comics to Archive status :)
    try:
        imported = ast.literal_eval(imported)
    except ValueError:
        pass
    ComicName = imported['ComicName']
    impres = imported['filelisting']

    if impres is not None:
        scandir = []
        for impr in impres:
            srcimp = impr['comiclocation']
            orig_filename = impr['comicfilename']

            if not any([os.path.abspath(os.path.join(srcimp, os.pardir)) == x for x in scandir]):
                scandir.append(os.path.abspath(os.path.join(srcimp, os.pardir)))


        for sdir in scandir:
            logger.info('Updating issue information and setting status to Archived for location: ' + sdir)
            updater.forceRescan(comicid, archive=sdir) #send to rescanner with archive mode turned on

        logger.info('Now scanning in files.')
        updater.forceRescan(comicid)

        for result in impres:
            try:
                res = result['import_id']
            except KeyError:
                #if it's an 'older' import that wasn't imported, just make it a basic match so things can move and update properly.
                controlValue = {"ComicFilename": result['comicfilename'],
                                "SRID":          imported['srid']}
                newValue = {"Status":            "Imported",
                            "ComicID":           comicid}
            else:
                controlValue = {"impID":         result['import_id'],
                                "ComicFilename": result['comicfilename']}
                newValue = {"Status":            "Imported",
                            "SRID":              imported['srid'],
                            "ComicID":           comicid}
            myDB.upsert("importresults", newValue, controlValue)


    return
Example #5
def archivefiles(comicid, ogdir, ogcname):
    myDB = db.DBConnection()
    # if move files isn't enabled, let's set all found comics to Archive status :)
    result = myDB.select("SELECT * FROM importresults WHERE ComicName=?", [ogcname])
    if result is None:
        pass
    else:
        scandir = []
        for res in result:
            if any([os.path.dirname(res['ComicLocation']) in x for x in scandir]):
                pass
            else:
                scandir.append(os.path.dirname(res['ComicLocation']))

        for sdir in scandir:
            logger.info('Updating issue information and setting status to Archived for location: ' + sdir)
            updater.forceRescan(comicid, archive=sdir) #send to rescanner with archive mode turned on

        logger.info('Now scanning in files.')
        updater.forceRescan(comicid)

    return
Example #6
def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None, queue=None):

    if cron and not mylar.LIBRARYSCAN:
        return

    if not dir:
        dir = mylar.CONFIG.COMIC_DIR

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' % dir)
        return "Fail"


    logger.info('Scanning comic directory: %s' % dir)

    basedir = dir

    comic_list = []
    failure_list = []
    utter_failure_list = []
    comiccnt = 0
    extensions = ('cbr','cbz')
    cv_location = []
    cbz_retry = 0

    mylar.IMPORT_STATUS = 'Now attempting to parse files for additional information'
    myDB = db.DBConnection()
    #mylar.IMPORT_PARSED_COUNT #used to count what #/totalfiles the filename parser is currently on
    for r, d, f in os.walk(dir):
        for files in f:
            mylar.IMPORT_FILES +=1
            if any(files.lower().endswith('.' + x.lower()) for x in extensions):
                comicpath = os.path.join(r, files)
                if mylar.CONFIG.IMP_PATHS is True:
                    if myDB.select('SELECT * FROM comics JOIN issues WHERE issues.Status="Downloaded" AND ComicLocation=? AND issues.Location=?', [r, files]):
                        logger.info('Skipped known issue path: %s' % comicpath)
                        continue

                comic = files
                if not os.path.exists(comicpath):
                    logger.fdebug(f'''Comic: {comic} doesn't actually exist - assuming it is a symlink to a nonexistent path.''')
                    continue

                comicsize = os.path.getsize(comicpath)
                logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes')

                try:
                    t = filechecker.FileChecker(dir=r, file=comic)
                    results = t.listFiles()

                    #logger.info(results)
                    #'type':           re.sub('\.','', filetype).strip(),
                    #'sub':            path_list,
                    #'volume':         volume,
                    #'match_type':     match_type,
                    #'comicfilename':  filename,
                    #'comiclocation':  clocation,
                    #'series_name':    series_name,
                    #'series_volume':  issue_volume,
                    #'series_year':    issue_year,
                    #'justthedigits':  issue_number,
                    #'annualcomicid':  annual_comicid,
                    #'scangroup':      scangroup}


                    if results:
                        resultline = '[PARSE-' + results['parse_status'].upper() + ']'
                        resultline += '[SERIES: ' + results['series_name'] + ']'
                        if results['series_volume'] is not None:
                            resultline += '[VOLUME: ' + results['series_volume'] + ']'
                        if results['issue_year'] is not None:
                            resultline += '[ISSUE YEAR: ' + str(results['issue_year']) + ']'
                        if results['issue_number'] is not None:
                            resultline += '[ISSUE #: ' + results['issue_number'] + ']'
                        logger.fdebug(resultline)
                    else:
                        logger.fdebug('[PARSED] FAILURE.')
                        continue

                    # We need the unicode path to use for logging, inserting into database
                    unicode_comic_path = comicpath

                    if results['parse_status'] == 'success':
                        comic_list.append({'ComicFilename':           comic,
                                           'ComicLocation':           comicpath,
                                           'ComicSize':               comicsize,
                                           'Unicode_ComicLocation':   unicode_comic_path,
                                           'parsedinfo':              {'series_name':    results['series_name'],
                                                                       'series_volume':  results['series_volume'],
                                                                       'issue_year':     results['issue_year'],
                                                                       'issue_number':   results['issue_number']}
                                           })
                        comiccnt +=1
                        mylar.IMPORT_PARSED_COUNT +=1
                    else:
                        failure_list.append({'ComicFilename':           comic,
                                             'ComicLocation':           comicpath,
                                             'ComicSize':               comicsize,
                                             'Unicode_ComicLocation':   unicode_comic_path,
                                             'parsedinfo':              {'series_name':    results['series_name'],
                                                                         'series_volume':  results['series_volume'],
                                                                         'issue_year':     results['issue_year'],
                                                                         'issue_number':   results['issue_number']}
                                           })
                        mylar.IMPORT_FAILURE_COUNT +=1
                        if comic.endswith('.cbz'):
                            cbz_retry +=1

                except Exception as e:
                    logger.info('bang')
                    utter_failure_list.append({'ComicFilename':           comic,
                                               'ComicLocation':           comicpath,
                                               'ComicSize':               comicsize,
                                               'Unicode_ComicLocation':   unicode_comic_path,
                                               'parsedinfo':              None,
                                               'error':                   e
                                             })
                    logger.info('[' + str(e) + '] FAILURE encountered. Logging the error for ' + comic + ' and continuing...')
                    mylar.IMPORT_FAILURE_COUNT +=1
                    if comic.endswith('.cbz'):
                        cbz_retry +=1
                    continue

            if 'cvinfo' in files:
                cv_location.append(r)
                logger.fdebug('CVINFO found: ' + os.path.join(r))

    mylar.IMPORT_TOTALFILES = comiccnt
    logger.info('I have successfully discovered & parsed a total of ' + str(comiccnt) + ' files....analyzing now')
    logger.info('I have not been able to determine what ' + str(len(failure_list)) + ' files are')
    logger.info('However, ' + str(cbz_retry) + ' out of the ' + str(len(failure_list)) + ' files are in a cbz format, which may contain metadata.')
    logger.info('[ERRORS] I have encountered ' + str(len(utter_failure_list)) + ' file-scanning errors during the scan, but have recorded the necessary information.')
    mylar.IMPORT_STATUS = 'Successfully parsed ' + str(comiccnt) + ' files'
    #return queue.put(valreturn)

    if len(utter_failure_list) > 0:
        logger.fdebug('Failure list: %s' % utter_failure_list)

    #let's load in the watchlist to see if we have any matches.
    logger.info("loading in the watchlist to see if a series is being watched already...")
    watchlist = myDB.select("SELECT * from comics")
    ComicName = []
    DisplayName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        #use the comicname_filesafe to start
        watchdisplaycomic = watch['ComicName']
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['ComicName_Filesafe'])
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if ' the ' in watchcomic.lower():
            #drop the 'the' from the watchcomic title for proper comparisons.
            watchcomic = watchcomic[-4:]

        alt_chk = "no" # alt-checker flag (default to no)

        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch['AlternateSearch'] != 'None':
            altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['AlternateSearch'])
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        DisplayName.append(watchdisplaycomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt+=1

    logger.info("Successfully loaded " + str(watchcnt) + " series from your watchlist.")

    ripperlist=['digital-',
                'empire',
                'dcp']

    watchfound = 0

    datelist = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
#    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
#    #search for number as text, and change to numeric
#    for numbs in basnumbs:
#        #logger.fdebug("numbs:" + str(numbs))
#        if numbs in ComicName.lower():
#            numconv = basnumbs[numbs]
#            #logger.fdebug("numconv: " + str(numconv))

    issueid_list = []
    cvscanned_loc = None
    cvinfo_CID = None
    cnt = 0
    mylar.IMPORT_STATUS = '[0%] Now parsing individual filenames for metadata if available'

    for i in comic_list:
        mylar.IMPORT_STATUS = '[' + str(cnt) + '/' + str(comiccnt) + '] Now parsing individual filenames for metadata if available'
        logger.fdebug('Analyzing : ' + i['ComicFilename'])
        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        issueinfo = None
        #probably need to zero these issue-related metadata to None so we can pick the best option
        issuevolume = None

        #Make sure cvinfo is checked for FIRST (so that CID can be attached to all files properly thereafter as they're scanned in)
        if os.path.dirname(comlocation) in cv_location and os.path.dirname(comlocation) != cvscanned_loc:

        #if comfilename == 'cvinfo':
            logger.info('comfilename: ' + comfilename)
            logger.info('cv_location: ' + str(cv_location))
            logger.info('comlocation: ' + os.path.dirname(comlocation))
            #if cvscanned_loc != comlocation:
            try:
                with open(os.path.join(os.path.dirname(comlocation), 'cvinfo')) as f:
                    urllink = f.readline()

                if urllink:
                    cid = urllink.strip()
                    pattern = re.compile(r"^.*?\b(49|4050)-(?P<num>\d{2,})\b.*$", re.I)
                    match = pattern.match(cid)
                    if match:
                        cvinfo_CID = match.group("num")
                        logger.info('CVINFO file located within directory. Attaching everything in directory that is valid to ComicID: ' + str(cvinfo_CID))
                        #store the location of the cvinfo so it's applied to the correct directory (since we're usually scanning multiple directories)
                        cvscanned_loc = os.path.dirname(comlocation)
                else:
                    logger.error("Could not read cvinfo file properly (or it does not contain any data)")
            except (OSError, IOError):
                logger.error("Could not read cvinfo file properly (or it does not contain any data)")
        #else:
        #    don't scan in it again if it's already been done initially
        #    continue

        if mylar.CONFIG.IMP_METADATA:
            #if read tags is enabled during import, check here.
            if i['ComicLocation'].endswith('.cbz'):
                logger.fdebug('[IMPORT-CBZ] Metatagging checking enabled.')
                logger.info('[IMPORT-CBZ] Attempting to read tags present in filename: ' + i['ComicLocation'])
                try:
                    issueinfo = helpers.IssueDetails(i['ComicLocation'], justinfo=True)
                except:
                    logger.fdebug('[IMPORT-CBZ] Unable to retrieve metadata - possibly doesn\'t exist. Ignoring meta-retrieval')
                    pass
                else:
                    logger.info('issueinfo: ' + str(issueinfo))

                    if issueinfo is None or issueinfo['metadata'] is None:
                        logger.fdebug('[IMPORT-CBZ] No valid metadata contained within filename. Dropping down to parsing the filename itself.')
                        pass
                    else:
                        issuenotes_id = None
                        logger.info("[IMPORT-CBZ] Successfully retrieved some tags. Let's see what I can figure out.")
                        comicname = issueinfo['metadata']['series']
                        if comicname is not None:
                            logger.fdebug('[IMPORT-CBZ] Series Name: ' + comicname)
                            as_d = filechecker.FileChecker()
                            as_dyninfo = as_d.dynamic_replace(comicname)
                            logger.fdebug('Dynamic-ComicName: ' + as_dyninfo['mod_seriesname'])
                        else:
                            logger.fdebug('[IMPORT-CBZ] No series name found within metadata. This is bunk - dropping down to file parsing for usable information.')
                            issueinfo = None
                            issue_number = None

                        if issueinfo is not None:
                            try:
                                issueyear = issueinfo['metadata']['year']
                            except:
                                issueyear = None

                            #if the issue number is a non-numeric unicode string, this will screw up along with impID
                            issue_number = issueinfo['metadata']['issue_number']
                            if issue_number is not None:
                                logger.fdebug('[IMPORT-CBZ] Issue Number: ' + issue_number)
                            else:
                                issue_number = i['parsedinfo']['issue_number']

                            if 'annual' in comicname.lower() or 'annual' in comfilename.lower():
                                if issue_number is None or issue_number == 'None':
                                    logger.info('Annual detected with no issue number present within metadata. Assuming year as issue.')
                                    try:
                                        issue_number = 'Annual ' + str(issueyear)
                                    except:
                                        issue_number = 'Annual ' + i['parsedinfo']['issue_year']
                                else:
                                    logger.info('Annual detected with issue number present within metadata.')
                                    if 'annual' not in issue_number.lower():
                                        issue_number = 'Annual ' + issue_number
                                mod_series = re.sub('annual', '', comicname, flags=re.I).strip()
                            else:
                                mod_series = comicname

                            logger.fdebug('issue number SHOULD Be: ' + issue_number)

                            try:
                                issuetitle = issueinfo['metadata']['title']
                            except:
                                issuetitle = None
                            try:
                                issueyear = issueinfo['metadata']['year']
                            except:
                                issueyear = None
                            try:
                                issuevolume = str(issueinfo['metadata']['volume'])
                                if all([issuevolume is not None, issuevolume != 'None', not issuevolume.lower().startswith('v')]):
                                    issuevolume = 'v' + str(issuevolume)
                                if any([issuevolume is None, issuevolume == 'None']):
                                    logger.info('[EXCEPT] issue volume is NONE')
                                    issuevolume = None
                                else:
                                    logger.fdebug('[TRY]issue volume is: ' + str(issuevolume))
                            except:
                                logger.fdebug('[EXCEPT]issue volume is: ' + str(issuevolume))
                                issuevolume = None

                            if any([comicname is None, comicname == 'None', issue_number is None, issue_number == 'None']):
                                logger.fdebug('[IMPORT-CBZ] Improperly tagged file as the metatagging is invalid. Ignoring meta and just parsing the filename.')
                                issueinfo = None
                                pass
                            else:
                                # if used by ComicTagger, Notes field will have the IssueID.
                                issuenotes = issueinfo['metadata']['notes']
                                logger.fdebug('[IMPORT-CBZ] Notes: ' + str(issuenotes))
                                if issuenotes is not None and issuenotes != 'None':
                                    if 'Issue ID' in issuenotes:
                                        st_find = issuenotes.find('Issue ID')
                                        tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
                                        if tmp_issuenotes_id.isdigit():
                                            issuenotes_id = tmp_issuenotes_id
                                            logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
                                    elif 'CVDB' in issuenotes:
                                        st_find = issuenotes.find('CVDB')
                                        tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
                                        if tmp_issuenotes_id.isdigit():
                                            issuenotes_id = tmp_issuenotes_id
                                            logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
                                    else:
                                        logger.fdebug('[IMPORT-CBZ] Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')

                                logger.fdebug('[IMPORT-CBZ] Adding ' + comicname + ' to the import-queue!')
                                #impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
                                impid = str(random.randint(1000000,99999999))
                                logger.fdebug('[IMPORT-CBZ] impid: ' + str(impid))
                                #make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
                                issuepopulated = False
                                if cvinfo_CID is None:
                                    if issuenotes_id is None:
                                        logger.info('[IMPORT-CBZ] No ComicID detected where it should be. Bypassing this metadata entry and going the parsing route [' + comfilename + ']')
                                    else:
                                        #we need to store the impid here as well so we can look it up.
                                        issueid_list.append({'issueid':    issuenotes_id,
                                                             'importinfo': {'impid':       impid,
                                                                            'comicid':     None,
                                                                            'comicname':   comicname,
                                                                            'dynamicname': as_dyninfo['mod_seriesname'],
                                                                            'comicyear':   issueyear,
                                                                            'issuenumber': issue_number,
                                                                            'volume':      issuevolume,
                                                                            'comfilename': comfilename,
                                                                            'comlocation': comlocation}
                                                           })
                                        mylar.IMPORT_CID_COUNT +=1
                                        issuepopulated = True

                                if issuepopulated == False:
                                    if cvscanned_loc == os.path.dirname(comlocation):
                                        cv_cid = cvinfo_CID
                                        logger.fdebug('[IMPORT-CBZ] CVINFO_COMICID attached : ' + str(cv_cid))
                                    else:
                                        cv_cid = None
                                    import_by_comicids.append({
                                        "impid": impid,
                                        "comicid": cv_cid,
                                        "watchmatch": None,
                                        "displayname": mod_series,
                                        "comicname": comicname,
                                        "dynamicname": as_dyninfo['mod_seriesname'],
                                        "comicyear": issueyear,
                                        "issuenumber": issue_number,
                                        "volume": issuevolume,
                                        "issueid": issuenotes_id,
                                        "comfilename": comfilename,
                                        "comlocation": comlocation
                                                       })

                                    mylar.IMPORT_CID_COUNT +=1
                        else:
                            pass
                            #logger.fdebug(i['ComicFilename'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')

        if issueinfo is None:
            if i['parsedinfo']['issue_number'] is None:
                if 'annual' in i['parsedinfo']['series_name'].lower():
                    logger.fdebug('Annual detected with no issue number present. Assuming year as issue.')##1 issue')
                    if i['parsedinfo']['issue_year'] is not None:
                        issuenumber = 'Annual ' + str(i['parsedinfo']['issue_year'])
                    else:
                        issuenumber = 'Annual 1'
            else:
                issuenumber = i['parsedinfo']['issue_number']

            if 'annual' in i['parsedinfo']['series_name'].lower():
                mod_series = re.sub('annual', '', i['parsedinfo']['series_name'], flags=re.I).strip()
                logger.fdebug('Annual detected with no issue number present. Assuming year as issue.')##1 issue')
                if i['parsedinfo']['issue_number'] is not None:
                    issuenumber = 'Annual ' + str(i['parsedinfo']['issue_number'])
                else:
                    if i['parsedinfo']['issue_year'] is not None:
                        issuenumber = 'Annual ' + str(i['parsedinfo']['issue_year'])
                    else:
                        issuenumber = 'Annual 1'
            else:
                mod_series = i['parsedinfo']['series_name']
                issuenumber = i['parsedinfo']['issue_number']


            logger.fdebug('[' + mod_series + '] Adding to the import-queue!')
            isd = filechecker.FileChecker()
            is_dyninfo = isd.dynamic_replace(mod_series) #helpers.conversion(mod_series))
            logger.fdebug('Dynamic-ComicName: ' + is_dyninfo['mod_seriesname'])

            #impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
            impid = str(random.randint(1000000,99999999))
            logger.fdebug("impid: " + str(impid))
            if cvscanned_loc == os.path.dirname(comlocation):
                cv_cid = cvinfo_CID
                logger.fdebug('CVINFO_COMICID attached : ' + str(cv_cid))
            else:
                cv_cid = None

            if issuevolume is None:
                logger.fdebug('issue volume is : ' + str(issuevolume))
                if i['parsedinfo']['series_volume'] is None:
                    issuevolume = None
                else:
                    if str(i['parsedinfo']['series_volume'].lower()).startswith('v'):
                        issuevolume = i['parsedinfo']['series_volume']
                    else:
                        issuevolume = 'v' + str(i['parsedinfo']['series_volume'])
            else:
                logger.fdebug('issue volume not none : ' + str(issuevolume))
                if issuevolume.lower().startswith('v'):
                    issuevolume = issuevolume
                else:
                    issuevolume = 'v' + str(issuevolume)

            logger.fdebug('IssueVolume is : ' + str(issuevolume))

            import_by_comicids.append({
                "impid": impid,
                "comicid": cv_cid,
                "issueid": None,
                "watchmatch": None, #watchmatch (should be true/false if it already exists on watchlist)
                "displayname": mod_series,
                "comicname": i['parsedinfo']['series_name'],
                "dynamicname": is_dyninfo['mod_seriesname'].lower(),
                "comicyear": i['parsedinfo']['issue_year'],
                "issuenumber": issuenumber, #issuenumber,
                "volume": issuevolume,
                "comfilename": comfilename,
                "comlocation": comlocation #helpers.conversion(comlocation)
                                      })
        cnt+=1
    #logger.fdebug('import_by_ids: ' + str(import_by_comicids))

    #reverse lookup all of the gathered IssueID's in order to get the related ComicID
    reverse_issueids = []
    for x in issueid_list:
        reverse_issueids.append(x['issueid'])

    vals = []
    if len(reverse_issueids) > 0:
        mylar.IMPORT_STATUS = 'Now Reverse looking up ' + str(len(reverse_issueids)) + ' IssueIDs to get the ComicIDs'
        vals = mylar.cv.getComic(None, 'import', comicidlist=reverse_issueids)
        #logger.fdebug('vals returned:' + str(vals))

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        #logger.fdebug("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.CONFIG.IMP_MOVE:
                logger.info('You checked off Move Files...so that\'s what I am going to do') 
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                logger.fdebug('Moving files into appropriate directory')
                while (wat < watchfound): 
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    logger.fdebug('ComicLocation: ' + watch_comlocation)
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename'] 
                    logger.fdebug('Orig. Location: ' + orig_comlocation)
                    logger.fdebug('Orig. Filename: ' + orig_filename)
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.CONFIG.IMP_RENAME:
                        logger.fdebug('Renaming files according to configuration details : ' + str(mylar.CONFIG.FILE_FORMAT))
                        renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
                        nfilename = renameit['nfilename']

                        dst_path = os.path.join(watch_comlocation, nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        logger.fdebug('Renaming files not enabled, keeping original filename(s)')
                        dst_path = os.path.join(watch_comlocation, orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    logger.fdebug('I am going to move ' + orig_comlocation + ' to ' + dst_path)
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info("Failed to move directory - check directories and manually re-run.")
                    wat+=1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    logger.fdebug('ComicID: ' + str(watch_comicid))
                    logger.fdebug('Issue#: ' + str(watch_issue))
                    issuechk = myDB.selectone("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        logger.fdebug('No matching issues for this comic#')
                    else:
                        logger.fdebug('...Existing status: ' + str(issuechk['Status']))
                        control = {"IssueID":   issuechk['IssueID']}
                        values = {"Status":   "Archived"}
                        logger.fdebug('...changing status of ' + str(issuechk['Issue_Number']) + ' to Archived ')
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat+=1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    logger.fdebug('Rescanning.. ' + str(c))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"

    if len(import_by_comicids) > 0 or len(vals) > 0:
        #import_comicids['comic_info'] = import_by_comicids
        #if vals:
        #    import_comicids['issueid_info'] = vals
        #else:
        #    import_comicids['issueid_info'] = None
        if vals:
            cvimport_comicids = vals
            import_cv_ids = len(vals)
        else:
            cvimport_comicids = None
            import_cv_ids = 0
    else:
        import_cv_ids = 0
        cvimport_comicids = None
                    
    return {'import_by_comicids':  import_by_comicids, 
            'import_count':        len(import_by_comicids),
            'CV_import_comicids':  cvimport_comicids,
            'import_cv_ids':       import_cv_ids,
            'issueid_list':        issueid_list,
            'failure_list':        failure_list,
            'utter_failure_list':  utter_failure_list}
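The cvinfo handling in this version extracts the ComicVine ID from whatever URL the file holds by matching the 49-/4050- prefixes ComicVine uses. A quick standalone check of that exact pattern; the sample URL below is made up:

import re

pattern = re.compile(r"^.*?\b(49|4050)-(?P<num>\d{2,})\b.*$", re.I)

# Hypothetical cvinfo contents (one line containing a ComicVine volume URL).
urllink = "https://comicvine.gamespot.com/example-series/4050-796/\n"

match = pattern.match(urllink.strip())
if match:
    print(match.group("num"))  # 796 -> becomes cvinfo_CID above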
Example #7
def libraryScan(dir=None,
                append=False,
                ComicID=None,
                ComicName=None,
                cron=None):

    if cron and not mylar.LIBRARYSCAN:
        return

    if not dir:
        dir = mylar.COMIC_DIR

    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' %
                    dir.decode(mylar.SYS_ENCODING, 'replace'))
        return

    logger.info('Scanning comic directory: %s' %
                dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    comiccnt = 0
    extensions = ('cbr', 'cbz')
    for r, d, f in os.walk(dir):
        #for directory in d[:]:
        #    if directory.startswith("."):
        #        d.remove(directory)
        for files in f:
            if any(files.lower().endswith('.' + x.lower())
                   for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                print "Comic: " + comic
                print "Comic Path: " + comicpath
                print "Comic Size: " + str(comicsize)

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING,
                                                      'replace')

                comiccnt += 1
                comic_dict = {
                    'ComicFilename': comic,
                    'ComicLocation': comicpath,
                    'ComicSize': comicsize,
                    'Unicode_ComicLocation': unicode_comic_path
                }
                comic_list.append(comic_dict)

        logger.info("I've found a total of " + str(comiccnt) +
                    " comics....analyzing now")
        logger.info("comiclist: " + str(comic_list))
    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info(
        "loading in the watchlist to see if a series is being watched already..."
    )
    watchlist = myDB.action("SELECT * from comics")
    ComicName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ',
                            watch['ComicName']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if ' the ' in watchcomic.lower():
            #drop the 'the' from the watchcomic title for proper comparisons.
            watchcomic = watchcomic[-4:]

        alt_chk = "no"  # alt-checker flag (default to no)

        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch[
                'AlternateSearch'] != 'None':
            altcomic = re.sub(
                '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ',
                watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt += 1

    logger.info("Successfully loaded " + str(watchcnt) +
                " series from your watchlist.")

    ripperlist = ['digital-', 'empire', 'dcp']

    watchfound = 0

    datelist = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov', 'dec'
    ]
    #    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
    #    #search for number as text, and change to numeric
    #    for numbs in basnumbs:
    #        #print ("numbs:" + str(numbs))
    #        if numbs in ComicName.lower():
    #            numconv = basnumbs[numbs]
    #            #print ("numconv: " + str(numconv))

    for i in comic_list:
        print i['ComicFilename']

        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        #let's clean up the filename for matching purposes

        cfilename = re.sub('[\_\#\,\/\:\;\-\!\$\%\&\+\'\?\@]', ' ',
                           comfilename)
        #cfilename = re.sub('\s', '_', str(cfilename))

        #versioning - remove it
        subsplit = cfilename.replace('_', ' ').split()
        volno = None
        volyr = None
        for subit in subsplit:
            if subit[0].lower() == 'v':
                vfull = 0
                if subit[1:].isdigit():
                    #if in format v1, v2009 etc...
                    if len(subit) > 3:
                        # if it's greater than 3 in length, then the format is Vyyyy
                        vfull = 1  # add on 1 character length to account for extra space
                    cfilename = re.sub(subit, '', cfilename)
                    volno = re.sub("[^0-9]", " ", subit)
                elif subit.lower()[:3] == 'vol':
                    #if in format vol.2013 etc
                    #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
                    logger.fdebug('volume indicator detected as version #:' +
                                  str(subit))
                    cfilename = re.sub(subit, '', cfilename)
                    cfilename = " ".join(cfilename.split())
                    volyr = re.sub("[^0-9]", " ", subit).strip()
                    logger.fdebug('volume year set as : ' + str(volyr))
        cm_cn = 0

        #we need to track the counter to make sure we are comparing the right array parts
        #this takes care of the brackets :)
        m = re.findall('[^()]+', cfilename)
        lenm = len(m)
        logger.fdebug("there are " + str(lenm) + " words.")
        cnt = 0
        yearmatch = "false"
        foundonwatch = "False"
        issue = 999999

        while (cnt < lenm):
            if m[cnt] is None: break
            if m[cnt] == ' ':
                pass
            else:
                logger.fdebug(str(cnt) + ". Bracket Word: " + m[cnt])
                if cnt == 0:
                    comic_andiss = m[cnt]
                    logger.fdebug("Comic: " + comic_andiss)
                    # if it's not in the standard format this will bork.
                    # let's try to accommodate (somehow).
                    # first remove the extension (if any)
                    extensions = ('cbr', 'cbz')
                    if comic_andiss.lower().endswith(extensions):
                        comic_andiss = comic_andiss[:-4]
                        logger.fdebug("removed extension from filename.")
                    #now we have to break up the string regardless of formatting.
                    #let's force the spaces.
                    comic_andiss = re.sub('_', ' ', comic_andiss)
                    cs = comic_andiss.split()
                    cs_len = len(cs)
                    cn = ''
                    ydetected = 'no'
                    idetected = 'no'
                    decimaldetect = 'no'
                    for i in reversed(xrange(len(cs))):
                        #start at the end.
                        logger.fdebug("word: " + str(cs[i]))
                        #assume once we find issue - everything prior is the actual title
                        #idetected = no will ignore everything so it will assume all title
                        if cs[i][:-2] == '19' or cs[
                                i][:-2] == '20' and idetected == 'no':
                            logger.fdebug("year detected: " + str(cs[i]))
                            ydetected = 'yes'
                            result_comyear = cs[i]
                        elif cs[i].isdigit(
                        ) and idetected == 'no' or '.' in cs[i]:
                            issue = cs[i]
                            logger.fdebug("issue detected : " + str(issue))
                            idetected = 'yes'
                            if '.' in cs[i]:
                                #make sure it's a number on either side of decimal and assume decimal issue.
                                decst = cs[i].find('.')
                                dec_st = cs[i][:decst]
                                dec_en = cs[i][decst + 1:]
                                logger.fdebug("st: " + str(dec_st))
                                logger.fdebug("en: " + str(dec_en))
                                if dec_st.isdigit() and dec_en.isdigit():
                                    logger.fdebug(
                                        "decimal issue detected...adjusting.")
                                    issue = dec_st + "." + dec_en
                                    logger.fdebug("issue detected: " +
                                                  str(issue))
                                    idetected = 'yes'
                                else:
                                    logger.fdebug(
                                        "false decimal represent. Chunking to extra word."
                                    )
                                    cn = cn + cs[i] + " "
                                    break
                        elif '\#' in cs[i] or decimaldetect == 'yes':
                            logger.fdebug("issue detected: " + str(cs[i]))
                            idetected = 'yes'
                        else:
                            cn = cn + cs[i] + " "
                    if ydetected == 'no':
                        #assume no year given in filename...
                        result_comyear = "0000"
                    logger.fdebug("cm?: " + str(cn))
                    if issue != 999999:
                        comiss = issue
                    else:
                        logger.error(
                            "Invalid Issue number (none present) for " +
                            comfilename)
                        break
                    cnsplit = cn.split()
                    cname = ''
                    findcn = 0
                    while (findcn < len(cnsplit)):
                        cname = cname + cs[findcn] + " "
                        findcn += 1
                    cname = cname[:len(cname) - 1]  # drop the end space...
                    print("assuming name is : " + cname)
                    com_NAME = cname
                    print("com_NAME : " + com_NAME)
                    yearmatch = "True"
                else:
                    logger.fdebug('checking ' + m[cnt])
                    # we're assuming that the year is in brackets (and it should be damnit)
                    if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                        print("year detected: " + str(m[cnt]))
                        ydetected = 'yes'
                        result_comyear = m[cnt]
                    elif m[cnt][:3].lower() in datelist:
                        logger.fdebug(
                            'possible issue date format given - verifying')
                        #if the date of the issue is given as (Jan 2010) or (January 2010) let's adjust.
                        #keeping in mind that ',' and '.' are already stripped from the string
                        if m[cnt][-4:].isdigit():
                            ydetected = 'yes'
                            result_comyear = m[cnt][-4:]
                            logger.fdebug('Valid Issue year of ' +
                                          str(result_comyear) +
                                          'detected in format of ' +
                                          str(m[cnt]))
            cnt += 1

        splitit = []
        watchcomic_split = []
        logger.fdebug("filename comic and issue: " + comic_andiss)

        #changed this from '' to ' '
        comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
        comic_iss = comic_iss_b4.replace('.', ' ')
        comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
        logger.fdebug("adjusted comic and issue: " + str(comic_iss))
        #remove 'the' from here for proper comparisons.
        if ' the ' in comic_iss.lower():
            comic_iss = comic_iss[-4:]
        splitit = comic_iss.split(None)
        logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " +
                      str(comic_iss))
        #bmm = re.findall('v\d', comic_iss)
        #if len(bmm) > 0: splitst = len(splitit) - 2
        #else: splitst = len(splitit) - 1
        #-----
        #here we cycle through the Watchlist looking for a match.
        while (cm_cn < watchcnt):
            #setup the watchlist
            comname = ComicName[cm_cn]
            print("watch_comic:" + comname)
            comyear = ComicYear[cm_cn]
            compub = ComicPublisher[cm_cn]
            comtotal = ComicTotal[cm_cn]
            comicid = ComicID[cm_cn]
            watch_location = ComicLocation[cm_cn]

            #            if splitit[(len(splitit)-1)].isdigit():
            #                #compares - if the last digit and second last digit are #'s separated by spaces assume decimal
            #                comic_iss = splitit[(len(splitit)-1)]
            #                splitst = len(splitit) - 1
            #                if splitit[(len(splitit)-2)].isdigit():
            #                    # for series that have a digit at the end, it screws up the logistics.
            #                    i = 1
            #                    chg_comic = splitit[0]
            #                    while (i < (len(splitit)-1)):
            #                        chg_comic = chg_comic + " " + splitit[i]
            #                        i+=1
            #                    logger.fdebug("chg_comic:" + str(chg_comic))
            #                    if chg_comic.upper() == comname.upper():
            #                        logger.fdebug("series contains numerics...adjusting..")
            #                    else:
            #                        changeup = "." + splitit[(len(splitit)-1)]
            #                        logger.fdebug("changeup to decimal: " + str(changeup))
            #                        comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss
            #                        splitst = len(splitit) - 2
            #            else:
            # if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb
            #               logger.fdebug("invalid naming format of filename detected - cannot properly determine issue")
            #               continue

            # make sure that things like - in watchcomic are accounted for when comparing to nzb.

            # there shouldn't be an issue in the comic now, so let's just assume it's all gravy.
            splitst = len(splitit)
            watchcomic_split = helpers.cleanName(comname)
            watchcomic_split = re.sub('[\-\:\,\.]', ' ',
                                      watchcomic_split).split(None)

            logger.fdebug(
                str(splitit) + " file series word count: " + str(splitst))
            logger.fdebug(
                str(watchcomic_split) + " watchlist word count: " +
                str(len(watchcomic_split)))
            if (splitst) != len(watchcomic_split):
                logger.fdebug("incorrect comic lengths...not a match")
#                if str(splitit[0]).lower() == "the":
#                    logger.fdebug("THE word detected...attempting to adjust pattern matching")
#                    splitit[0] = splitit[4:]
            else:
                logger.fdebug("length match..proceeding")
                n = 0
                scount = 0
                logger.fdebug("search-length: " + str(splitst))
                logger.fdebug("Watchlist-length: " +
                              str(len(watchcomic_split)))
                while (n <= (splitst) - 1):
                    logger.fdebug("splitit: " + str(splitit[n]))
                    if n < (splitst) and n < len(watchcomic_split):
                        logger.fdebug(
                            str(n) + " Comparing: " +
                            str(watchcomic_split[n]) + " .to. " +
                            str(splitit[n]))
                        if '+' in watchcomic_split[n]:
                            watchcomic_split[n] = re.sub(
                                r'\+', '', str(watchcomic_split[n]))
                        if str(watchcomic_split[n].lower()) in str(
                                splitit[n].lower()) and len(
                                    watchcomic_split[n]) >= len(splitit[n]):
                            logger.fdebug("word matched on : " +
                                          str(splitit[n]))
                            scount += 1
                        #elif ':' in splitit[n] or '-' in splitit[n]:
                        #    splitrep = splitit[n].replace('-', '')
                        #    print ("non-character keyword...skipped on " + splitit[n])
                    elif str(splitit[n]).lower().startswith('v'):
                        logger.fdebug("possible versioning..checking")
                        #we hit a versioning # - account for it
                        if splitit[n][1:].isdigit():
                            comicversion = str(splitit[n])
                            logger.fdebug("version found: " +
                                          str(comicversion))
                    else:
                        logger.fdebug("Comic / Issue section")
                        if splitit[n].isdigit():
                            logger.fdebug("issue detected")
                            #comiss = splitit[n]
#                            comicNAMER = n - 1
#                            com_NAME = splitit[0]
#                           cmnam = 1
#                            while (cmnam <= comicNAMER):
#                                com_NAME = str(com_NAME) + " " + str(splitit[cmnam])
#                                cmnam+=1
#                            logger.fdebug("comic: " + str(com_NAME))
                        else:
                            logger.fdebug("non-match for: " + str(splitit[n]))
                            pass
                    n += 1
                #set the match threshold to 80% (for now)
                # if it's less than 80% consider it a non-match and discard.
                #splitit has to splitit-1 because last position is issue.
                wordcnt = int(scount)
                logger.fdebug("scount:" + str(wordcnt))
                totalcnt = int(splitst)
                logger.fdebug("splitit-len:" + str(totalcnt))
                spercent = (wordcnt / float(totalcnt)) * 100
                logger.fdebug("we got " + str(spercent) + " percent.")
                if int(spercent) >= 80:
                    logger.fdebug("it's a go captain... - we matched " +
                                  str(spercent) + "%!")
                    logger.fdebug("this should be a match!")
                    #                    if '.' in comic_iss:
                    #                        comisschk_find = comic_iss.find('.')
                    #                        comisschk_b4dec = comic_iss[:comisschk_find]
                    #                        comisschk_decval = comic_iss[comisschk_find+1:]
                    #                        logger.fdebug("Found IssueNumber: " + str(comic_iss))
                    #                        logger.fdebug("..before decimal: " + str(comisschk_b4dec))
                    #                        logger.fdebug("...after decimal: " + str(comisschk_decval))
                    #                        #--let's make sure we don't wipe out decimal issues ;)
                    #                        if int(comisschk_decval) == 0:
                    #                            ciss = comisschk_b4dec
                    #                            cintdec = int(comisschk_decval)
                    #                        else:
                    #                            if len(comisschk_decval) == 1:
                    #                                ciss = comisschk_b4dec + "." + comisschk_decval
                    #                                cintdec = int(comisschk_decval) * 10
                    #                            else:
                    #                                ciss = comisschk_b4dec + "." + comisschk_decval.rstrip('0')
                    #                                cintdec = int(comisschk_decval.rstrip('0')) * 10
                    #                        comintIss = (int(comisschk_b4dec) * 1000) + cintdec
                    #                    else:
                    #                        comintIss = int(comic_iss) * 1000
                    logger.fdebug("issue we found for is : " + str(comiss))
                    #set the year to the series we just found ;)
                    result_comyear = comyear
                    #issue comparison now as well
                    logger.info(u"Found " + comname + " (" + str(comyear) +
                                ") issue: " + str(comiss))
                    #                    watchfound+=1
                    watchmatch = str(comicid)
                    #                    watch_kchoice.append({
                    #                       "ComicID":         str(comicid),
                    #                       "ComicName":       str(comname),
                    #                       "ComicYear":       str(comyear),
                    #                       "ComicIssue":      str(int(comic_iss)),
                    #                       "ComicLocation":   str(watch_location),
                    #                       "OriginalLocation" : str(comlocation),
                    #                       "OriginalFilename" : str(comfilename)
                    #                                        })
                    foundonwatch = "True"
                    break
                elif int(spercent) < 80:
                    logger.fdebug("failure - we only got " + str(spercent) +
                                  "% right!")
            cm_cn += 1

        if foundonwatch == "False":
            watchmatch = None
        #---if it's not a match - send it to the importer.
        n = 0
        #        print ("comic_andiss : " + str(comic_andiss))
        #        csplit = comic_andiss.split(None)
        #        while ( n <= (len(csplit)-1) ):
        #            print ("csplit:" + str(csplit[n]))
        #            if csplit[n].isdigit():
        #                logger.fdebug("issue detected")
        #                comiss = splitit[n]
        #                logger.fdebug("issue # : " + str(comiss))
        #                comicNAMER = n - 1
        #                com_NAME = csplit[0]
        #                cmnam = 1
        #                while (cmnam <= comicNAMER):
        #                    com_NAME = str(com_NAME) + " " + str(csplit[cmnam])
        #                    cmnam+=1
        #                logger.fdebug("comic: " + str(com_NAME))
        #            n+=1
        if volyr is None:
            if result_comyear is None:
                result_comyear = '0000'  #no year in filename basically.
        else:
            if result_comyear is None:
                result_comyear = volyr
        if volno is None:
            if volyr is None:
                vol_label = None
            else:
                vol_label = volyr
        else:
            vol_label = volno

        print("adding " + com_NAME + " to the import-queue!")
        impid = com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
        print("impid: " + str(impid))
        import_by_comicids.append({
            "impid": impid,
            "watchmatch": watchmatch,
            "comicname": com_NAME,
            "comicyear": result_comyear,
            "volume": vol_label,
            "comfilename": comfilename,
            "comlocation": comlocation.decode(mylar.SYS_ENCODING)
        })

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        print("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " +
                    str(comiccnt) +
                    " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info(
                    "You checked off Move Files...so that's what I'm going to do"
                )
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                print("Moving files into appropriate directory")
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    print("ComicLocation: " + str(watch_comlocation))
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename']
                    print("Orig. Location: " + str(orig_comlocation))
                    print("Orig. Filename: " + str(orig_filename))
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        print(
                            "Renaming files according to configuration details : "
                            + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(
                            watch_comicid, watch_comicname, watch_comicyear,
                            watch_comiciss)
                        nfilename = renameit['nfilename']

                        dst_path = os.path.join(watch_comlocation, nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        print(
                            "Renaming files not enabled, keeping original filename(s)"
                        )
                        dst_path = os.path.join(watch_comlocation,
                                                orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    print("I'm going to move " + str(orig_comlocation) +
                          " to .." + str(dst_path))
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info(
                            "Failed to move directory - check directories and manually re-run."
                        )
                    wat += 1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    print("ComicID: " + str(watch_comicid))
                    print("Issue#: " + str(watch_issue))
                    issuechk = myDB.action(
                        "SELECT * from issues where ComicID=? AND INT_IssueNumber=?",
                        [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        print("no matching issues for this comic#")
                    else:
                        print("...Existing status: " + str(issuechk['Status']))
                        control = {"IssueID": issuechk['IssueID']}
                        values = {"Status": "Archived"}
                        print("...changing status of " +
                              str(issuechk['Issue_Number']) + " to Archived ")
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat += 1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    print("Rescanning.. " + str(comicids[c]))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"
    if len(import_by_comicids) > 0:
        import_comicids['comic_info'] = import_by_comicids
        print("import comicids: " + str(import_by_comicids))
        return import_comicids, len(import_by_comicids)
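The watch-list comparison above is essentially a word-by-word overlap score between the parsed filename and the stored series title, accepted at 80% or better. A minimal, self-contained sketch of that scoring (the normalization and threshold mirror the code above; the function name and sample titles are illustrative, not Mylar's API):

import re

def match_percent(file_title, watch_title):
    # normalize both titles the same way as above: punctuation to spaces, then split on whitespace
    file_words = re.sub(r'[\-\:\,\.]', ' ', file_title).split()
    watch_words = re.sub(r'[\-\:\,\.]', ' ', watch_title).split()
    if len(file_words) != len(watch_words):
        return 0.0  # differing word counts are treated as an immediate non-match
    matched = sum(1 for f, w in zip(file_words, watch_words) if w.lower() in f.lower())
    return (matched / float(len(file_words))) * 100

# e.g. match_percent('Amazing Spider-Man', 'Amazing Spider Man') -> 100.0, which clears the 80% cut-off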
Ejemplo n.º 8
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
            self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            if mylar.USE_SABNZBD==0:
                logger.fdebug("Not using SABNzbd")
            else:
                # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(mylar.SAB_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
    
                #lookup nzb_name in nzblog table to get issueid
    
                #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
                #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
                querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                #logger.info("querysab_string:" + str(querysab))
                file = urllib2.urlopen(querysab)
                data = file.read()
                file.close()
                dom = parseString(data)
    
                sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
                sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
                logger.fdebug("SAB Replace Spaces: " + str(sabreps))
                logger.fdebug("SAB Replace Dots: " + str(sabrepd))
            if mylar.USE_NZBGET==1:
                logger.fdebug("Using NZBGET")
                logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            myDB = db.DBConnection()

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext, logger.DEBUG)
                nzbname = fd

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))

            logger.fdebug("After conversions, nzbname is : " + str(nzbname))
#            if mylar.USE_NZBGET==1:
#                nzbname=self.nzb_name
            self._log("nzbname: " + str(nzbname), logger.DEBUG)

            nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
                logger.fdebug("Failure - could not locate nzbfile initially.")
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
                logger.fdebug("trying again with nzbname of : " + str(nzbname))
                nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
                    return
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
                    issueid = nzbiss['IssueID']
            else: 
                issueid = nzbiss['IssueID']
                print "issueid:" + str(issueid)
                #use issueid to get publisher, series, year, issue number
            issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
            if helpers.is_number(issueid):
                sandwich = int(issuenzb['IssueID'])
            else:
                #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
                #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
                sandwich = 1
            if issuenzb is None or sandwich >= 900000:
                # this has no issueID, therefore it's a one-off or a manual post-proc.
                # At this point, let's just drop it into the Comic Location folder and forget about it..
                self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
                logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
                self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)
                for root, dirnames, filenames in os.walk(self.nzb_folder):
                    for filename in filenames:
                        if filename.lower().endswith(extensions):
                            ofilename = filename
                            path, ext = os.path.splitext(ofilename)

                if mylar.GRABBAG_DIR:
                    grdst = mylar.GRABBAG_DIR
                else:
                    grdst = mylar.DESTINATION_DIR

                grab_dst = os.path.join(grdst, ofilename)
                self._log("Destination Path : " + grab_dst, logger.DEBUG)
                grab_src = os.path.join(self.nzb_folder, ofilename)
                self._log("Source Path : " + grab_src, logger.DEBUG)
                logger.info("Moving " + str(ofilename) + " into grab-bag directory : " + str(grdst))

                try:
                    shutil.move(grab_src, grab_dst)
                except (OSError, IOError):
                    self.log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                    logger.debug("Failed to move directory - check directories and manually re-run.")
                    return self.log
                #tidyup old path
                try:
                    shutil.rmtree(self.nzb_folder)
                except (OSError, IOError):
                    self._log("Failed to remove temporary directory.", logger.DEBUG)
                    logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                    return self.log

                logger.debug("Removed temporary directory : " + str(self.nzb_folder))
                self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
                #delete entry from nzblog table
                myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
                return self.log

            comicid = issuenzb['ComicID']
            issuenumOG = issuenzb['Issue_Number']
            #issueno = str(issuenum).split('.')[0]
            #new CV API - removed all decimals...here we go AGAIN!
            issuenum = issuenumOG
            issue_except = 'None'
            if 'au' in issuenum.lower():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AU'
            if '.' in issuenum:
                iss_find = issuenum.find('.')
                iss_b4dec = issuenum[:iss_find]
                iss_decval = issuenum[iss_find+1:]
                if int(iss_decval) == 0:
                    iss = iss_b4dec
                    issdec = int(iss_decval)
                    issueno = str(iss)
                    self._log("Issue Number: " + str(issueno), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(issueno))
                else:
                    if len(iss_decval) == 1:
                        iss = iss_b4dec + "." + iss_decval
                        issdec = int(iss_decval) * 10
                    else:
                        iss = iss_b4dec + "." + iss_decval.rstrip('0')
                        issdec = int(iss_decval.rstrip('0')) * 10
                    issueno = iss_b4dec
                    self._log("Issue Number: " + str(iss), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(iss))
            else:
                iss = issuenum
                issueno = str(iss)
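            # e.g. an issuenum of '12.50' splits into iss_b4dec '12' and iss_decval '50', giving iss '12.5'
            # and issueno '12'; a plain '12' falls straight through with iss = issueno = '12' (illustrative values).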
            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

            if len(str(issueno)) > 0:
                if int(issueno) < 10:
                    self._log("issue detected less than 10", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None': 
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                else:
                    self._log("issue detected greater than 100", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss), logger.DEBUG)

            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear), logger.DEBUG)
            logger.fdebug("Issue Year : " + str(issueyear))
            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher, logger.DEBUG)
            logger.fdebug("Publisher: " + str(publisher))
            #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
            series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
            self._log("Series: " + series, logger.DEBUG)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear, logger.DEBUG)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation, logger.DEBUG)
            logger.fdebug("Comic Location: " + str(comlocation))
            comversion = comicnzb['ComicVersion']
            self._log("Comic Version: " + str(comversion), logger.DEBUG)
            logger.fdebug("Comic Version: " + str(comversion))
            if comversion is None:
                comversion = 'None'
            #if comversion is None, remove it so it doesn't populate with 'None'
            if comversion == 'None':
                chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
                logger.fdebug("No version # found for series, removing from filename")
                logger.fdebug("new format is now: " + str(chunk_file_format))
            else:
                chunk_file_format = mylar.FILE_FORMAT
            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$VolumeY':   'V' + str(seriesyear),
                           '$VolumeN':   comversion
                          }

            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)
            self._log("Original Filename: " + ofilename, logger.DEBUG)
            self._log("Original Extension: " + ext, logger.DEBUG)
            logger.fdebug("Original Filname: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
                self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(chunk_file_format, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:\?]', '', nfilename)
            self._log("New Filename: " + nfilename, logger.DEBUG)
            logger.fdebug("New Filename: " + str(nfilename))

            src = os.path.join(self.nzb_folder, ofilename)

            filechecker.validateAndCreateDirectory(comlocation, True)

            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src, logger.DEBUG)
            self._log("Destination:" +  dst, logger.DEBUG)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return

            self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
            #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
            #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
            self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

            if mylar.PROWL_ENABLED:
                pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
                logger.info(u"Prowl request")
                prowl = notifiers.PROWL()
                prowl.notify(pushmessage,"Download and Postprocessing completed")

            if mylar.NMA_ENABLED:
                nma = notifiers.NMA()
                nma.notify(series, str(issueyear), str(issuenumOG))

            if mylar.PUSHOVER_ENABLED:
                pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
                logger.info(u"Pushover request")
                pushover = notifiers.PUSHOVER()
                pushover.notify(pushmessage, "Download and Post-Processing completed")
             
            # retrieve/create the corresponding comic objects

            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbname, self.nzb_folder, filen, folderp, seriesmetadata )

            return self.log
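The zero-suppression branches above boil down to prefixing the issue number with the configured amount of padding before the filename is built. A compact sketch of that idea for plain integer issues (a simplification of the branches above, assuming no decimals or AU-style suffixes; the function name is illustrative):

def pretty_issue(issueno, zero_level_n):
    # a simplified mirror of the padding branches above (decimals and suffixes ignored)
    num = int(issueno)
    if num < 10:
        zeroadd = {"none": "", "0x": "0", "00x": "00"}.get(zero_level_n, "")
    elif num < 100:
        zeroadd = "" if zero_level_n == "none" else "0"
    else:
        zeroadd = ""
    return zeroadd + str(num)

# e.g. pretty_issue('7', '00x') -> '007', pretty_issue('23', '00x') -> '023', pretty_issue('150', '00x') -> '150'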
Ejemplo n.º 9
0
                    else:
                        logger.fdebug('...Existing status: ' + str(issuechk['Status']))
                        control = {"IssueID":   issuechk['IssueID']}
                        values = {"Status":   "Archived"}
                        logger.fdebug('...changing status of ' + str(issuechk['Issue_Number']) + ' to Archived ')
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat+=1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    logger.fdebug('Rescanning.. ' + str(comicids[c]))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"

    if len(import_by_comicids) > 0 or len(vals) > 0:
        #import_comicids['comic_info'] = import_by_comicids
        #if vals:
        #    import_comicids['issueid_info'] = vals
        #else:
        #    import_comicids['issueid_info'] = None
        if vals:
            cvimport_comicids = vals
            import_cv_ids = len(vals)
        else:
            cvimport_comicids = None
            import_cv_ids = 0
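The rescan block in this fragment is meant to walk each ComicID collected into comicids and rescan it exactly once; a minimal sketch of that pattern (force_rescan stands in for mylar's updater.forceRescan, everything else is illustrative):

def rescan_all(comicids, force_rescan):
    # de-duplicate first so a series that was collected twice is only rescanned once
    for cid in sorted(set(comicids)):
        print('Rescanning.. ' + str(cid))
        force_rescan(cid)

# e.g. rescan_all(['1050', '2231', '1050'], updater.forceRescan) rescans ComicIDs 1050 and 2231 once each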
Ejemplo n.º 10
0
    def Process_next(self, comicid, issueid, issuenumOG, ml=None):
        annchk = "no"
        extensions = ('.cbr', '.cbz')
        myDB = db.DBConnection()
        comicnzb = myDB.action("SELECT * from comics WHERE comicid=?",
                               [comicid]).fetchone()
        issuenzb = myDB.action(
            "SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL",
            [issueid, comicid]).fetchone()
        logger.fdebug('issueid: ' + str(issueid))
        logger.fdebug('issuenumOG: ' + str(issuenumOG))
        if issuenzb is None:
            issuenzb = myDB.action(
                "SELECT * from annuals WHERE issueid=? and comicid=?",
                [issueid, comicid]).fetchone()
            annchk = "yes"
        #issueno = str(issuenum).split('.')[0]
        #new CV API - removed all decimals...here we go AGAIN!
        issuenum = issuenzb['Issue_Number']
        issue_except = 'None'

        if 'au' in issuenum.lower() and issuenum[:1].isdigit():
            issuenum = re.sub("[^0-9]", "", issuenum)
            issue_except = ' AU'
        elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
            issuenum = re.sub("[^0-9]", "", issuenum)
            issue_except = ' AI'
        elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
            issuenum = re.sub("[^0-9]", "", issuenum)
            issue_except = '.INH'
        elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
            if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
            issuenum = re.sub("[^0-9]", "", issuenum)
            issue_except = '.NOW'
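        # e.g. an Issue_Number of '27AU' becomes issuenum '27' with issue_except ' AU', and '1NOW!'
        # becomes issuenum '1' with issue_except '.NOW' (illustrative values).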

        if '.' in issuenum:
            iss_find = issuenum.find('.')
            iss_b4dec = issuenum[:iss_find]
            iss_decval = issuenum[iss_find + 1:]
            if int(iss_decval) == 0:
                iss = iss_b4dec
                issdec = int(iss_decval)
                issueno = str(iss)
                self._log("Issue Number: " + str(issueno), logger.DEBUG)
                logger.fdebug("Issue Number: " + str(issueno))
            else:
                if len(iss_decval) == 1:
                    iss = iss_b4dec + "." + iss_decval
                    issdec = int(iss_decval) * 10
                else:
                    iss = iss_b4dec + "." + iss_decval.rstrip('0')
                    issdec = int(iss_decval.rstrip('0')) * 10
                issueno = iss_b4dec
                self._log("Issue Number: " + str(iss), logger.DEBUG)
                logger.fdebug("Issue Number: " + str(iss))
        else:
            iss = issuenum
            issueno = str(iss)

        # issue zero-suppression here
        if mylar.ZERO_LEVEL == "0":
            zeroadd = ""
        else:
            if mylar.ZERO_LEVEL_N == "none": zeroadd = ""
            elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
            elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

        logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

        if len(str(issueno)) > 0:
            if int(issueno) < 10:
                self._log("issue detected less than 10", logger.DEBUG)
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                else:
                    prettycomiss = str(zeroadd) + str(iss)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                self._log(
                    "Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) +
                    ". Issue will be set as : " + str(prettycomiss),
                    logger.DEBUG)
            elif int(issueno) >= 10 and int(issueno) < 100:
                self._log("issue detected greater than 10, but less than 100",
                          logger.DEBUG)
                if mylar.ZERO_LEVEL_N == "none":
                    zeroadd = ""
                else:
                    zeroadd = "0"
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                else:
                    prettycomiss = str(zeroadd) + str(iss)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                self._log(
                    "Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) +
                    ".Issue will be set as : " + str(prettycomiss),
                    logger.DEBUG)
            else:
                self._log("issue detected greater than 100", logger.DEBUG)
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                prettycomiss = str(issueno)
                if issue_except != 'None':
                    prettycomiss = str(prettycomiss) + issue_except
                self._log(
                    "Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) +
                    ". Issue will be set as : " + str(prettycomiss),
                    logger.DEBUG)
        else:
            prettycomiss = str(issueno)
            self._log(
                "issue length error - cannot determine length. Defaulting to None:  "
                + str(prettycomiss), logger.DEBUG)

        if annchk == "yes":
            self._log("Annual detected.")
        logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
        issueyear = issuenzb['IssueDate'][:4]
        self._log("Issue Year: " + str(issueyear), logger.DEBUG)
        logger.fdebug("Issue Year : " + str(issueyear))
        month = issuenzb['IssueDate'][5:7].replace('-', '').strip()
        month_name = helpers.fullmonth(month)
        #            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
        publisher = comicnzb['ComicPublisher']
        self._log("Publisher: " + publisher, logger.DEBUG)
        logger.fdebug("Publisher: " + str(publisher))
        #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
        series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
        self._log("Series: " + series, logger.DEBUG)
        logger.fdebug("Series: " + str(series))
        seriesyear = comicnzb['ComicYear']
        self._log("Year: " + seriesyear, logger.DEBUG)
        logger.fdebug("Year: " + str(seriesyear))
        comlocation = comicnzb['ComicLocation']
        self._log("Comic Location: " + comlocation, logger.DEBUG)
        logger.fdebug("Comic Location: " + str(comlocation))
        comversion = comicnzb['ComicVersion']
        self._log("Comic Version: " + str(comversion), logger.DEBUG)
        logger.fdebug("Comic Version: " + str(comversion))
        if comversion is None:
            comversion = 'None'
        #if comversion is None, remove it so it doesn't populate with 'None'
        if comversion == 'None':
            chunk_f_f = re.sub('\$VolumeN', '', mylar.FILE_FORMAT)
            chunk_f = re.compile(r'\s+')
            chunk_file_format = chunk_f.sub(' ', chunk_f_f)
            self._log(
                "No version # found for series - tag will not be available for renaming.",
                logger.DEBUG)
            logger.fdebug(
                "No version # found for series, removing from filename")
            logger.fdebug("new format is now: " + str(chunk_file_format))
        else:
            chunk_file_format = mylar.FILE_FORMAT

        if annchk == "no":
            chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
            chunk_f = re.compile(r'\s+')
            chunk_file_format = chunk_f.sub(' ', chunk_f_f)
            logger.fdebug('not an annual - removing from filename parameters')
            logger.fdebug('new format: ' + str(chunk_file_format))

        else:
            logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
            if '$Annual' not in chunk_file_format:
                #if it's an annual, but $Annual isn't specified in file_format, we need to
                #force it in there, by default in the format of $Annual $Issue
                prettycomiss = "Annual " + str(prettycomiss)
                logger.fdebug('prettycomiss: ' + str(prettycomiss))

        ofilename = None

        #if meta-tagging is not enabled, we need to declare the check as being fail
        #if meta-tagging is enabled, it gets changed just below to a default of pass
        pcheck = "fail"

        #tag the meta.
        if mylar.ENABLE_META:
            self._log("Metatagging enabled - proceeding...")
            logger.fdebug("Metatagging enabled - proceeding...")
            pcheck = "pass"
            try:
                import cmtagmylar
                if ml is None:
                    pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
                else:
                    pcheck = cmtagmylar.run(self.nzb_folder,
                                            issueid=issueid,
                                            manual="yes",
                                            filename=ml['ComicLocation'])

            except ImportError:
                logger.fdebug(
                    "comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/"
                )
                logger.fdebug(
                    "continuing with PostProcessing, but I'm not using metadata."
                )
                pcheck = "fail"

            if pcheck == "fail":
                self._log(
                    "Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging..."
                )
                logger.fdebug(
                    "Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging..."
                )
            elif pcheck == "unrar error":
                self._log(
                    "This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy."
                )
                logger.error(
                    "This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy."
                )
                return self.log
            else:
                otofilename = pcheck
                self._log("Sucessfully wrote metadata to .cbz - Continuing..")
                logger.fdebug("Sucessfully wrote metadata to .cbz (" +
                              str(otofilename) + ") - Continuing..")
        #Run Pre-script

        if mylar.ENABLE_PRE_SCRIPTS:
            nzbn = self.nzb_name  #original nzb name
            nzbf = self.nzb_folder  #original nzb folder
            #name, comicyear, comicid , issueid, issueyear, issue, publisher
            #create the dic and send it.
            seriesmeta = []
            seriesmetadata = {}
            seriesmeta.append({
                'name': series,
                'comicyear': seriesyear,
                'comicid': comicid,
                'issueid': issueid,
                'issueyear': issueyear,
                'issue': issuenum,
                'publisher': publisher
            })
            seriesmetadata['seriesmeta'] = seriesmeta
            self._run_pre_scripts(nzbn, nzbf, seriesmetadata)

    #rename file and move to new path
    #nfilename = series + " " + issueno + " (" + seriesyear + ")"

        file_values = {
            '$Series': series,
            '$Issue': prettycomiss,
            '$Year': issueyear,
            '$series': series.lower(),
            '$Publisher': publisher,
            '$publisher': publisher.lower(),
            '$VolumeY': 'V' + str(seriesyear),
            '$VolumeN': comversion,
            '$monthname': month_name,
            '$month': month,
            '$Annual': 'Annual'
        }
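        # e.g. with a FILE_FORMAT of '$Series $Annual $Issue ($Year)' these values could render as
        # 'Some Series Annual 007 (2013)' once substituted (hypothetical series and values).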

        #if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
        if ml is None:

            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)
        else:
            if pcheck == "fail":
                otofilename = ml['ComicLocation']
            logger.fdebug('otofilename:' + str(otofilename))
            odir, ofilename = os.path.split(otofilename)
            logger.fdebug('ofilename: ' + str(ofilename))
            path, ext = os.path.splitext(ofilename)
            logger.fdebug('path: ' + str(path))
            logger.fdebug('ext:' + str(ext))

        if ofilename is None:
            logger.error(
                u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that "
                + str(self.nzb_folder) +
                " exists and is the correct location.")
            return
        self._log("Original Filename: " + ofilename, logger.DEBUG)
        self._log("Original Extension: " + ext, logger.DEBUG)
        logger.fdebug("Original Filname: " + str(ofilename))
        logger.fdebug("Original Extension: " + str(ext))

        if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
            self._log(
                "Rename Files isn't enabled...keeping original filename.",
                logger.DEBUG)
            logger.fdebug(
                "Rename Files isn't enabled - keeping original filename.")
            #check if extension is in nzb_name - will screw up otherwise
            if ofilename.lower().endswith(extensions):
                nfilename = ofilename[:-4]
            else:
                nfilename = ofilename
        else:
            nfilename = helpers.replace_all(chunk_file_format, file_values)
            if mylar.REPLACE_SPACES:
                #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
        nfilename = re.sub('[\,\:\?]', '', nfilename)
        nfilename = re.sub('[\/]', '-', nfilename)
        self._log("New Filename: " + nfilename, logger.DEBUG)
        logger.fdebug("New Filename: " + str(nfilename))

        src = os.path.join(self.nzb_folder, ofilename)

        filechecker.validateAndCreateDirectory(comlocation, True)

        if mylar.LOWERCASE_FILENAMES:
            dst = (comlocation + "/" + nfilename + ext).lower()
        else:
            dst = comlocation + "/" + nfilename + ext.lower()
        self._log("Source:" + src, logger.DEBUG)
        self._log("Destination:" + dst, logger.DEBUG)
        logger.fdebug("Source: " + str(src))
        logger.fdebug("Destination: " + str(dst))

        if ml is None:
            #non-manual run moving/deleting...
            logger.fdebug('self.nzb_folder: ' + self.nzb_folder)
            logger.fdebug('ofilename:' + str(ofilename))
            logger.fdebug('nfilename:' + str(nfilename + ext))
            os.rename(os.path.join(self.nzb_folder, str(ofilename)),
                      os.path.join(self.nzb_folder, str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                self._log(
                    "Failed to move directory - check directories and manually re-run.",
                    logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log(
                    "Failed to remove temporary directory - check directory and manually re-run.",
                    logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return

            self._log("Removed temporary directory : " + str(self.nzb_folder),
                      logger.DEBUG)
        else:
            #Manual Run, this is the portion.
            logger.fdebug("Renaming " +
                          os.path.join(self.nzb_folder, str(ofilename)) +
                          " ..to.. " +
                          os.path.join(self.nzb_folder, str(nfilename + ext)))
            os.rename(os.path.join(self.nzb_folder, str(ofilename)),
                      os.path.join(self.nzb_folder, str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
            logger.fdebug("Moving " + src + " ... to ... " + dst)
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                logger.fdebug(
                    "Failed to move directory - check directories and manually re-run."
                )
                logger.fdebug("Post-Processing ABORTED.")
                return
            logger.fdebug("Successfully moved to : " + dst)
            #tidyup old path
            #try:
            #    os.remove(os.path.join(self.nzb_folder, str(ofilename)))
            #    logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename)))
            #except (OSError, IOError):
            #    logger.fdebug("Failed to remove temporary directory - check directory and manually re-run.")
            #    logger.fdebug("Post-Processing ABORTED.")
            #    return
            #logger.fdebug("Removed temporary directory : " + str(self.nzb_folder))

            #delete entry from nzblog table
        myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
        #update snatched table to change status to Downloaded
        if annchk == "no":
            updater.foundsearch(comicid, issueid, down='True')
            dispiss = 'issue: ' + str(issuenumOG)
        else:
            updater.foundsearch(comicid, issueid, mode='want_ann', down='True')
            dispiss = 'annual issue: ' + str(issuenumOG)

        #force rescan of files
        updater.forceRescan(comicid)
        logger.info(u"Post-Processing completed for: " + series + " " +
                    dispiss)
        self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

        # retrieve/create the corresponding comic objects
        if mylar.ENABLE_EXTRA_SCRIPTS:
            folderp = str(dst)  #folder location after move/rename
            nzbn = self.nzb_name  #original nzb name
            filen = str(nfilename + ext)  #new filename
            #name, comicyear, comicid , issueid, issueyear, issue, publisher
            #create the dic and send it.
            seriesmeta = []
            seriesmetadata = {}
            seriesmeta.append({
                'name': series,
                'comicyear': seriesyear,
                'comicid': comicid,
                'issueid': issueid,
                'issueyear': issueyear,
                'issue': issuenum,
                'publisher': publisher
            })
            seriesmetadata['seriesmeta'] = seriesmeta
            self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp,
                                    seriesmetadata)

        if ml is not None:
            return self.log
        else:
            if mylar.PROWL_ENABLED:
                pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
                logger.info(u"Prowl request")
                prowl = notifiers.PROWL()
                prowl.notify(pushmessage,
                             "Download and Postprocessing completed")

            if mylar.NMA_ENABLED:
                nma = notifiers.NMA()
                nma.notify(series, str(issueyear), str(issuenumOG))

            if mylar.PUSHOVER_ENABLED:
                pushmessage = series + ' (' + str(
                    issueyear) + ') - issue #' + str(issuenumOG)
                logger.info(u"Pushover request")
                pushover = notifiers.PUSHOVER()
                pushover.notify(pushmessage,
                                "Download and Post-Processing completed")

            if mylar.BOXCAR_ENABLED:
                boxcar = notifiers.BOXCAR()
                boxcar.notify(series, str(issueyear), str(issuenumOG))

        return self.log
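The new filename above comes from helpers.replace_all applied to the $-token dictionary; a minimal sketch of that kind of token substitution (an assumption about the behaviour for illustration, not Mylar's actual helper):

def replace_all(template, values):
    # substitute the longest tokens first so a longer token like '$VolumeY' is consumed
    # before a shorter, overlapping one could be
    for token in sorted(values, key=len, reverse=True):
        template = template.replace(token, str(values[token]))
    return template

# e.g. replace_all('$Series $Issue ($Year)',
#                  {'$Series': 'Some Series', '$Issue': '007', '$Year': '2013'})
# -> 'Some Series 007 (2013)'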
Ejemplo n.º 11
0
    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(comicid, comlocation, ogcname)
        else:
            logger.info(
                "Mass import - Moving not Enabled. Setting Archived Status for import."
            )
            moveit.archivefiles(comicid, ogcname)

    #check for existing files...
    updater.forceRescan(comicid)

    if pullupd is None:
        # lets' check the pullist for anything at this time as well since we're here.
        # do this for only Present comics....
        if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present':  #and 'Present' in gcdinfo['resultPublished']:
            print("latestissue: #" + str(latestiss))
            chkstats = myDB.action(
                "SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?",
                [comicid, str(latestiss)]).fetchone()
            print(chkstats['Status'])
            if chkstats['Status'] in ('Skipped', 'Wanted', 'Snatched'):
                logger.info(
                    u"Checking this week's pullist for new issues of " +
                    comic['ComicName'])
Ejemplo n.º 12
0
def GCDimport(gcomicid, pullupd=None,imported=None,ogcname=None):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)
    
    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID":     gcdcomicid}

    comic = myDB.action('SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?', [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    #print ("Comic:" + str(ComicName))

    newValueDict = {"Status":   "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    #comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic details for ID : " + gcdcomicid)
        if comic is None:
            newValueDict = {"ComicName":   "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                    "Status":   "Active"}
        else:
            newValueDict = {"Status":   "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)

    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName


    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo=parseit.GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=gcdcomicid, TotalIssues=ComicIssues, issvariation=None, resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")" )
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Successfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    #--End
    
    ComicImage = gcdinfo['ComicImage']

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':','')
            if '/' in comicdir:
                comicdir = comicdir.replace('/','-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',','')            
        else: comicdir = u_comicname

        series = comicdir
        publisher = ComicPublisher
        year = ComicYear

        #do work to generate folder path
        values = {'$Series':        series,
                  '$Publisher':     publisher,
                  '$Year':          year,
                  '$series':        series.lower(),
                  '$publisher':     publisher.lower(),
                  '$Volume':        year
                  }
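        # Illustrative sketch (assumed folder format, not taken from the source): with
        # FOLDER_FORMAT = '$Publisher/$Series ($Year)', helpers.replace_all() would substitute
        # the values above and yield something like 'DC Comics/Batman (2011)', which is then
        # appended to DESTINATION_DIR below.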

        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)

        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)

    comicIssues = gcdinfo['totalissues']

    #let's download the image...
    if not os.path.exists(mylar.CACHE_DIR):
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))
        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))

    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")

    #try:
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache',str(gcomicid) + ".jpg")

            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comi$

            logger.info(u"Successfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation), "cover.jpg")
                shutil.copy(ComicImage,comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")
        
    #if comic['ComicVersion'].isdigit():
    #    comicVol = "v" + comic['ComicVersion']
    #else:
    #    comicVol = None


    controlValueDict = {"ComicID":      gcomicid}
    newValueDict = {"ComicName":        ComicName,
                    "ComicSortName":    sortname,
                    "ComicYear":        ComicYear,
                    "Total":            comicIssues,
                    "ComicLocation":    comlocation,
                    #"ComicVersion":     comicVol,
                    "ComicImage":       ComicImage,
                    #"ComicPublisher":   comic['ComicPublisher'],
                    #"ComicPublished":   comicPublished,
                    "DateAdded":        helpers.today(),
                    "Status":           "Loading"}

    myDB.upsert("comics", newValueDict, controlValueDict)

    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')

    logger.info(u"Successfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        #---NEW.code
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("gcdval: " + str(gcdval))
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                #print ("gcd-variation accounted for.")
                issdate = '0000-00-00'
                # note: unlike addComictoDB() below, there is no per-issue 'issis' value in
                # this function, so no int_issnum is derived here before breaking out.
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst+1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0: issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])
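        # Worked example of the x1000 issue-number encoding above (illustrative):
        # GCDIssue '12.5' -> issb4dec '12', decis '5' -> decisval 50, gcd_issue '12.50',
        # gcdis 12050 and (below) int_issnum 12; a whole number like '7' simply becomes
        # gcdis 7000 / int_issnum 7.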
        #get the latest issue / date using the date.
        int_issnum = int( gcdis / 1000 )
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        #print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        #---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?', [issid]).fetchone()


        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID":  issid}
        newValueDict = {"ComicID":            gcomicid,
                        "ComicName":          ComicName,
                        "Issue_Number":       gcd_issue,
                        "IssueDate":          issdate,
                        "Int_IssueNumber":    int_issnum
                        }

        # Only add DateAdded if the issue is not already in the database
        # (set after newValueDict is rebuilt so the value is not clobbered by the assignment above)
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()

        #print ("issueid:" + str(controlValueDict))
        #print ("values:" + str(newValueDict))

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        if iss_exists:
            #print ("Existing status : " + str(iss_exists['Status']))
            newValueDict['Status'] = iss_exists['Status']
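        # Net effect of the status logic above: an existing issue keeps whatever status it
        # already had, while a new issue is marked 'Wanted' when AUTOWANT_ALL is enabled (or
        # AUTOWANT_UPCOMING is enabled and the issue date is in the future) and 'Skipped' otherwise.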


        myDB.upsert("issues", newValueDict, controlValueDict)
        bb+=1

#        logger.debug(u"Updating comic cache for " + ComicName)
#        cache.getThumb(ComicID=issue['issueid'])

#        logger.debug(u"Updating cache for: " + ComicName)
#        cache.getThumb(ComicIDcomicid)


    controlValueStat = {"ComicID":     gcomicid}
    newValueStat = {"Status":          "Active",
                    "LatestIssue":     latestiss,
                    "LatestDate":      latestdate,
                    "LastUpdated":     helpers.now()
                   }

    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO:
        if not os.path.exists(comlocation + "/cvinfo"):
            with open(comlocation + "/cvinfo","w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" + str(comicid))

    logger.info(u"Updating complete for: " + ComicName)

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid,comlocation,ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(gcomicid,ogcname)

    #check for existing files...
    updater.forceRescan(gcomicid)


    if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " + ComicName)
            updater.newpullcheck(ComicName, gcomicid)

        #here we grab issues that have been marked as wanted above...

        results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : "  + ComicName)

            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'], result['IssueID'])
        else: logger.info(u"No issues marked as wanted for " + ComicName)

        logger.info(u"Finished grabbing what I could.")
Ejemplo n.º 13
0
def addComictoDB(comicid):
    
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    from mylar import cache
    
    myDB = db.DBConnection()
    
    # myDB.action('DELETE from blacklist WHERE ComicID=?', [comicid])

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID":     comicid}

    dbcomic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName":   "Comic ID: %s" % (comicid),
                "Status":   "Loading"}
    else:
        newValueDict = {"Status":   "Loading"}

    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now        
    comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic. ID for : " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName":   "Fetch failed, try refreshing. (%s)" % (comicid),
                    "Status":   "Active"}
        else:
            newValueDict = {"Status":   "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return
    
    if comic['ComicName'].startswith('The '):
        sortname = comic['ComicName'][4:]
    else:
        sortname = comic['ComicName']
        

    logger.info(u"Now adding/updating: " + comic['ComicName'])
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    gcdinfo=parseit.GCDScraper(comic['ComicName'], comic['ComicYear'], comic['ComicIssues'], comicid) 
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + comic['ComicName'] + " (" + comic['ComicYear'] + ")" )
        updater.no_searchresults(comicid)
        return
    logger.info(u"Successfully retrieved details for " + comic['ComicName'])
    # print ("Series Published" + parseit.resultPublished)
    #--End

    #comic book location on machine
    # setup default location here
    if ':' in comic['ComicName']: 
        comicdir = comic['ComicName'].replace(':','')
    else: comicdir = comic['ComicName']
    comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
    if mylar.DESTINATION_DIR == "":
        logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
        return
    if mylar.REPLACE_SPACES:
        #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
        comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        try:
            os.makedirs(str(comlocation))
            logger.info(u"Directory successfully created at: " + str(comlocation))
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

    #print ("root dir for series: " + comlocation)
    #try to account for CV not updating new issues as fast as GCD
    #seems CV doesn't update total counts
    #comicIssues = gcdinfo['totalissues']
    if gcdinfo['gcdvariation'] == "cv":
        comicIssues = str(int(comic['ComicIssues']) + 1)
    else:
        comicIssues = comic['ComicIssues']
    controlValueDict = {"ComicID":      comicid}
    newValueDict = {"ComicName":        comic['ComicName'],
                    "ComicSortName":    sortname,
                    "ComicYear":        comic['ComicYear'],
                    "ComicImage":       comic['ComicImage'],
                    "Total":            comicIssues,
                    "ComicLocation":    comlocation,
                    "ComicPublisher":   comic['ComicPublisher'],
                    "ComicPublished":   parseit.resultPublished,
                    "DateAdded":        helpers.today(),
                    "Status":           "Loading"}
    
    myDB.upsert("comics", newValueDict, controlValueDict)
    
    issued = cv.getComic(comicid,'issue')
    logger.info(u"Successfully retrieved issue details for " + comic['ComicName'])
    n = 0
    iscnt = int(comicIssues)
    issid = []
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + comic['ComicName'])

    # file check to see if issue exists
    logger.info(u"Checking directory for existing issues.")
    #fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName'])
    #havefiles = 0

    #fccnt = int(fc['comiccount'])
    #logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
    #fcnew = []

    while (n <= iscnt):
        #---NEW.code
        try:
            firstval = issued['issuechoice'][n]
        except IndexError:
            break
        cleanname = helpers.cleanName(firstval['Issue_Name'])
        issid = str(firstval['Issue_ID'])
        issnum = str(firstval['Issue_Number'])
        issname = cleanname
        if '.' in str(issnum):
            issn_st = str(issnum).find('.')
            issn_b4dec = str(issnum)[:issn_st]
            #if the length of decimal is only 1 digit, assume it's a tenth
            dec_is = str(issnum)[issn_st + 1:]
            if len(dec_is) == 1:
                dec_nisval = int(dec_is) * 10
                iss_naftdec = str(dec_nisval)
            if len(dec_is) == 2:
                dec_nisval = int(dec_is)
                iss_naftdec = str(dec_nisval)
            iss_issue = issn_b4dec + "." + iss_naftdec
            issis = (int(issn_b4dec) * 1000) + dec_nisval
        else: issis = int(issnum) * 1000
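        # The nested loop below matches this ComicVine issue against the GCD issue list by
        # comparing the x1000-encoded numbers (gcdis == issis) so the GCD publication date can
        # be attached to the issue and used to track the latest issue/date for the series.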

        bb = 0
        while (bb <= iscnt):
            try: 
                gcdval = gcdinfo['gcdchoice'][bb]
            except IndexError:
                #account for gcd variation here
                if gcdinfo['gcdvariation'] == 'gcd':
                    print ("gcd-variation accounted for.")
                    issdate = '0000-00-00'
                    int_issnum =  int ( issis / 1000 )
                break
            if 'nn' in str(gcdval['GCDIssue']):
                #no number detected - GN, TP or the like
                logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
                updater.no_searchresults(comicid)
                return
            elif '.' in str(gcdval['GCDIssue']):
                issst = str(gcdval['GCDIssue']).find('.')
                issb4dec = str(gcdval['GCDIssue'])[:issst]
                #if the length of decimal is only 1 digit, assume it's a tenth
                decis = str(gcdval['GCDIssue'])[issst+1:]
                if len(decis) == 1:
                    decisval = int(decis) * 10
                    issaftdec = str(decisval)
                if len(decis) == 2:
                    decisval = int(decis)
                    issaftdec = str(decisval)
                gcd_issue = issb4dec + "." + issaftdec
                gcdis = (int(issb4dec) * 1000) + decisval
            else:
                gcdis = int(str(gcdval['GCDIssue'])) * 1000
            if gcdis == issis:
                issdate = str(gcdval['GCDDate'])
                int_issnum = int( gcdis / 1000 )
                #get the latest issue / date using the date.
                if gcdval['GCDDate'] > latestdate:
                    latestiss = str(issnum)
                    latestdate = str(gcdval['GCDDate'])
                    break
                #bb = iscnt
            bb+=1
        #print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
        #---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.select('SELECT * from issues WHERE IssueID=?', [issid])

        controlValueDict = {"IssueID":  issid}
        newValueDict = {"ComicID":            comicid,
                        "ComicName":          comic['ComicName'],
                        "IssueName":          issname,
                        "Issue_Number":       issnum,
                        "IssueDate":          issdate,
                        "Int_IssueNumber":    int_issnum
                        }

        # Only add DateAdded if the issue is not already in the database
        # (set after newValueDict is rebuilt so the value is not clobbered by the assignment above)
        if not len(iss_exists):
            newValueDict['DateAdded'] = helpers.today()
        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
            #elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
            #    newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        myDB.upsert("issues", newValueDict, controlValueDict)
        n+=1

#        logger.debug(u"Updating comic cache for " + comic['ComicName'])
#        cache.getThumb(ComicID=issue['issueid'])
            
#        logger.debug(u"Updating cache for: " + comic['ComicName'])
#        cache.getThumb(ComicIDcomicid)

    #check for existing files...
    updater.forceRescan(comicid)

    controlValueStat = {"ComicID":     comicid}
    newValueStat = {"Status":          "Active",
                    "LatestIssue":     latestiss,
                    "LatestDate":      latestdate
                   }

    myDB.upsert("comics", newValueStat, controlValueStat)
  
    logger.info(u"Updating complete for: " + comic['ComicName'])
    
    #here we grab issues that have been marked as wanted above...
  
    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])    
    if results:
        logger.info(u"Attempting to grab wanted issues for : "  + comic['ComicName'])

        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result['IssueID'])
                if foundNZB == "yes":
                    updater.foundsearch(result['ComicID'], result['IssueID'])
    else: logger.info(u"No issues marked as wanted for " + comic['ComicName'])

    logger.info(u"Finished grabbing what I could.")
Ejemplo n.º 14
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
            self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            #lookup nzb_name in nzblog table to get issueid

            #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
            #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
            querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
            #logger.info("querysab_string:" + str(querysab))
            file = urllib2.urlopen(querysab)
            data = file.read()
            file.close()
            dom = parseString(data)

            sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
            sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
            logger.fdebug("SAB Replace Spaces: " + str(sabreps))
            logger.fdebug("SAB Replace Dots: " + str(sabrepd))
            myDB = db.DBConnection()

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext, logger.DEBUG)
                nzbname = re.sub(re.escape(str(ext)), '', str(nzbname))

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:]', '', str(nzbname))
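            # Illustrative example (filename is assumed, not from the source):
            # 'Invincible Iron Man 500 (2011).cbr' has its extension stripped above and then
            # becomes 'Invincible.Iron.Man.500.(2011)', the normalized form that the nzblog
            # lookup below expects.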

            logger.fdebug("After conversions, nzbname is : " + str(nzbname))
            self._log("nzbname: " + str(nzbname), logger.DEBUG)

            nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
                logger.fdebug("Failure - could not locate nzbfile initially.")
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
                logger.fdebug("trying again with nzbname of : " + str(nzbname))
                nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
                    return
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
                    issueid = nzbiss['IssueID']
            else: 
                issueid = nzbiss['IssueID']
                #use issueid to get publisher, series, year, issue number
            issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
            comicid = issuenzb['ComicID']
            issuenum = issuenzb['Issue_Number']
            #issueno = str(issuenum).split('.')[0]

            iss_find = issuenum.find('.')
            iss_b4dec = issuenum[:iss_find]
            iss_decval = issuenum[iss_find+1:]
            if int(iss_decval) == 0:
                iss = iss_b4dec
                issdec = int(iss_decval)
                issueno = str(iss)
                self._log("Issue Number: " + str(issueno), logger.DEBUG)
                logger.fdebug("Issue Number: " + str(issueno))
            else:
                if len(iss_decval) == 1:
                    iss = iss_b4dec + "." + iss_decval
                    issdec = int(iss_decval) * 10
                else:
                    iss = iss_b4dec + "." + iss_decval.rstrip('0')
                    issdec = int(iss_decval.rstrip('0')) * 10
                issueno = iss_b4dec
                self._log("Issue Number: " + str(iss), logger.DEBUG)
                logger.fdebug("Issue Number: " + str(iss))
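            # Illustrative example: issuenum '12.5' -> iss_b4dec '12', iss_decval '5',
            # iss '12.5' (issdec 50) and issueno '12'. Note that this older block assumes a
            # decimal point is present; the later revision of Process() further below guards
            # with an explicit "'.' in issuenum" check first.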

            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

            if len(issueno) > 0:
                if int(issueno) < 10:
                    self._log("issue detected less than 10", logger.DEBUG)
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                        prettycomiss = str(zeroadd) + str(iss)
                    else:
                        prettycomiss = str(zeroadd) + str(int(issueno))
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                else:
                    self._log("issue detected greater than 100", logger.DEBUG)
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                    prettycomiss = str(issueno)
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss), logger.DEBUG)

            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear), logger.DEBUG)
            logger.fdebug("Issue Year : " + str(issueyear))
            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher, logger.DEBUG)
            logger.fdebug("Publisher: " + str(publisher))
            series = comicnzb['ComicName']
            self._log("Series: " + series, logger.DEBUG)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear, logger.DEBUG)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation, logger.DEBUG)
            logger.fdebug("Comic Location: " + str(comlocation))

            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$Volume':    seriesyear
                          }
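            # Illustrative rendering (assumed format string and series): with
            # FILE_FORMAT = '$Series $Issue ($Year)' the substitutions above would produce
            # 'Batman 012 (2012)', and REPLACE_SPACES with REPLACE_CHAR '.' would turn that
            # into 'Batman.012.(2012)' in the renaming step below.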

            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)
            self._log("Original Filename: " + ofilename, logger.DEBUG)
            self._log("Original Extension: " + ext, logger.DEBUG)
            logger.fdebug("Original Filename: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '':
                self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(mylar.FILE_FORMAT, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:]', '', nfilename)
            self._log("New Filename: " + nfilename, logger.DEBUG)
            logger.fdebug("New Filename: " + str(nfilename))

            src = os.path.join(self.nzb_folder, ofilename)
            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src, logger.DEBUG)
            self._log("Destination:" +  dst, logger.DEBUG)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
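            # The file is renamed in place inside the download folder first, then the renamed
            # copy is moved into the series folder (dst under comlocation) by shutil.move below.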
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return

            self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
            #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
            #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenum))
            self._log(u"Post Processing SUCCESSFUL!", logger.DEBUG)

            # retrieve/create the corresponding comic objects

            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)

            return self.log
Ejemplo n.º 15
0
                        control = {"IssueID": issuechk['IssueID']}
                        values = {"Status": "Archived"}
                        logger.fdebug('...changing status of ' +
                                      str(issuechk['Issue_Number']) +
                                      ' to Archived ')
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat += 1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    logger.fdebug('Rescanning.. ' + str(c))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"

    if len(import_by_comicids) > 0 or len(vals) > 0:
        #import_comicids['comic_info'] = import_by_comicids
        #if vals:
        #    import_comicids['issueid_info'] = vals
        #else:
        #    import_comicids['issueid_info'] = None
        if vals:
            cvimport_comicids = vals
            import_cv_ids = len(vals)
        else:
            cvimport_comicids = None
            import_cv_ids = 0
Ejemplo n.º 16
0
    def Process(self):
            self._log("nzb name: " + str(self.nzb_name), logger.DEBUG)
            self._log("nzb folder: " + str(self.nzb_folder), logger.DEBUG)
            logger.fdebug("nzb name: " + str(self.nzb_name))
            logger.fdebug("nzb folder: " + str(self.nzb_folder))
            if mylar.USE_SABNZBD==0:
                logger.fdebug("Not using SABNzbd")
            else:
                # if the SAB Directory option is enabled, let's use that folder name and append the jobname.
                if mylar.SAB_DIRECTORY is not None and mylar.SAB_DIRECTORY != 'None' and len(mylar.SAB_DIRECTORY) > 4:
                    self.nzb_folder = os.path.join(mylar.SAB_DIRECTORY, self.nzb_name).encode(mylar.SYS_ENCODING)
    
                #lookup nzb_name in nzblog table to get issueid
    
                #query SAB to find out if Replace Spaces enabled / not as well as Replace Decimals
                #http://localhost:8080/sabnzbd/api?mode=set_config&section=misc&keyword=dirscan_speed&value=5
                querysab = str(mylar.SAB_HOST) + "/api?mode=get_config&section=misc&output=xml&apikey=" + str(mylar.SAB_APIKEY)
                #logger.info("querysab_string:" + str(querysab))
                file = urllib2.urlopen(querysab)
                data = file.read()
                file.close()
                dom = parseString(data)

                try:
                    sabreps = dom.getElementsByTagName('replace_spaces')[0].firstChild.wholeText
                except:
                    errorm = dom.getElementsByTagName('error')[0].firstChild.wholeText
                    logger.error(u"Error detected attempting to retrieve SAB data : " + errorm)
                    return
                sabrepd = dom.getElementsByTagName('replace_dots')[0].firstChild.wholeText
                logger.fdebug("SAB Replace Spaces: " + str(sabreps))
                logger.fdebug("SAB Replace Dots: " + str(sabrepd))
            if mylar.USE_NZBGET==1:
                logger.fdebug("Using NZBGET")
                logger.fdebug("NZB name as passed from NZBGet: " + self.nzb_name)
            myDB = db.DBConnection()

            nzbname = self.nzb_name
            #remove extensions from nzb_name if they somehow got through (Experimental most likely)
            extensions = ('.cbr', '.cbz')

            if nzbname.lower().endswith(extensions):
                fd, ext = os.path.splitext(nzbname)
                self._log("Removed extension from nzb: " + ext, logger.DEBUG)
                nzbname = re.sub(re.escape(str(ext)), '', str(nzbname))

            #replace spaces
            nzbname = re.sub(' ', '.', str(nzbname))
            nzbname = re.sub('[\,\:\?]', '', str(nzbname))
            nzbname = re.sub('[\&]', 'and', str(nzbname))

            logger.fdebug("After conversions, nzbname is : " + str(nzbname))
#            if mylar.USE_NZBGET==1:
#                nzbname=self.nzb_name
            self._log("nzbname: " + str(nzbname), logger.DEBUG)

            nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()

            if nzbiss is None:
                self._log("Failure - could not initially locate nzbfile in my database to rename.", logger.DEBUG)
                logger.fdebug("Failure - could not locate nzbfile initially.")
                # if failed on spaces, change it all to decimals and try again.
                nzbname = re.sub('_', '.', str(nzbname))
                self._log("trying again with this nzbname: " + str(nzbname), logger.DEBUG)
                logger.fdebug("trying again with nzbname of : " + str(nzbname))
                nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzbname]).fetchone()
                if nzbiss is None:
                    logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
                    return
                else:
                    self._log("I corrected and found the nzb as : " + str(nzbname))
                    logger.fdebug("auto-corrected and found the nzb as : " + str(nzbname))
                    issueid = nzbiss['IssueID']
            else: 
                issueid = nzbiss['IssueID']
                logger.fdebug("issueid:" + str(issueid))
                sarc = nzbiss['SARC']
                #use issueid to get publisher, series, year, issue number
            issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
            if issuenzb is not None:
                if helpers.is_number(issueid):
                    sandwich = int(issuenzb['IssueID'])
            else:
                #if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
                #using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
                if 'S' in issueid:
                    sandwich = issueid
                elif 'G' in issueid: 
                    sandwich = 1
            if helpers.is_number(sandwich):
                if sandwich < 900000:
                    # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
                    pass
            else:
                if issuenzb is None or 'S' in sandwich or int(sandwich) >= 900000:
                    # this has no issueID, therefore it's a one-off or a manual post-proc.
                    # At this point, let's just drop it into the Comic Location folder and forget about it..
                    if 'S' in sandwich:
                        self._log("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                        logger.info("One-off STORYARC mode enabled for Post-Processing for " + str(sarc))
                        if mylar.STORYARCDIR:
                            storyarcd = os.path.join(mylar.DESTINATION_DIR, "StoryArcs", sarc)
                            self._log("StoryArc Directory set to : " + storyarcd, logger.DEBUG)
                        else:
                            self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)

                    else:
                        self._log("One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory.", logger.DEBUG)
                        logger.info("One-off mode enabled for Post-Processing. Will move into Grab-bag directory.")
                        self._log("Grab-Bag Directory set to : " + mylar.GRABBAG_DIR, logger.DEBUG)

                    for root, dirnames, filenames in os.walk(self.nzb_folder):
                        for filename in filenames:
                            if filename.lower().endswith(extensions):
                                ofilename = filename
                                path, ext = os.path.splitext(ofilename)
      
                    if 'S' in sandwich:
                        if mylar.STORYARCDIR:
                            grdst = storyarcd
                        else:
                            grdst = mylar.DESTINATION_DIR
                    else:
                        if mylar.GRABBAG_DIR:
                            grdst = mylar.GRABBAG_DIR
                        else:
                            grdst = mylar.DESTINATION_DIR

                    filechecker.validateAndCreateDirectory(grdst, True)
    
                    grab_dst = os.path.join(grdst, ofilename)
                    self._log("Destination Path : " + grab_dst, logger.DEBUG)
                    logger.info("Destination Path : " + grab_dst)
                    grab_src = os.path.join(self.nzb_folder, ofilename)
                    self._log("Source Path : " + grab_src, logger.DEBUG)
                    logger.info("Source Path : " + grab_src)

                    logger.info("Moving " + str(ofilename) + " into directory : " + str(grdst))

                    try:
                        shutil.move(grab_src, grab_dst)
                    except (OSError, IOError):
                        self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                        logger.debug("Failed to move directory - check directories and manually re-run.")
                        return
                    #tidyup old path
                    try:
                        shutil.rmtree(self.nzb_folder)
                    except (OSError, IOError):
                        self._log("Failed to remove temporary directory.", logger.DEBUG)
                        logger.debug("Failed to remove temporary directory - check directory and manually re-run.")
                        return

                    logger.debug("Removed temporary directory : " + str(self.nzb_folder))
                    self._log("Removed temporary directory : " + self.nzb_folder, logger.DEBUG)
                    #delete entry from nzblog table
                    myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])

                    if 'S' in issueid:
                        issuearcid = re.sub('S', '', issueid)
                        logger.info("IssueArcID is : " + str(issuearcid))
                        ctrlVal = {"IssueArcID":  issuearcid}
                        newVal = {"Status":    "Downloaded",
                                  "Location":  grab_dst }
                        myDB.upsert("readinglist",newVal,ctrlVal)
                        logger.info("updated status to Downloaded")
                    return self.log

            comicid = issuenzb['ComicID']
            issuenumOG = issuenzb['Issue_Number']
            #issueno = str(issuenum).split('.')[0]
            #new CV API - removed all decimals...here we go AGAIN!
            issuenum = issuenumOG
            issue_except = 'None'
            if 'au' in issuenum.lower():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AU'
            if '.' in issuenum:
                iss_find = issuenum.find('.')
                iss_b4dec = issuenum[:iss_find]
                iss_decval = issuenum[iss_find+1:]
                if int(iss_decval) == 0:
                    iss = iss_b4dec
                    issdec = int(iss_decval)
                    issueno = str(iss)
                    self._log("Issue Number: " + str(issueno), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(issueno))
                else:
                    if len(iss_decval) == 1:
                        iss = iss_b4dec + "." + iss_decval
                        issdec = int(iss_decval) * 10
                    else:
                        iss = iss_b4dec + "." + iss_decval.rstrip('0')
                        issdec = int(iss_decval.rstrip('0')) * 10
                    issueno = iss_b4dec
                    self._log("Issue Number: " + str(iss), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(iss))
            else:
                iss = issuenum
                issueno = str(iss)
            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))
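            # Illustrative padding results (assuming ZERO_LEVEL_N == '00x'): issue 7 becomes
            # prettycomiss '007', issue 12 becomes '012' (zeroadd is reduced to '0' for 10-99),
            # and issue 115 stays '115'.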

            if len(issueno) > 0:
                if int(issueno) < 10:
                    self._log("issue detected less than 10", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None': 
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                else:
                    self._log("issue detected greater than 100", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss), logger.DEBUG)

            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear), logger.DEBUG)
            logger.fdebug("Issue Year : " + str(issueyear))
            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher, logger.DEBUG)
            logger.fdebug("Publisher: " + str(publisher))
            #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
            series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
            self._log("Series: " + series, logger.DEBUG)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear, logger.DEBUG)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation, logger.DEBUG)
            logger.fdebug("Comic Location: " + str(comlocation))
            comversion = comicnzb['ComicVersion']
            self._log("Comic Version: " + str(comversion), logger.DEBUG)
            logger.fdebug("Comic Version: " + str(comversion))
            if comversion is None:
                comversion = 'None'
            #if comversion is None, remove it so it doesn't populate with 'None'
            if comversion == 'None':
                chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
                logger.fdebug("No version # found for series, removing from filename")
                logger.fdebug("new format is now: " + str(chunk_file_format))
            else:
                chunk_file_format = mylar.FILE_FORMAT
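            # Illustrative example (assumed format string): with FILE_FORMAT
            # '$Series $VolumeN $Issue ($Year)' and no series version, the '$VolumeN' token is
            # stripped and the doubled space collapsed above, leaving '$Series $Issue ($Year)'
            # as chunk_file_format for the rename step below.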
            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

            #rename file and move to new path
            #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$VolumeY':   'V' + str(seriesyear),
                           '$VolumeN':   comversion
                          }

            ofilename = None

            for root, dirnames, filenames in os.walk(self.nzb_folder):
                for filename in filenames:
                    if filename.lower().endswith(extensions):
                        ofilename = filename
                        path, ext = os.path.splitext(ofilename)

            if ofilename is None:
                logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
                return
            self._log("Original Filename: " + ofilename, logger.DEBUG)
            self._log("Original Extension: " + ext, logger.DEBUG)
            logger.fdebug("Original Filname: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
                self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(chunk_file_format, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:\?]', '', nfilename)
            self._log("New Filename: " + nfilename, logger.DEBUG)
            logger.fdebug("New Filename: " + str(nfilename))

            src = os.path.join(self.nzb_folder, ofilename)

            filechecker.validateAndCreateDirectory(comlocation, True)

            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src, logger.DEBUG)
            self._log("Destination:" +  dst, logger.DEBUG)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
            src = os.path.join(self.nzb_folder, str(nfilename + ext))
            try:
                shutil.move(src, dst)
            except (OSError, IOError):
                self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return
            #tidyup old path
            try:
                shutil.rmtree(self.nzb_folder)
            except (OSError, IOError):
                self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
                self._log("Post-Processing ABORTED.", logger.DEBUG)
                return

            self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
            #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
            #update snatched table to change status to Downloaded
            updater.foundsearch(comicid, issueid, down='True')
            #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
            self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)

            if mylar.PROWL_ENABLED:
                pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
                logger.info(u"Prowl request")
                prowl = notifiers.PROWL()
                prowl.notify(pushmessage,"Download and Postprocessing completed")

            if mylar.NMA_ENABLED:
                nma = notifiers.NMA()
                nma.notify(series, str(issueyear), str(issuenumOG))

            if mylar.PUSHOVER_ENABLED:
                pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
                logger.info(u"Pushover request")
                pushover = notifiers.PUSHOVER()
                pushover.notify(pushmessage, "Download and Post-Processing completed")
             
            # retrieve/create the corresponding comic objects

            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)

            return self.log
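
The rename step above amounts to substituting the $-tokens in mylar.FILE_FORMAT with the values gathered from the database, then stripping characters that are awkward in filenames. A minimal standalone sketch of that substitution, with the format string and sample values made up for illustration (Mylar's own helpers.replace_all performs the real replacement):

import re

file_format = '$Series $Issue ($Year)'            # illustrative format string
file_values = {'$Series': 'Invincible',           # illustrative values for a single issue
               '$Issue':  '025',
               '$Year':   '2005'}

nfilename = file_format
for token, value in file_values.items():
    nfilename = nfilename.replace(token, value)   # stand-in for helpers.replace_all
nfilename = re.sub('[\,\:\?]', '', nfilename)     # same character strip as the code above
print(nfilename)                                  # -> Invincible 025 (2005)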
Example No. 17
0
def libraryScan(dir=None,
                append=False,
                ComicID=None,
                ComicName=None,
                cron=None):

    if cron and not mylar.LIBRARYSCAN:
        return

    if not dir:
        dir = mylar.COMIC_DIR

    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' %
                    dir.decode(mylar.SYS_ENCODING, 'replace'))
        return

    logger.info('Scanning comic directory: %s' %
                dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    comiccnt = 0
    extensions = ('cbr', 'cbz')
    cv_location = []

    for r, d, f in os.walk(dir):
        for files in f:
            if 'cvinfo' in files:
                cv_location.append(r)
                logger.fdebug('CVINFO found: ' + os.path.join(r))
            if any(files.lower().endswith('.' + x.lower())
                   for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' +
                              str(comicsize) + ' bytes')
                comiccnt += 1

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING,
                                                      'replace')

                comic_dict = {
                    'ComicFilename': comic,
                    'ComicLocation': comicpath,
                    'ComicSize': comicsize,
                    'Unicode_ComicLocation': unicode_comic_path
                }
                comic_list.append(comic_dict)

        logger.info("I've found a total of " + str(comiccnt) +
                    " comics....analyzing now")
        #logger.info("comiclist: " + str(comic_list))
    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info(
        "loading in the watchlist to see if a series is being watched already..."
    )
    watchlist = myDB.select("SELECT * from comics")
    ComicName = []
    DisplayName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        #use the comicname_filesafe to start
        watchdisplaycomic = watch['ComicName'].encode('utf-8').strip(
        )  #re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub(
            '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '',
            watch['ComicName_Filesafe']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if ' the ' in watchcomic.lower():
            #drop the 'the' from the watchcomic title for proper comparisons.
            watchcomic = re.sub('(?i)\\bthe\\b', '', watchcomic).strip()

        alt_chk = "no"  # alt-checker flag (default to no)

        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch[
                'AlternateSearch'] != 'None':
            altcomic = re.sub(
                '[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '',
                watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        DisplayName.append(watchdisplaycomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt += 1

    logger.info("Successfully loaded " + str(watchcnt) +
                " series from your watchlist.")

    ripperlist = ['digital-', 'empire', 'dcp']

    watchfound = 0

    datelist = [
        'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
        'nov', 'dec'
    ]
    #    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
    #    #search for number as text, and change to numeric
    #    for numbs in basnumbs:
    #        #logger.fdebug("numbs:" + str(numbs))
    #        if numbs in ComicName.lower():
    #            numconv = basnumbs[numbs]
    #            #logger.fdebug("numconv: " + str(numconv))

    issueid_list = []
    cvscanned_loc = None
    cvinfo_CID = None

    for i in comic_list:
        logger.fdebug('Analyzing : ' + i['ComicFilename'])
        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        issueinfo = None

        #Make sure cvinfo is checked for FIRST (so that CID can be attached to all files properly thereafter as they're scanned in)
        if os.path.dirname(comlocation) in cv_location and os.path.dirname(
                comlocation) != cvscanned_loc:

            #if comfilename == 'cvinfo':
            logger.info('comfilename: ' + comfilename)
            logger.info('cvscanned_loc: ' + str(cv_location))
            logger.info('comlocation: ' + os.path.dirname(comlocation))
            #if cvscanned_loc != comlocation:
            try:
                with open(os.path.join(os.path.dirname(comlocation),
                                       'cvinfo')) as f:
                    urllink = f.readline()

                logger.fdebug('urllink: ' + str(urllink))
                if urllink:
                    cid = urllink.split('/')
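                    #ComicVine volume URLs carry the 4050- resource prefix; the segment containing it holds the ComicID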
                    if '4050-' in cid[-2]:
                        cvinfo_CID = re.sub('4050-', '', cid[-2]).strip()
                        logger.info(
                            'CVINFO file located within directory. Attaching everything in directory that is valid to ComicID: '
                            + str(cvinfo_CID))
                        #store the location of the cvinfo so it's applied to the correct directory (since we're usually scanning multiple directories)
                        cvscanned_loc = os.path.dirname(comlocation)
                else:
                    logger.error(
                        "Could not read cvinfo file properly (or it does not contain any data)"
                    )
            except (OSError, IOError):
                logger.error(
                    "Could not read cvinfo file properly (or it does not contain any data)"
                )
        #else:
        #    don't scan in it again if it's already been done initially
        #    continue

        if mylar.IMP_METADATA:
            logger.info('metatagging checking enabled.')
            #if read tags is enabled during import, check here.
            if i['ComicLocation'].endswith('.cbz'):
                logger.info('Attempting to read tags present in filename: ' +
                            i['ComicLocation'])
                issueinfo = helpers.IssueDetails(i['ComicLocation'])
                if issueinfo is None:
                    pass
                else:
                    issuenotes_id = None
                    logger.info(
                        "Successfully retrieved some tags. Let's see what I can figure out."
                    )
                    comicname = issueinfo[0]['series']
                    logger.fdebug('Series Name: ' + comicname)
                    issue_number = issueinfo[0]['issue_number']
                    logger.fdebug('Issue Number: ' + str(issue_number))
                    issuetitle = issueinfo[0]['title']
                    logger.fdebug('Issue Title: ' + issuetitle)
                    issueyear = issueinfo[0]['year']
                    logger.fdebug('Issue Year: ' + str(issueyear))
                    try:
                        issuevolume = issueinfo[0]['volume']
                    except:
                        issuevolume = None
                    # if used by ComicTagger, Notes field will have the IssueID.
                    issuenotes = issueinfo[0]['notes']
                    logger.fdebug('Notes: ' + issuenotes)
                    if issuenotes is not None:
                        if 'Issue ID' in issuenotes:
                            st_find = issuenotes.find('Issue ID')
                            tmp_issuenotes_id = re.sub(
                                "[^0-9]", " ", issuenotes[st_find:]).strip()
                            if tmp_issuenotes_id.isdigit():
                                issuenotes_id = tmp_issuenotes_id
                                logger.fdebug(
                                    'Successfully retrieved CV IssueID for ' +
                                    comicname + ' #' + str(issue_number) +
                                    ' [' + str(issuenotes_id) + ']')
                        elif 'CVDB' in issuenotes:
                            st_find = issuenotes.find('CVDB')
                            tmp_issuenotes_id = re.sub(
                                "[^0-9]", " ", issuenotes[st_find:]).strip()
                            if tmp_issuenotes_id.isdigit():
                                issuenotes_id = tmp_issuenotes_id
                                logger.fdebug(
                                    'Successfully retrieved CV IssueID for ' +
                                    comicname + ' #' + str(issue_number) +
                                    ' [' + str(issuenotes_id) + ']')
                        else:
                            logger.fdebug(
                                'Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.'
                            )

                    logger.fdebug("adding " + comicname +
                                  " to the import-queue!")
                    impid = comicname + '-' + str(issueyear) + '-' + str(
                        issue_number
                    )  #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
                    logger.fdebug("impid: " + str(impid))
                    #make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
                    if cvinfo_CID is None:
                        issueid_list.append(issuenotes_id)
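                    #only attach the cvinfo ComicID when this file lives in the same directory the cvinfo was found in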
                    if cvscanned_loc == os.path.dirname(comlocation):
                        cv_cid = cvinfo_CID
                        logger.info('CVINFO_COMICID attached : ' + str(cv_cid))
                    else:
                        cv_cid = None
                    import_by_comicids.append({
                        "impid":
                        impid,
                        "comicid":
                        cv_cid,
                        "watchmatch":
                        None,
                        "displayname":
                        helpers.cleanName(comicname),
                        "comicname":
                        comicname,  #com_NAME,
                        "comicyear":
                        issueyear,
                        "volume":
                        issuevolume,
                        "issueid":
                        issuenotes_id,
                        "comfilename":
                        comfilename,
                        "comlocation":
                        comlocation.decode(mylar.SYS_ENCODING)
                    })
            else:
                logger.info(
                    i['ComicLocation'] +
                    ' is not in a metatagged format (cbz). Bypassing reading of the metatags'
                )

        if issueinfo is None:
            #let's clean up the filename for matching purposes

            cfilename = re.sub('[\_\#\,\/\:\;\-\!\$\%\&\+\'\?\@]', ' ',
                               comfilename)
            #cfilename = re.sub('\s', '_', str(cfilename))
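            #d_filename is a display copy; troublesome characters become '#' placeholders so the originals can be restored later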
            d_filename = re.sub('[\_\#\,\/\;\!\$\%\&\?\@]', ' ', comfilename)
            d_filename = re.sub('[\:\-\+\']', '#', d_filename)

            #strip extraspaces
            d_filename = re.sub('\s+', ' ', d_filename)
            cfilename = re.sub('\s+', ' ', cfilename)

            #versioning - remove it
            subsplit = cfilename.replace('_', ' ').split()
            volno = None
            volyr = None
            for subit in subsplit:
                if subit[0].lower() == 'v':
                    vfull = 0
                    if subit[1:].isdigit():
                        #if in format v1, v2009 etc...
                        if len(subit) > 3:
                            # if it's greater than 3 in length, then the format is Vyyyy
                            vfull = 1  # add on 1 character length to account for extra space
                        cfilename = re.sub(subit, '', cfilename)
                        d_filename = re.sub(subit, '', d_filename)
                        volno = re.sub("[^0-9]", " ", subit)
                    elif subit.lower()[:3] == 'vol':
                        #if in format vol.2013 etc
                        #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
                        logger.fdebug(
                            'volume indicator detected as version #:' +
                            str(subit))
                        cfilename = re.sub(subit, '', cfilename)
                        cfilename = " ".join(cfilename.split())
                        d_filename = re.sub(subit, '', d_filename)
                        d_filename = " ".join(d_filename.split())
                        volyr = re.sub("[^0-9]", " ", subit).strip()
                        logger.fdebug('volume year set as : ' + str(volyr))
            cm_cn = 0

            #we need to track the counter to make sure we are comparing the right array parts
            #this takes care of the brackets :)
            m = re.findall('[^()]+', d_filename)  #cfilename)
            lenm = len(m)
            logger.fdebug("there are " + str(lenm) + " words.")
            cnt = 0
            yearmatch = "false"
            foundonwatch = "False"
            issue = 999999

            while (cnt < lenm):
                if m[cnt] is None: break
                if m[cnt] == ' ':
                    pass
                else:
                    logger.fdebug(str(cnt) + ". Bracket Word: " + m[cnt])
                    if cnt == 0:
                        comic_andiss = m[cnt]
                        logger.fdebug("Comic: " + comic_andiss)
                        # if it's not in the standard format this will bork.
                        # let's try to accommodate (somehow).
                        # first remove the extension (if any)
                        extensions = ('cbr', 'cbz')
                        if comic_andiss.lower().endswith(extensions):
                            comic_andiss = comic_andiss[:-4]
                            logger.fdebug("removed extension from filename.")
                        #now we have to break up the string regardless of formatting.
                        #let's force the spaces.
                        comic_andiss = re.sub('_', ' ', comic_andiss)
                        cs = comic_andiss.split()
                        cs_len = len(cs)
                        cn = ''
                        ydetected = 'no'
                        idetected = 'no'
                        decimaldetect = 'no'
                        for i in reversed(xrange(len(cs))):
                            #start at the end.
                            logger.fdebug("word: " + str(cs[i]))
                            #assume once we find issue - everything prior is the actual title
                            #idetected = no will ignore everything so it will assume all title
                            if (cs[i][:-2] == '19' or cs[i][:-2] == '20') and idetected == 'no':
                                logger.fdebug("year detected: " + str(cs[i]))
                                ydetected = 'yes'
                                result_comyear = cs[i]
                            elif cs[i].isdigit(
                            ) and idetected == 'no' or '.' in cs[i]:
                                if '.' in cs[i]:
                                    #make sure it's a number on either side of decimal and assume decimal issue.
                                    decst = cs[i].find('.')
                                    dec_st = cs[i][:decst]
                                    dec_en = cs[i][decst + 1:]
                                    logger.fdebug("st: " + str(dec_st))
                                    logger.fdebug("en: " + str(dec_en))
                                    if dec_st.isdigit() and dec_en.isdigit():
                                        logger.fdebug(
                                            "decimal issue detected...adjusting."
                                        )
                                        issue = dec_st + "." + dec_en
                                        logger.fdebug("issue detected: " +
                                                      str(issue))
                                        idetected = 'yes'
                                    else:
                                        logger.fdebug(
                                            "false decimal represent. Chunking to extra word."
                                        )
                                        cn = cn + cs[i] + " "
                                        #break
                                else:
                                    issue = cs[i]
                                    logger.fdebug("issue detected : " +
                                                  str(issue))
                                    idetected = 'yes'

                            elif '#' in cs[i] or decimaldetect == 'yes':
                                logger.fdebug("issue detected: " + str(cs[i]))
                                idetected = 'yes'
                            else:
                                cn = cn + cs[i] + " "
                        if ydetected == 'no':
                            #assume no year given in filename...
                            result_comyear = "0000"
                        logger.fdebug("cm?: " + str(cn))
                        if issue != 999999:
                            comiss = issue
                        else:
                            logger.error(
                                "Invalid Issue number (none present) for " +
                                comfilename)
                            break
                        cnsplit = cn.split()
                        cname = ''
                        findcn = 0
                        while (findcn < len(cnsplit)):
                            cname = cname + cs[findcn] + " "
                            findcn += 1
                        cname = cname[:len(cname) - 1]  # drop the end space...
                        logger.fdebug('assuming name is : ' + cname)
                        com_NAME = cname
                        logger.fdebug('com_NAME : ' + com_NAME)
                        yearmatch = "True"
                    else:
                        logger.fdebug('checking ' + m[cnt])
                        # we're assuming that the year is in brackets (and it should be damnit)
                        if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                            logger.fdebug('year detected: ' + str(m[cnt]))
                            ydetected = 'yes'
                            result_comyear = m[cnt]
                        elif m[cnt][:3].lower() in datelist:
                            logger.fdebug(
                                'possible issue date format given - verifying')
                            #if the date of the issue is given as (Jan 2010) or (January 2010) let's adjust.
                            #keeping in mind that ',' and '.' are already stripped from the string
                            if m[cnt][-4:].isdigit():
                                ydetected = 'yes'
                                result_comyear = m[cnt][-4:]
                                logger.fdebug('Valid Issue year of ' +
                                              str(result_comyear) +
                                              ' detected in format of ' +
                                              str(m[cnt]))
                cnt += 1

            displength = len(cname)
            logger.fdebug('cname length : ' + str(displength) + ' --- ' +
                          str(cname))
            logger.fdebug('d_filename is : ' + d_filename)
            charcount = d_filename.count('#')
            logger.fdebug('charcount is : ' + str(charcount))
            if charcount > 0:
                logger.fdebug('entering loop')
                for i, m in enumerate(re.finditer('\#', d_filename)):
                    if m.end() <= displength:
                        logger.fdebug(comfilename[m.start():m.end()])
                        # find the occurrence in comfilename, then replace into d_filename so special characters are brought across
                        newchar = comfilename[m.start():m.end()]
                        logger.fdebug('newchar:' + str(newchar))
                        d_filename = d_filename[:m.start()] + str(
                            newchar) + d_filename[m.end():]
                        logger.fdebug('d_filename:' + str(d_filename))

            dispname = d_filename[:displength]
            logger.fdebug('dispname : ' + dispname)

            splitit = []
            watchcomic_split = []
            logger.fdebug("filename comic and issue: " + comic_andiss)

            #changed this from '' to ' '
            comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
            comic_iss = comic_iss_b4.replace('.', ' ')
            comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
            logger.fdebug("adjusted comic and issue: " + str(comic_iss))
            #remove 'the' from here for proper comparisons.
            if ' the ' in comic_iss.lower():
                comic_iss = re.sub('\\bthe\\b', '', comic_iss).strip()
            splitit = comic_iss.split(None)
            logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " +
                          str(comic_iss))
            #here we cycle through the Watchlist looking for a match.
            while (cm_cn < watchcnt):
                #setup the watchlist
                comname = ComicName[cm_cn]
                comyear = ComicYear[cm_cn]
                compub = ComicPublisher[cm_cn]
                comtotal = ComicTotal[cm_cn]
                comicid = ComicID[cm_cn]
                watch_location = ComicLocation[cm_cn]

                # there shouldn't be an issue in the comic now, so let's just assume it's all gravy.
                splitst = len(splitit)
                watchcomic_split = helpers.cleanName(comname)
                watchcomic_split = re.sub('[\-\:\,\.]', ' ',
                                          watchcomic_split).split(None)

                logger.fdebug(
                    str(splitit) + " file series word count: " + str(splitst))
                logger.fdebug(
                    str(watchcomic_split) + " watchlist word count: " +
                    str(len(watchcomic_split)))
                if (splitst) != len(watchcomic_split):
                    logger.fdebug("incorrect comic lengths...not a match")


#                    if str(splitit[0]).lower() == "the":
#                        logger.fdebug("THE word detected...attempting to adjust pattern matching")
#                        splitit[0] = splitit[4:]
                else:
                    logger.fdebug("length match..proceeding")
                    n = 0
                    scount = 0
                    logger.fdebug("search-length: " + str(splitst))
                    logger.fdebug("Watchlist-length: " +
                                  str(len(watchcomic_split)))
                    while (n <= (splitst) - 1):
                        logger.fdebug("splitit: " + str(splitit[n]))
                        if n < (splitst) and n < len(watchcomic_split):
                            logger.fdebug(
                                str(n) + " Comparing: " +
                                str(watchcomic_split[n]) + " .to. " +
                                str(splitit[n]))
                            if '+' in watchcomic_split[n]:
                                watchcomic_split[n] = re.sub(
                                    '\+', '', str(watchcomic_split[n]))
                            if str(watchcomic_split[n].lower()) in str(
                                    splitit[n].lower()
                            ) and len(watchcomic_split[n]) >= len(splitit[n]):
                                logger.fdebug("word matched on : " +
                                              str(splitit[n]))
                                scount += 1
                            #elif ':' in splitit[n] or '-' in splitit[n]:
                            #    splitrep = splitit[n].replace('-', '')
                            #    logger.fdebug("non-character keyword...skipped on " + splitit[n])
                        elif str(splitit[n]).lower().startswith('v'):
                            logger.fdebug("possible versioning..checking")
                            #we hit a versioning # - account for it
                            if splitit[n][1:].isdigit():
                                comicversion = str(splitit[n])
                                logger.fdebug("version found: " +
                                              str(comicversion))
                        else:
                            logger.fdebug("Comic / Issue section")
                            if splitit[n].isdigit():
                                logger.fdebug("issue detected")
                            else:
                                logger.fdebug("non-match for: " +
                                              str(splitit[n]))
                                pass
                        n += 1
                    #set the match threshold to 80% (for now)
                    # if it's less than 80% consider it a non-match and discard.
                    #splitit has to splitit-1 because last position is issue.
                    wordcnt = int(scount)
                    logger.fdebug("scount:" + str(wordcnt))
                    totalcnt = int(splitst)
                    logger.fdebug("splitit-len:" + str(totalcnt))
                    spercent = (wordcnt * 100.0) / totalcnt  #multiply by 100.0 first so integer division doesn't truncate the percentage
                    logger.fdebug("we got " + str(spercent) + " percent.")
                    if int(spercent) >= 80:
                        logger.fdebug("it's a go captain... - we matched " +
                                      str(spercent) + "%!")
                        logger.fdebug("this should be a match!")
                        logger.fdebug("issue we found for is : " + str(comiss))
                        #set the year to the series we just found ;)
                        result_comyear = comyear
                        #issue comparison now as well
                        logger.info(u"Found " + comname + " (" + str(comyear) +
                                    ") issue: " + str(comiss))
                        watchmatch = str(comicid)
                        dispname = DisplayName[cm_cn]
                        foundonwatch = "True"
                        break
                    elif int(spercent) < 80:
                        logger.fdebug("failure - we only got " +
                                      str(spercent) + "% right!")
                cm_cn += 1

            if foundonwatch == "False":
                watchmatch = None
            #---if it's not a match - send it to the importer.
            n = 0

            if volyr is None:
                if result_comyear is None:
                    result_comyear = '0000'  #no year in filename basically.
            else:
                if result_comyear is None:
                    result_comyear = volyr
            if volno is None:
                if volyr is None:
                    vol_label = None
                else:
                    vol_label = volyr
            else:
                vol_label = volno

            logger.fdebug("adding " + com_NAME + " to the import-queue!")
            impid = dispname + '-' + str(result_comyear) + '-' + str(
                comiss
            )  #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
            logger.fdebug("impid: " + str(impid))
            if cvscanned_loc == os.path.dirname(comlocation):
                cv_cid = cvinfo_CID
                logger.info('CVINFO_COMICID attached : ' + str(cv_cid))
            else:
                cv_cid = None
            import_by_comicids.append({
                "impid":
                impid,
                "comicid":
                cv_cid,
                "issueid":
                None,
                "watchmatch":
                watchmatch,
                "displayname":
                dispname,
                "comicname":
                dispname,  #com_NAME,
                "comicyear":
                result_comyear,
                "volume":
                vol_label,
                "comfilename":
                comfilename,
                "comlocation":
                comlocation.decode(mylar.SYS_ENCODING)
            })
    #logger.fdebug('import_by_ids: ' + str(import_by_comicids))

    #reverse lookup all of the gathered IssueID's in order to get the related ComicID
    vals = mylar.cv.getComic(None, 'import', comicidlist=issueid_list)
    logger.fdebug('vals returned:' + str(vals))

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        #logger.fdebug("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " +
                    str(comiccnt) +
                    " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info(
                    'You checked off Move Files...so that\'s what I am going to do'
                )
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                logger.fdebug('Moving files into appropriate directory')
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    logger.fdebug('ComicLocation: ' + watch_comlocation)
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename']
                    logger.fdebug('Orig. Location: ' + orig_comlocation)
                    logger.fdebug('Orig. Filename: ' + orig_filename)
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        logger.fdebug(
                            'Renaming files according to configuration details : '
                            + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(
                            watch_comicid, watch_comicname, watch_comicyear,
                            watch_comiciss)
                        nfilename = renameit['nfilename']

                        dst_path = os.path.join(watch_comlocation, nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        logger.fdebug(
                            'Renaming files not enabled, keeping original filename(s)'
                        )
                        dst_path = os.path.join(watch_comlocation,
                                                orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    logger.fdebug('I am going to move ' + orig_comlocation +
                                  ' to ' + dst_path)
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info(
                            "Failed to move directory - check directories and manually re-run."
                        )
                    wat += 1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    logger.fdebug('ComicID: ' + str(watch_comicid))
                    logger.fdebug('Issue#: ' + str(watch_issue))
                    issuechk = myDB.selectone(
                        "SELECT * from issues where ComicID=? AND INT_IssueNumber=?",
                        [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        logger.fdebug('No matching issues for this comic#')
                    else:
                        logger.fdebug('...Existing status: ' +
                                      str(issuechk['Status']))
                        control = {"IssueID": issuechk['IssueID']}
                        values = {"Status": "Archived"}
                        logger.fdebug('...changing status of ' +
                                      str(issuechk['Issue_Number']) +
                                      ' to Archived ')
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat += 1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    logger.fdebug('Rescanning.. ' + str(c))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"
    if len(import_by_comicids) > 0:
        import_comicids['comic_info'] = import_by_comicids
        if vals:
            import_comicids['issueid_info'] = vals
        else:
            import_comicids['issueid_info'] = None

        logger.fdebug('import comicids: ' + str(import_by_comicids))

        return import_comicids, len(import_by_comicids)
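
Both library-scan variants decide whether a filename belongs to a watched series by comparing the cleaned filename words against the cleaned watchlist title word for word, then accepting the series when at least 80% of the words line up. A minimal standalone sketch of that scoring, with the helper name and sample titles invented for illustration (multiplying by 100.0 keeps the percentage from being truncated by Python 2 integer division, mirroring the fix noted in the code above):

def match_percent(file_words, watch_words):
    #count the filename words that line up with the corresponding watchlist words
    scount = 0
    for fword, wword in zip(file_words, watch_words):
        if wword.lower() in fword.lower() and len(wword) >= len(fword):
            scount += 1
    return (scount * 100.0) / len(file_words)

print(match_percent(['Batman', 'and', 'Robin'], ['Batman', 'and', 'Robin']))      # -> 100.0
print(match_percent(['Batman', 'and', 'Robin'], ['Batman', 'eternal', 'Robin']))  # -> 66.66...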
Example No. 18
0
def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None):

    if cron and not mylar.LIBRARYSCAN:
        return
        
    if not dir:
        dir = mylar.COMIC_DIR
    
    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)
        
    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace'))
        return

    
    logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    comiccnt = 0
    extensions = ('cbr','cbz')
    for r,d,f in os.walk(dir):
        #for directory in d[:]:
        #    if directory.startswith("."):
        #        d.remove(directory)
        for files in f:
            if any(files.lower().endswith('.' + x.lower()) for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                print "Comic: " + comic
                print "Comic Path: " + comicpath
                print "Comic Size: " + str(comicsize)

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace')

                comiccnt+=1
                comic_dict = { 'ComicFilename':           comic,
                               'ComicLocation':           comicpath,
                               'ComicSize':               comicsize,
                               'Unicode_ComicLocation':   unicode_comic_path }
                comic_list.append(comic_dict)

        logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now")
        logger.info("comiclist: " + str(comic_list))
    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info("loading in the watchlist to see if a series is being watched already...")
    watchlist = myDB.action("SELECT * from comics")
    ComicName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if ' the ' in watchcomic.lower():
            #drop the 'the' from the watchcomic title for proper comparisons.
            watchcomic = re.sub('(?i)\\bthe\\b', '', watchcomic).strip()

        alt_chk = "no" # alt-checker flag (default to no)
         
        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch['AlternateSearch'] != 'None':
            altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt+=1

    logger.info("Successfully loaded " + str(watchcnt) + " series from your watchlist.")

    ripperlist=['digital-',
                'empire',
                'dcp']

    watchfound = 0

    for i in comic_list:
        print i['ComicFilename']

        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        #let's clean up the filename for matching purposes

        cfilename = re.sub('[\_\#\,\/\:\;\-\!\$\%\&\+\'\?\@]', ' ', comfilename)
        #cfilename = re.sub('\s', '_', str(cfilename))

        cm_cn = 0

        #we need to track the counter to make sure we are comparing the right array parts  
        #this takes care of the brackets :)
        m = re.findall('[^()]+', cfilename)
        lenm = len(m)
        print ("there are " + str(lenm) + " words.")
        cnt = 0
        yearmatch = "false"
        foundonwatch = "False"
        issue = 999999

        while (cnt < lenm):
            if m[cnt] is None: break
            if m[cnt] == ' ':
                pass
            else:
                logger.fdebug(str(cnt) + ". Bracket Word: " + m[cnt])
                if cnt == 0:
                    comic_andiss = m[cnt]
                    logger.fdebug("Comic: " + comic_andiss)
                    # if it's not in the standard format this will bork.
                    # let's try to accommodate (somehow).
                    # first remove the extension (if any)
                    extensions = ('cbr', 'cbz')
                    if comic_andiss.lower().endswith(extensions):
                        comic_andiss = comic_andiss[:-4]
                        print ("removed extension from filename.")
                    #now we have to break up the string regardless of formatting.
                    #let's force the spaces.
                    comic_andiss = re.sub('_', ' ', comic_andiss)
                    cs = comic_andiss.split()
                    cs_len = len(cs)
                    cn = ''
                    ydetected = 'no'
                    idetected = 'no'
                    decimaldetect = 'no'
                    for i in reversed(xrange(len(cs))):
                        #start at the end.
                        print ("word: " + str(cs[i]))
                        #assume once we find issue - everything prior is the actual title
                        #idetected = no will ignore everything so it will assume all title                            
                        if (cs[i][:-2] == '19' or cs[i][:-2] == '20') and idetected == 'no':
                            print ("year detected: " + str(cs[i]))
                            ydetected = 'yes'
                            result_comyear = cs[i]
                        elif cs[i].isdigit() and idetected == 'no' or '.' in cs[i]:
                            issue = cs[i]
                            print ("issue detected : " + str(issue))
                            idetected = 'yes'
                            if '.' in cs[i]:
                                #make sure it's a number on either side of decimal and assume decimal issue.
                                decst = cs[i].find('.')
                                dec_st = cs[i][:decst]
                                dec_en = cs[i][decst+1:]
                                print ("st: " + str(dec_st))
                                print ("en: " + str(dec_en))
                                if dec_st.isdigit() and dec_en.isdigit():
                                    print ("decimal issue detected...adjusting.")
                                    issue = dec_st + "." + dec_en
                                    print ("issue detected: " + str(issue))
                                    idetected = 'yes'
                                else:
                                    print ("false decimal represent. Chunking to extra word.")
                                    cn = cn + cs[i] + " "
                                    break
                        elif '#' in cs[i] or decimaldetect == 'yes':
                            print ("issue detected: " + str(cs[i]))
                            idetected = 'yes'

                        else: cn = cn + cs[i] + " "
                    if ydetected == 'no':
                        #assume no year given in filename...
                        result_comyear = "0000"
                    print ("cm?: " + str(cn))
                    if issue != 999999:
                        comiss = issue
                    else:
                        logger.error("Invalid Issue number (none present) for " + comfilename)
                        break
                    cnsplit = cn.split()
                    cname = ''
                    findcn = 0
                    while (findcn < len(cnsplit)):
                        cname = cname + cs[findcn] + " "
                        findcn+=1
                    cname = cname[:len(cname)-1] # drop the end space...
                    print ("assuming name is : " + cname)
                    com_NAME = cname
                    print ("com_NAME : " + com_NAME)
                    yearmatch = "True"
                else:
                    # we're assuming that the year is in brackets (and it should be damnit)
                    if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                        print ("year detected: " + str(m[cnt]))
                        ydetected = 'yes'
                        result_comyear = m[cnt]
            cnt+=1

        splitit = []
        watchcomic_split = []
        logger.fdebug("filename comic and issue: " + cfilename)
        #changed this from '' to ' '
        comic_iss_b4 = re.sub('[\-\:\,]', ' ', com_NAME)
        comic_iss = comic_iss_b4.replace('.',' ')
        logger.fdebug("adjusted  comic and issue: " + str(comic_iss))
        #remove 'the' from here for proper comparisons.
        if ' the ' in comic_iss.lower():
            comic_iss = re.sub('\\bthe\\b', '', comic_iss).strip()
        splitit = comic_iss.split(None)
        logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
        #bmm = re.findall('v\d', comic_iss)
        #if len(bmm) > 0: splitst = len(splitit) - 2
        #else: splitst = len(splitit) - 1
      #-----
        #here we cycle through the Watchlist looking for a match.
        while (cm_cn < watchcnt):
            #setup the watchlist
            comname = ComicName[cm_cn]
            print ("watch_comic:" + comname)
            comyear = ComicYear[cm_cn]
            compub = ComicPublisher[cm_cn]
            comtotal = ComicTotal[cm_cn]
            comicid = ComicID[cm_cn]
            watch_location = ComicLocation[cm_cn]

#            if splitit[(len(splitit)-1)].isdigit():
#                #compares - if the last digit and second last digit are #'s seperated by spaces assume decimal
#                comic_iss = splitit[(len(splitit)-1)]
#                splitst = len(splitit) - 1
#                if splitit[(len(splitit)-2)].isdigit():
#                    # for series that have a digit at the end, it screws up the logistics.
#                    i = 1
#                    chg_comic = splitit[0]
#                    while (i < (len(splitit)-1)):
#                        chg_comic = chg_comic + " " + splitit[i]
#                        i+=1
#                    logger.fdebug("chg_comic:" + str(chg_comic))
#                    if chg_comic.upper() == comname.upper():
#                        logger.fdebug("series contains numerics...adjusting..")
#                    else:
#                        changeup = "." + splitit[(len(splitit)-1)]
#                        logger.fdebug("changeup to decimal: " + str(changeup))
#                        comic_iss = splitit[(len(splitit)-2)] + "." + comic_iss
#                        splitst = len(splitit) - 2
#            else:
              # if the nzb name doesn't follow the series-issue-year format even closely..ignore nzb
#               logger.fdebug("invalid naming format of filename detected - cannot properly determine issue")
#               continue

            # make sure that things like - in watchcomic are accounted for when comparing to nzb.

   # there shouldn't be an issue in the comic now, so let's just assume it's all gravy.
            splitst = len(splitit)
            watchcomic_split = helpers.cleanName(comname)
            watchcomic_split = re.sub('[\-\:\,\.]', ' ', watchcomic_split).split(None)

            logger.fdebug(str(splitit) + " file series word count: " + str(splitst))
            logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split)))
            if (splitst) != len(watchcomic_split):
                logger.fdebug("incorrect comic lengths...not a match")
#                if str(splitit[0]).lower() == "the":
#                    logger.fdebug("THE word detected...attempting to adjust pattern matching")
#                    splitit[0] = splitit[4:]
            else:
                logger.fdebug("length match..proceeding")
                n = 0
                scount = 0
                logger.fdebug("search-length: " + str(splitst))
                logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
                while ( n <= (splitst)-1 ):
                    logger.fdebug("splitit: " + str(splitit[n]))
                    if n < (splitst) and n < len(watchcomic_split):
                        logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
                        if '+' in watchcomic_split[n]:
                            watchcomic_split[n] = re.sub('\+', '', str(watchcomic_split[n]))
                        if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]):
                            logger.fdebug("word matched on : " + str(splitit[n]))
                            scount+=1
                        #elif ':' in splitit[n] or '-' in splitit[n]:
                        #    splitrep = splitit[n].replace('-', '')
                        #    print ("non-character keyword...skipped on " + splitit[n])
                    elif str(splitit[n]).lower().startswith('v'):
                        logger.fdebug("possible versioning..checking")
                        #we hit a versioning # - account for it
                        if splitit[n][1:].isdigit():
                            comicversion = str(splitit[n])
                            logger.fdebug("version found: " + str(comicversion))
                    else:
                        logger.fdebug("Comic / Issue section")
                        if splitit[n].isdigit():
                            logger.fdebug("issue detected")
                            comiss = splitit[n]
                            comicNAMER = n - 1
                            com_NAME = splitit[0]
                            cmnam = 1
                            while (cmnam <= comicNAMER):
                                com_NAME = str(com_NAME) + " " + str(splitit[cmnam])
                                cmnam+=1
                            logger.fdebug("comic: " + str(com_NAME))
                        else:
                            logger.fdebug("non-match for: "+ str(splitit[n]))
                            pass
                    n+=1
                #set the match threshold to 80% (for now)
                # if it's less than 80% consider it a non-match and discard.
                #splitit has to splitit-1 because last position is issue.
                wordcnt = int(scount)
                logger.fdebug("scount:" + str(wordcnt))
                totalcnt = int(splitst)
                logger.fdebug("splitit-len:" + str(totalcnt))
                spercent = (float(wordcnt) / totalcnt) * 100
                logger.fdebug("we got " + str(spercent) + " percent.")
                if int(spercent) >= 80:
                    logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
                    logger.fdebug("this should be a match!")
#                    if '.' in comic_iss:
#                        comisschk_find = comic_iss.find('.')
#                        comisschk_b4dec = comic_iss[:comisschk_find]
#                        comisschk_decval = comic_iss[comisschk_find+1:]
#                        logger.fdebug("Found IssueNumber: " + str(comic_iss))
#                        logger.fdebug("..before decimal: " + str(comisschk_b4dec))
#                        logger.fdebug("...after decimal: " + str(comisschk_decval))
#                        #--let's make sure we don't wipe out decimal issues ;)
#                        if int(comisschk_decval) == 0:
#                            ciss = comisschk_b4dec
#                            cintdec = int(comisschk_decval)
#                        else:
#                            if len(comisschk_decval) == 1:
#                                ciss = comisschk_b4dec + "." + comisschk_decval
#                                cintdec = int(comisschk_decval) * 10
#                            else:
#                                ciss = comisschk_b4dec + "." + comisschk_decval.rstrip('0')
#                                cintdec = int(comisschk_decval.rstrip('0')) * 10
#                        comintIss = (int(comisschk_b4dec) * 1000) + cintdec
#                    else:
#                        comintIss = int(comic_iss) * 1000
                    logger.fdebug("issue we found for is : " + str(comiss))
                    #set the year to the series we just found ;)
                    result_comyear = comyear
                    #issue comparison now as well
                    logger.info(u"Found " + comname + " (" + str(comyear) + ") issue: " + str(comiss))
#                    watchfound+=1
                    watchmatch = str(comicid)
#                    watch_kchoice.append({
#                       "ComicID":         str(comicid),
#                       "ComicName":       str(comname),
#                       "ComicYear":       str(comyear),
#                       "ComicIssue":      str(int(comic_iss)),
#                       "ComicLocation":   str(watch_location),
#                       "OriginalLocation" : str(comlocation),
#                       "OriginalFilename" : str(comfilename)
#                                        })
                    foundonwatch = "True"
                    break
                elif int(spercent) < 80:
                    logger.fdebug("failure - we only got " + str(spercent) + "% right!")
            cm_cn+=1

        if foundonwatch == "False":
            watchmatch = None
        #---if it's not a match - send it to the importer.
        n = 0
#        print ("comic_andiss : " + str(comic_andiss))
#        csplit = comic_andiss.split(None)
#        while ( n <= (len(csplit)-1) ):
#            print ("csplit:" + str(csplit[n]))
#            if csplit[n].isdigit():
#                logger.fdebug("issue detected")
#                comiss = splitit[n]
#                logger.fdebug("issue # : " + str(comiss))
#                comicNAMER = n - 1
#                com_NAME = csplit[0]
#                cmnam = 1
#                while (cmnam <= comicNAMER):
#                    com_NAME = str(com_NAME) + " " + str(csplit[cmnam])
#                    cmnam+=1
#                logger.fdebug("comic: " + str(com_NAME))
#            n+=1
        if result_comyear is None: result_comyear = '0000' #no year in filename basically.
        print ("adding " + com_NAME + " to the import-queue!")
        impid = com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
        print ("impid: " + str(impid))
        import_by_comicids.append({ 
            "impid": impid,
            "watchmatch": watchmatch,
            "comicname" : com_NAME,
            "comicyear" : result_comyear,
            "comfilename" : comfilename,
            "comlocation" : comlocation.decode(mylar.SYS_ENCODING)
                                   })

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        print ("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info("You checked off Move Files...so that's what I'm going to do") 
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                print("Moving files into appropriate directory")
                while (wat < watchfound): 
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    print ("ComicLocation: " + str(watch_comlocation))
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename'] 
                    print ("Orig. Location: " + str(orig_comlocation))
                    print ("Orig. Filename: " + str(orig_filename))
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
                        nfilename = renameit['nfilename']
                    
                        dst_path = os.path.join(watch_comlocation,nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        print("Renaming files not enabled, keeping original filename(s)")
                        dst_path = os.path.join(watch_comlocation,orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    print ("I'm going to move " + str(orig_comlocation) + " to .." + str(dst_path))
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info("Failed to move directory - check directories and manually re-run.")
                    wat+=1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    print ("ComicID: " + str(watch_comicid))
                    print ("Issue#: " + str(watch_issue))
                    issuechk = myDB.action("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        print ("no matching issues for this comic#")
                    else:
                        print("...Existing status: " + str(issuechk['Status']))
                        control = {"IssueID":   issuechk['IssueID']}
                        values = { "Status":   "Archived"}
                        print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ")
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)                    
                    wat+=1
            if comicids is None: pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    print ("Rescanning.. " + str(comicids[c]))
                    updater.forceRescan(comicids[c])
                    c+=1
        if not len(import_by_comicids):
            return "Completed"
    if len(import_by_comicids) > 0:
        import_comicids['comic_info'] = import_by_comicids
        print ("import comicids: " + str(import_by_comicids))
        return import_comicids, len(import_by_comicids)
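The watchlist comparison above is a plain word-overlap heuristic: the parsed filename and the watched series name are each split into word lists, position-by-position matches are counted, and the candidate is only accepted when at least 80% of the filename words line up. A minimal standalone sketch of that idea (the function and sample titles are illustrative, not Mylar's own helpers):

def word_match_percent(file_words, watch_words):
    # count positions where the watched word appears inside the filename word (case-insensitive)
    matched = 0
    for n in range(min(len(file_words), len(watch_words))):
        if watch_words[n].lower() in file_words[n].lower():
            matched += 1
    # float division so 4/5 becomes 80.0 rather than 0 under Python 2 integer division
    return (float(matched) / len(file_words)) * 100

# word_match_percent(['Invincible', 'Iron', 'Man'], ['Invincible', 'Iron', 'Man'])  -> 100.0 (accepted)
# word_match_percent(['Invincible', 'Iron', 'Man'], ['Iron', 'Man'])                -> 0.0 (rejected)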
Ejemplo n.º 19
0
def GCDimport(gcomicid, pullupd=None, imported=None, ogcname=None):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)

    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID": gcdcomicid}

    comic = myDB.action(
        'SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation, ComicPublisher FROM comics WHERE ComicID=?',
        [gcomicid]).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    ComicPublished = comic[3]
    comlocation = comic[5]
    ComicPublisher = comic[6]
    #ComicImage = comic[4]
    #print ("Comic:" + str(ComicName))

    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    #comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        if dbcomic is None:
            newValueDict = {
                "ComicName":
                "Fetch failed, try refreshing. (%s)" % (gcdcomicid),
                "Status": "Active"
            }
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(comicorder=mylar.COMICSORT, imported=gcomicid)

    if ComicName.startswith('The '):
        sortname = ComicName[4:]
    else:
        sortname = ComicName

    logger.info(u"Now adding/updating: " + ComicName)
    #--Now that we know ComicName, let's try some scraping
    #--Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(comseries=None,
                                 resultURL=resultURL,
                                 vari_loop=0,
                                 ComicID=gcdcomicid,
                                 TotalIssues=ComicIssues,
                                 issvariation=None,
                                 resultPublished=None)
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" +
                    ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    #--End

    ComicImage = gcdinfo['ComicImage']

    #comic book location on machine
    # setup default location here
    if comlocation is None:
        # let's remove the non-standard characters here.
        u_comicnm = ComicName
        u_comicname = u_comicnm.encode('ascii', 'ignore').strip()
        if ':' in u_comicname or '/' in u_comicname or ',' in u_comicname:
            comicdir = u_comicname
            if ':' in comicdir:
                comicdir = comicdir.replace(':', '')
            if '/' in comicdir:
                comicdir = comicdir.replace('/', '-')
            if ',' in comicdir:
                comicdir = comicdir.replace(',', '')
        else:
            comicdir = u_comicname

        series = comicdir
        publisher = ComicPublisher
        year = ComicYear

        #do work to generate folder path
        values = {
            '$Series': series,
            '$Publisher': publisher,
            '$Year': year,
            '$series': series.lower(),
            '$publisher': publisher.lower(),
            '$Volume': year
        }

        if mylar.FOLDER_FORMAT == '':
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(
                mylar.FOLDER_FORMAT, values)

        #comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(
                u"There is no general directory specified - please specify in Config/Post-Processing."
            )
            return
        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)

    #if it doesn't exist - create it (otherwise will bugger up later on)
    if os.path.isdir(str(comlocation)):
        logger.info(u"Directory (" + str(comlocation) +
                    ") already exists! Continuing...")
    else:
        #print ("Directory doesn't exist!")
        #try:
        #    os.makedirs(str(comlocation))
        #    logger.info(u"Directory successfully created at: " + str(comlocation))
        #except OSError:
        #    logger.error(u"Could not create comicdir : " + str(comlocation))
        filechecker.validateAndCreateDirectory(comlocation, True)

    comicIssues = gcdinfo['totalissues']

    #let's download the image...
    if os.path.exists(mylar.CACHE_DIR): pass
    else:
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " +
                        str(mylar.CACHE_DIR))

        except OSError:
            logger.error(u"Could not create cache dir : " +
                         str(mylar.CACHE_DIR))

    coverfile = os.path.join(mylar.CACHE_DIR, str(gcomicid) + ".jpg")

    #try:
    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = os.path.join('cache', str(gcomicid) + ".jpg")

            #this is for Firefox when outside the LAN...it works, but I don't know how to implement it
            #without breaking the normal flow for inside the LAN (above)
            #ComicImage = "http://" + str(mylar.HTTP_HOST) + ":" + str(mylar.HTTP_PORT) + "/cache/" + str(comi$

            logger.info(u"Sucessfully retrieved cover for " + ComicName)
            #if the comic cover local is checked, save a cover.jpg to the series folder.
            if mylar.COMIC_COVER_LOCAL:
                comiclocal = os.path.join(str(comlocation) + "/cover.jpg")
                shutil.copy(ComicImage, comiclocal)
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    #if comic['ComicVersion'].isdigit():
    #    comicVol = "v" + comic['ComicVersion']
    #else:
    #    comicVol = None

    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {
        "ComicName": ComicName,
        "ComicSortName": sortname,
        "ComicYear": ComicYear,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        #"ComicVersion":     comicVol,
        "ComicImage": ComicImage,
        #"ComicPublisher":   comic['ComicPublisher'],
        #"ComicPublished":   comicPublished,
        "DateAdded": helpers.today(),
        "Status": "Loading"
    }

    myDB.upsert("comics", newValueDict, controlValueDict)

    #comicsort here...
    #run the re-sortorder here in order to properly display the page
    if pullupd is None:
        helpers.ComicSort(sequence='update')

    logger.info(u"Sucessfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    #let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    #print ("total issues:" + str(iscnt))
    #---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while (bb <= iscnt):
        #---NEW.code
        try:
            gcdval = gcdinfo['gcdchoice'][bb]
            #print ("gcdval: " + str(gcdval))
        except IndexError:
            #account for gcd variation here
            if gcdinfo['gcdvariation'] == 'gcd':
                #print ("gcd-variation accounted for.")
                issdate = '0000-00-00'
                int_issnum = int(gcdis / 1000)
            break
        if 'nn' in str(gcdval['GCDIssue']):
            #no number detected - GN, TP or the like
            logger.warn(
                u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time."
            )
            updater.no_searchresults(gcomicid)
            return
        elif '.' in str(gcdval['GCDIssue']):
            issst = str(gcdval['GCDIssue']).find('.')
            issb4dec = str(gcdval['GCDIssue'])[:issst]
            #if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval['GCDIssue'])[issst + 1:]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0: issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
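            # e.g. GCDIssue '3.1' -> issb4dec '3', decisval 10 -> gcdis 3010; packing issues as
            # (whole * 1000) + scaled decimal keeps decimal issues sortable as plain integers.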
        else:
            gcdis = int(str(gcdval['GCDIssue'])) * 1000
            gcd_issue = str(gcdval['GCDIssue'])
        #get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval['GCDDate'])
        issid = "G" + str(gcdval['IssueID'])
        if gcdval['GCDDate'] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval['GCDDate'])
        #print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        #---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action('SELECT * from issues WHERE IssueID=?',
                                 [issid]).fetchone()

        #adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": gcomicid,
            "ComicName": ComicName,
            "Issue_Number": gcd_issue,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum
        }

        # Only add DateAdded if the issue is not already in the database
        # (set after building newValueDict so the value actually makes it into the upsert)
        if iss_exists is None:
            newValueDict['DateAdded'] = helpers.today()

        #print ("issueid:" + str(controlValueDict))
        #print ("values:" + str(newValueDict))

        if mylar.AUTOWANT_ALL:
            newValueDict['Status'] = "Wanted"
        elif issdate > helpers.today() and mylar.AUTOWANT_UPCOMING:
            newValueDict['Status'] = "Wanted"
        else:
            newValueDict['Status'] = "Skipped"

        if iss_exists:
            #print ("Existing status : " + str(iss_exists['Status']))
            newValueDict['Status'] = iss_exists['Status']

        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1

#        logger.debug(u"Updating comic cache for " + ComicName)
#        cache.getThumb(ComicID=issue['issueid'])

#        logger.debug(u"Updating cache for: " + ComicName)
#        cache.getThumb(ComicIDcomicid)

    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now()
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    if mylar.CVINFO:
        if not os.path.exists(comlocation + "/cvinfo"):
            with open(comlocation + "/cvinfo", "w") as text_file:
                text_file.write("http://www.comicvine.com/volume/49-" +
                                str(comicid))

    logger.info(u"Updating complete for: " + ComicName)

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(gcomicid, comlocation, ogcname)
        else:
            logger.info(
                "Mass import - Moving not Enabled. Setting Archived Status for import."
            )
            moveit.archivefiles(gcomicid, ogcname)

    #check for existing files...
    updater.forceRescan(gcomicid)

    if pullupd is None:
        # let's check the pullist for anything at this time as well since we're here.
        if mylar.AUTOWANT_UPCOMING and 'Present' in ComicPublished:
            logger.info(u"Checking this week's pullist for new issues of " +
                        ComicName)
            updater.newpullcheck(comic['ComicName'], gcomicid)

        #here we grab issues that have been marked as wanted above...

        results = myDB.select(
            "SELECT * FROM issues where ComicID=? AND Status='Wanted'",
            [gcomicid])
        if results:
            logger.info(u"Attempting to grab wanted issues for : " + ComicName)

            for result in results:
                foundNZB = "none"
                if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL
                        or mylar.NEWZNAB or mylar.NZBX) and (mylar.SAB_HOST):
                    foundNZB = search.searchforissue(result['IssueID'])
                    if foundNZB == "yes":
                        updater.foundsearch(result['ComicID'],
                                            result['IssueID'])
        else:
            logger.info(u"No issues marked as wanted for " + ComicName)

        logger.info(u"Finished grabbing what I could.")
Ejemplo n.º 20
0
    def Process_next(self,comicid,issueid,issuenumOG,ml=None):
            annchk = "no"
            extensions = ('.cbr', '.cbz')
            snatchedtorrent = False
            myDB = db.DBConnection()
            comicnzb = myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            issuenzb = myDB.selectone("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone()
            if ml is not None and mylar.SNATCHEDTORRENT_NOTIFY:
                snatchnzb = myDB.selectone("SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=?) AND Status='Snatched'", [issueid,comicid,'KAT','CBT']).fetchone() 
                if snatchnzb is None:
                    logger.fdebug('Was not downloaded with Mylar and the usage of torrents. Disabling torrent manual post-processing completion notification.')
                else:
                    logger.fdebug('Was downloaded from ' + snatchnzb['Provider'] + '. Enabling torrent manual post-processing completion notification.')
                    snatchedtorrent = True
            logger.fdebug('issueid: ' + str(issueid))
            logger.fdebug('issuenumOG: ' + str(issuenumOG))
            if issuenzb is None:
                issuenzb = myDB.selectone("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone()
                annchk = "yes"
            #issueno = str(issuenum).split('.')[0]
            #new CV API - removed all decimals...here we go AGAIN!
            issuenum = issuenzb['Issue_Number']
            issue_except = 'None'

            if 'au' in issuenum.lower() and issuenum[:1].isdigit():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AU'
            elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AI'
            elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = '.INH'
            elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
                if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = '.NOW'
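            # e.g. Issue_Number '15AU'   -> issuenum '15', issue_except ' AU'
            #      Issue_Number '27NOW!' -> issuenum '27', issue_except '.NOW'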

            if '.' in issuenum:
                iss_find = issuenum.find('.')
                iss_b4dec = issuenum[:iss_find]
                iss_decval = issuenum[iss_find+1:]
                if int(iss_decval) == 0:
                    iss = iss_b4dec
                    issdec = int(iss_decval)
                    issueno = str(iss)
                    self._log("Issue Number: " + str(issueno))
                    logger.fdebug("Issue Number: " + str(issueno))
                else:
                    if len(iss_decval) == 1:
                        iss = iss_b4dec + "." + iss_decval
                        issdec = int(iss_decval) * 10
                    else:
                        iss = iss_b4dec + "." + iss_decval.rstrip('0')
                        issdec = int(iss_decval.rstrip('0')) * 10
                    issueno = iss_b4dec
                    self._log("Issue Number: " + str(iss))
                    logger.fdebug("Issue Number: " + str(iss))
            else:
                iss = issuenum
                issueno = str(iss)

            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

            if len(str(issueno)) > 0:
                if int(issueno) < 0:
                    self._log("issue detected is a negative")
                    prettycomiss = '-' + str(zeroadd) + str(abs(int(issueno)))
                elif int(issueno) < 10:
                    self._log("issue detected less than 10")
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None': 
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100")
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss))
                else:
                    self._log("issue detected greater than 100")
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss))
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss))

            if annchk == "yes":
                self._log("Annual detected.")
            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear))
            logger.fdebug("Issue Year : " + str(issueyear))
            month = issuenzb['IssueDate'][5:7].replace('-','').strip()
            month_name = helpers.fullmonth(month)
#            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher)
            logger.fdebug("Publisher: " + str(publisher))
            #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
            series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
            self._log("Series: " + series)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation)
            logger.fdebug("Comic Location: " + str(comlocation))
            comversion = comicnzb['ComicVersion']
            self._log("Comic Version: " + str(comversion))
            logger.fdebug("Comic Version: " + str(comversion))
            if comversion is None:
                comversion = 'None'
            #if comversion is None, remove it so it doesn't populate with 'None'
            if comversion == 'None':
                chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                self._log("No version # found for series - tag will not be available for renaming.")
                logger.fdebug("No version # found for series, removing from filename")
                logger.fdebug("new format is now: " + str(chunk_file_format))
            else:
                chunk_file_format = mylar.FILE_FORMAT

            if annchk == "no":
                chunk_f_f = re.sub('\$Annual','',chunk_file_format)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                logger.fdebug('not an annual - removing from filename parameters')
                logger.fdebug('new format: ' + str(chunk_file_format))

            else:
                logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
                if '$Annual' not in chunk_file_format:
                #if it's an annual, but $Annual isn't specified in file_format, we need to
                #force it in there, by default in the format of $Annual $Issue
                    prettycomiss = "Annual " + str(prettycomiss)
                    logger.fdebug('prettycomiss: ' + str(prettycomiss))


            ofilename = None

            #if meta-tagging is not enabled, we need to declare the check as being fail
            #if meta-tagging is enabled, it gets changed just below to a default of pass
            pcheck = "fail"

            #tag the meta.
            if mylar.ENABLE_META:
                self._log("Metatagging enabled - proceeding...")
                logger.fdebug("Metatagging enabled - proceeding...")
                pcheck = "pass"
                try:
                    import cmtagmylar
                    if ml is None:
                        pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
                    else:
                        pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation'])

                except ImportError:
                    logger.fdebug("comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/")
                    logger.fdebug("continuing with PostProcessing, but I'm not using metadata.")
                    pcheck = "fail"
                
                if pcheck == "fail":
                    self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
                    logger.fdebug("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
                elif pcheck == "unrar error":
                    self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
                    logger.error("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
                    return self.log
                else:
                    otofilename = pcheck
                    self._log("Sucessfully wrote metadata to .cbz - Continuing..")
                    logger.fdebug("Sucessfully wrote metadata to .cbz (" + str(otofilename) + ") - Continuing..")
            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$VolumeY':   'V' + str(seriesyear),
                           '$VolumeN':   comversion,
                           '$monthname': month_name,
                           '$month':     month,
                           '$Annual':    'Annual'
                          }
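            # illustrative only: with a FILE_FORMAT of '$Series $Issue ($Year)' and the values above,
            # helpers.replace_all() would render something like 'Invincible 012 (2012)'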


            #if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
            if ml is None:
                #make sure odir always has a value, even if no cbr/cbz is found in the walk below
                odir = None
                for root, dirnames, filenames in os.walk(self.nzb_folder):
                    for filename in filenames:
                        if filename.lower().endswith(extensions):
                            odir = root
                            ofilename = filename
                            path, ext = os.path.splitext(ofilename)
 
                if odir is None:
                    logger.fdebug('no root folder set.')
                    odir = self.nzb_folder
                logger.fdebug('odir: ' + str(odir))
                logger.fdebug('ofilename: ' + str(ofilename))

            else:
                if pcheck == "fail":
                    otofilename = ml['ComicLocation']
                logger.fdebug('otofilename:' + str(otofilename))
                odir, ofilename = os.path.split(otofilename)
                logger.fdebug('odir: ' + str(odir))
                logger.fdebug('ofilename: ' + str(ofilename))
                path, ext = os.path.splitext(ofilename)
                logger.fdebug('path: ' + str(path))
                logger.fdebug('ext:' + str(ext))

            if ofilename is None:
                logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
                return
            self._log("Original Filename: " + ofilename)
            self._log("Original Extension: " + ext)
            logger.fdebug("Original Filname: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
                self._log("Rename Files isn't enabled...keeping original filename.")
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(chunk_file_format, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:\?]', '', nfilename)
            nfilename = re.sub('[\/]', '-', nfilename)
            self._log("New Filename: " + nfilename)
            logger.fdebug("New Filename: " + str(nfilename))

            #src = os.path.join(self.nzb_folder, ofilename)
            src = os.path.join(odir, ofilename)
            filechecker.validateAndCreateDirectory(comlocation, True)

            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src)
            self._log("Destination:" +  dst)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            if ml is None:
                #downtype = for use with updater on history table to set status to 'Downloaded'
                downtype = 'True'
                #non-manual run moving/deleting...
                logger.fdebug('self.nzb_folder: ' + self.nzb_folder)
                logger.fdebug('odir: ' + str(odir))
                logger.fdebug('ofilename:' + str(ofilename))
                logger.fdebug('nfilename:' + str(nfilename + ext))
                if mylar.RENAME_FILES:
                    if str(ofilename) != str(nfilename + ext):
                        logger.fdebug("Renaming " + os.path.join(odir, str(ofilename)) + " ..to.. " + os.path.join(odir,str(nfilename + ext)))
                        os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir,str(nfilename + ext)))
                    else:
                        logger.fdebug('filename is identical to the original, not renaming.')

                #src = os.path.join(self.nzb_folder, str(nfilename + ext))
                src = os.path.join(odir, str(nfilename + ext))
                try:
                    shutil.move(src, dst)
                except (OSError, IOError):
                    self._log("Failed to move directory - check directories and manually re-run.")
                    self._log("Post-Processing ABORTED.")
                    return
                #tidyup old path
                try:
                    shutil.rmtree(self.nzb_folder)
                except (OSError, IOError):
                    self._log("Failed to remove temporary directory - check directory and manually re-run.")
                    self._log("Post-Processing ABORTED.")
                    return

                self._log("Removed temporary directory : " + str(self.nzb_folder))
            else:
                #downtype = for use with updater on history table to set status to 'Post-Processed'
                downtype = 'PP'
                #Manual Run, this is the portion.
                if mylar.RENAME_FILES:
                    if str(ofilename) != str(nfilename + ext):
                        logger.fdebug("Renaming " + os.path.join(self.nzb_folder, str(ofilename)) + " ..to.. " + os.path.join(self.nzb_folder,str(nfilename + ext)))
                        os.rename(os.path.join(odir, str(ofilename)), os.path.join(odir ,str(nfilename + ext)))
                    else:
                        logger.fdebug('filename is identical to the original, not renaming.')
                src = os.path.join(odir, str(nfilename + ext))
                logger.fdebug('odir rename: ' + os.path.join(odir, str(ofilename)) + ' TO ' + os.path.join(odir, str(nfilename + ext)))
                logger.fdebug('odir src : ' + os.path.join(odir, str(nfilename + ext)))
                logger.fdebug("Moving " + src + " ... to ... " + dst)
                try:
                    shutil.move(src, dst)
                except (OSError, IOError):
                    logger.fdebug("Failed to move directory - check directories and manually re-run.")
                    logger.fdebug("Post-Processing ABORTED.")
                    return
                logger.fdebug("Successfully moved to : " + dst)
                #tidyup old path
                #try:
                #    os.remove(os.path.join(self.nzb_folder, str(ofilename)))
                #    logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename)))
                #except (OSError, IOError):
                #    logger.fdebug("Failed to remove temporary directory - check directory and manually re-run.")
                #    logger.fdebug("Post-Processing ABORTED.")
                #    return
                #logger.fdebug("Removed temporary directory : " + str(self.nzb_folder))

            #Hopefully set permissions on downloaded file
            try:
                permission = int(mylar.CHMOD_FILE, 8)
                os.umask(0)
                os.chmod(dst.rstrip(), permission)
            except OSError:
                logger.error('Failed to change file permissions. Ensure that the user running Mylar has proper permissions to change permissions in : ' + dst)
                logger.fdebug('Continuing post-processing but unable to change file permissions in ' + dst)
                    #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
                    #update snatched table to change status to Downloaded
            
            if annchk == "no":
                updater.foundsearch(comicid, issueid, down=downtype)
                dispiss = 'issue: ' + str(issuenumOG)
            else:
                updater.foundsearch(comicid, issueid, mode='want_ann', down=downtype)
                dispiss = 'annual issue: ' + str(issuenumOG)

                    #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " " + dispiss )
            self._log(u"Post Processing SUCCESSFUL! ")

            if mylar.WEEKFOLDER:
                #if enabled, will *copy* the post-processed file to the weeklypull list folder for the given week.
                weeklypull.weekly_singlecopy(comicid,issuenum,str(nfilename+ext),dst)

            # retrieve/create the corresponding comic objects
            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata )

            if ml is not None:
                #we only need to return self.log if it's a manual run and it's not a snatched torrent
                if snatchedtorrent: 
                    #manual run + snatched torrent
                    pass
                else:
                    #manual run + not snatched torrent (or normal manual-run)
                    return self.log

            if annchk == "no":
                prline = series + '(' + issueyear + ') - issue #' + issuenumOG
            else:
                prline = series + ' Annual (' + issueyear + ') - issue #' + issuenumOG
            prline2 = 'Mylar has downloaded and post-processed: ' + prline

            if mylar.PROWL_ENABLED:
                pushmessage = prline
                logger.info(u"Prowl request")
                prowl = notifiers.PROWL()
                prowl.notify(pushmessage,"Download and Postprocessing completed")
    
            if mylar.NMA_ENABLED:
                nma = notifiers.NMA()
                nma.notify(prline=prline, prline2=prline2)

            if mylar.PUSHOVER_ENABLED:
                logger.info(u"Pushover request")
                pushover = notifiers.PUSHOVER()
                pushover.notify(prline, "Download and Post-Processing completed")

            if mylar.BOXCAR_ENABLED:
                boxcar = notifiers.BOXCAR()
                boxcar.notify(prline=prline, prline2=prline2)

            if mylar.PUSHBULLET_ENABLED:
                pushbullet = notifiers.PUSHBULLET()
                pushbullet.notify(prline=prline, prline2=prline2)
             
            return self.log
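Taken together, the zero-suppression branches above reduce to: pick a pad width from ZERO_LEVEL_N, zero-fill the whole part of the issue number, keep a non-zero decimal part, and append any suffix (' AU', '.NOW', ...) at the end. A compact sketch of that behaviour with a hypothetical helper name (negative issues and other edge cases are glossed over):

def pretty_issue(issueno, zero_level_n='00x', suffix=''):
    # zero_level_n mirrors the config choices used above: 'none' -> no padding, '0x' -> width 2, '00x' -> width 3
    width = {'none': 1, '0x': 2, '00x': 3}.get(zero_level_n, 1)
    whole, dot, dec = str(issueno).partition('.')
    pretty = whole.zfill(width)
    if dec and int(dec) > 0:
        pretty += '.' + dec
    return pretty + suffix

# pretty_issue('4')                -> '004'
# pretty_issue('12.1')             -> '012.1'
# pretty_issue('104')              -> '104'
# pretty_issue('7', '0x', '.NOW')  -> '07.NOW'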
Ejemplo n.º 21
0
def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None, queue=None):

    if cron and not mylar.LIBRARYSCAN:
        return

    if not dir:
        dir = mylar.COMIC_DIR

    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace'))
        return


    logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    failure_list = []
    comiccnt = 0
    extensions = ('cbr','cbz')
    cv_location = []
    cbz_retry = 0

    mylar.IMPORT_STATUS = 'Now attempting to parse files for additional information'

    #mylar.IMPORT_PARSED_COUNT #used to count what #/totalfiles the filename parser is currently on
    for r, d, f in os.walk(dir):
        for files in f:
            mylar.IMPORT_FILES +=1
            if 'cvinfo' in files:
                cv_location.append(r)
                logger.fdebug('CVINFO found: ' + os.path.join(r))
            if any(files.lower().endswith('.' + x.lower()) for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                t = filechecker.FileChecker(dir=r, file=comic)
                results = t.listFiles()
                #logger.info(results)
                #'type':           re.sub('\.','', filetype).strip(),
                #'sub':            path_list,
                #'volume':         volume,
                #'match_type':     match_type,
                #'comicfilename':  filename,
                #'comiclocation':  clocation,
                #'series_name':    series_name,
                #'series_volume':  issue_volume,
                #'series_year':    issue_year,
                #'justthedigits':  issue_number,
                #'annualcomicid':  annual_comicid,
                #'scangroup':      scangroup}

                logger.fdebug('Comic: ' + comic + ' [' + comicpath + '] - ' + str(comicsize) + ' bytes')

                if results:
                    resultline = '[PARSE-' + results['parse_status'].upper() + ']'
                    resultline += '[SERIES: ' + results['series_name'] + ']'
                    if results['series_volume'] is not None:
                        resultline += '[VOLUME: ' + results['series_volume'] + ']'
                    if results['issue_year'] is not None:
                        resultline += '[ISSUE YEAR: ' + str(results['issue_year']) + ']'
                    if results['issue_number'] is not None:
                        resultline += '[ISSUE #: ' + results['issue_number'] + ']'
                    logger.fdebug(resultline)
                else:
                    logger.fdebug('[PARSED] FAILURE.')
                    continue

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace')

                if results['parse_status'] == 'success':
                    comic_list.append({'ComicFilename':           comic,
                                       'ComicLocation':           comicpath,
                                       'ComicSize':               comicsize,
                                       'Unicode_ComicLocation':   unicode_comic_path,
                                       'parsedinfo':              {'series_name':    results['series_name'],
                                                                   'series_volume':  results['series_volume'],
                                                                   'issue_year':     results['issue_year'],
                                                                   'issue_number':   results['issue_number']}
                                       })
                    comiccnt +=1
                    mylar.IMPORT_PARSED_COUNT +=1
                else:
                    failure_list.append({'ComicFilename':           comic,
                                         'ComicLocation':           comicpath,
                                         'ComicSize':               comicsize,
                                         'Unicode_ComicLocation':   unicode_comic_path,
                                         'parsedinfo':              {'series_name':    results['series_name'],
                                                                     'series_volume':  results['series_volume'],
                                                                     'issue_year':     results['issue_year'],
                                                                     'issue_number':   results['issue_number']}
                                       })
                    mylar.IMPORT_FAILURE_COUNT +=1
                    if comic.endswith('.cbz'):
                        cbz_retry +=1


    mylar.IMPORT_TOTALFILES = comiccnt
    logger.info('I have successfully discovered & parsed a total of ' + str(comiccnt) + ' files....analyzing now')
    logger.info('I have not been able to determine what ' + str(len(failure_list)) + ' files are')
    logger.info('However, ' + str(cbz_retry) + ' files are in a cbz format, which may contain metadata.')

    mylar.IMPORT_STATUS = 'Successfully parsed ' + str(comiccnt) + ' files'

    #return queue.put(valreturn)

    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info("loading in the watchlist to see if a series is being watched already...")
    watchlist = myDB.select("SELECT * from comics")
    ComicName = []
    DisplayName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        #use the comicname_filesafe to start
        watchdisplaycomic = watch['ComicName'].encode('utf-8').strip() #re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['ComicName_Filesafe']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if watchcomic.lower().startswith('the '):
            #drop the leading 'The ' from the watchcomic title for proper comparisons.
            watchcomic = watchcomic[4:]

        alt_chk = "no" # alt-checker flag (default to no)

        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch['AlternateSearch'] != 'None':
            altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', '', watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        DisplayName.append(watchdisplaycomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt+=1

    logger.info("Successfully loaded " + str(watchcnt) + " series from your watchlist.")

    ripperlist=['digital-',
                'empire',
                'dcp']

    watchfound = 0

    datelist = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
#    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
#    #search for number as text, and change to numeric
#    for numbs in basnumbs:
#        #logger.fdebug("numbs:" + str(numbs))
#        if numbs in ComicName.lower():
#            numconv = basnumbs[numbs]
#            #logger.fdebug("numconv: " + str(numconv))

    issueid_list = []
    cvscanned_loc = None
    cvinfo_CID = None
    cnt = 0
    mylar.IMPORT_STATUS = '[0%] Now parsing individual filenames for metadata if available'

    for i in comic_list:
        mylar.IMPORT_STATUS = '[' + str(cnt) + '/' + str(comiccnt) + '] Now parsing individual filenames for metadata if available'
        logger.fdebug('Analyzing : ' + i['ComicFilename'])
        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        issueinfo = None
        #probably need to zero these issue-related metadata to None so we can pick the best option
        issuevolume = None

        #Make sure cvinfo is checked for FIRST (so that CID can be attached to all files properly thereafter as they're scanned in)
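        # (illustration / assumption) a cvinfo file is expected to contain a single
        # ComicVine volume URL on its first line, e.g.
        #     https://comicvine.gamespot.com/some-series/4050-12345/
        # the trailing slash matters: the code below takes the second-to-last path
        # segment and strips the '4050-' prefix to get the ComicID.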
        if os.path.dirname(comlocation) in cv_location and os.path.dirname(comlocation) != cvscanned_loc:

        #if comfilename == 'cvinfo':
            logger.info('comfilename: ' + comfilename)
            logger.info('cv_location: ' + str(cv_location))
            logger.info('comlocation: ' + os.path.dirname(comlocation))
            #if cvscanned_loc != comlocation:
            try:
                with open(os.path.join(os.path.dirname(comlocation), 'cvinfo')) as f:
                    urllink = f.readline()

                if urllink:
                    cid = urllink.split('/')
                    if '4050-' in cid[-2]:
                        cvinfo_CID = re.sub('4050-', '', cid[-2]).strip()
                        logger.info('CVINFO file located within directory. Attaching everything in directory that is valid to ComicID: ' + str(cvinfo_CID))
                        #store the location of the cvinfo so it's applied to the correct directory (since we're usually scanning multiple directories)
                        cvscanned_loc = os.path.dirname(comlocation)
                else:
                    logger.error("Could not read cvinfo file properly (or it does not contain any data)")
            except (OSError, IOError):
                logger.error("Could not read cvinfo file properly (or it does not contain any data)")
        #else:
        #    don't scan in it again if it's already been done initially
        #    continue

        if mylar.IMP_METADATA:
            #if read tags is enabled during import, check here.
            if i['ComicLocation'].endswith('.cbz'):
                logger.fdebug('[IMPORT-CBZ] Metatagging checking enabled.')
                logger.info('[IMPORT-CBZ] Attempting to read tags present in file: ' + i['ComicLocation'])
                issueinfo = helpers.IssueDetails(i['ComicLocation'])
                logger.info('issueinfo: ' + str(issueinfo))
                if issueinfo is None:
                    logger.fdebug('[IMPORT-CBZ] No valid metadata contained within filename. Dropping down to parsing the filename itself.')
                    pass
                else:
                    issuenotes_id = None
                    logger.info('[IMPORT-CBZ] Successfully retrieved some tags. Lets see what I can figure out.')
                    comicname = issueinfo[0]['series']
                    if comicname is not None:
                        logger.fdebug('[IMPORT-CBZ] Series Name: ' + comicname)
                        as_d = filechecker.FileChecker(watchcomic=comicname.decode('utf-8'))
                        as_dyninfo = as_d.dynamic_replace(comicname)
                        logger.fdebug('Dynamic-ComicName: ' + as_dyninfo['mod_seriesname'])
                    else:
                        logger.fdebug('[IMPORT-CBZ] No series name found within metadata. This is bunk - dropping down to file parsing for usable information.')
                        issueinfo = None
                        issue_number = None

                    if issueinfo is not None:
                        try:
                            issueyear = issueinfo[0]['year']
                        except:
                            issueyear = None

                        #if the issue number is a non-numeric unicode string, this will screw up along with impID
                        issue_number = issueinfo[0]['issue_number']
                        if issue_number is not None:
                            logger.fdebug('[IMPORT-CBZ] Issue Number: ' + issue_number)
                        else:
                            issue_number = i['parsedinfo']['issue_number']

                        if 'annual' in comicname.lower() or 'annual' in comfilename.lower():
                            if issue_number is None or issue_number == 'None':
                                logger.info('Annual detected with no issue number present within metadata. Assuming year as issue.')
                                try:
                                    issue_number = 'Annual ' + str(issueyear)
                                except:
                                    issue_number = 'Annual ' + str(i['parsedinfo']['issue_year'])
                            else:
                                logger.info('Annual detected with issue number present within metadata.')
                                if 'annual' not in issue_number.lower():
                                    issue_number = 'Annual ' + issue_number
                            mod_series = re.sub('annual', '', comicname, flags=re.I).strip()
                        else:
                            mod_series = comicname

                        logger.fdebug('issue number SHOULD Be: ' + issue_number)

                        try:
                            issuetitle = issueinfo[0]['title']
                        except:
                            issuetitle = None
                        try:
                            issueyear = issueinfo[0]['year']
                        except:
                            issueyear = None
                        try:
                            issuevolume = str(issueinfo[0]['volume'])
                            if all([issuevolume is not None, issuevolume != 'None']) and not issuevolume.lower().startswith('v'):
                                issuevolume = 'v' + str(issuevolume)
                            logger.fdebug('[TRY]issue volume is: ' + str(issuevolume))
                        except:
                            logger.fdebug('[EXCEPT]issue volume is: ' + str(issuevolume))
                            issuevolume = None

                    if any([comicname is None, comicname == 'None', issue_number is None, issue_number == 'None']):
                        logger.fdebug('[IMPORT-CBZ] Improperly tagged file as the metatagging is invalid. Ignoring meta and just parsing the filename.')
                        issueinfo = None
                        pass
                    else:
                        # if used by ComicTagger, Notes field will have the IssueID.
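                        # (illustration / assumption) a ComicTagger-written note usually looks
                        # something like 'Tagged with ComicTagger x.x.x using info from Comic Vine
                        # [Issue ID 123456].', while some other taggers embed the id as 'CVDB123456' -
                        # hence the two separate checks below.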
                        issuenotes = issueinfo[0]['notes']
                        logger.fdebug('[IMPORT-CBZ] Notes: ' + str(issuenotes))
                        if issuenotes is not None and issuenotes != 'None':
                            if 'Issue ID' in issuenotes:
                                st_find = issuenotes.find('Issue ID')
                                tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
                                if tmp_issuenotes_id.isdigit():
                                    issuenotes_id = tmp_issuenotes_id
                                    logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
                            elif 'CVDB' in issuenotes:
                                st_find = issuenotes.find('CVDB')
                                tmp_issuenotes_id = re.sub("[^0-9]", " ", issuenotes[st_find:]).strip()
                                if tmp_issuenotes_id.isdigit():
                                    issuenotes_id = tmp_issuenotes_id
                                    logger.fdebug('[IMPORT-CBZ] Successfully retrieved CV IssueID for ' + comicname + ' #' + issue_number + ' [' + str(issuenotes_id) + ']')
                            else:
                                logger.fdebug('[IMPORT-CBZ] Unable to retrieve IssueID from meta-tagging. If there is other metadata present I will use that.')

                        logger.fdebug('[IMPORT-CBZ] Adding ' + comicname + ' to the import-queue!')
                        #impid = comicname + '-' + str(issueyear) + '-' + str(issue_number) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
                        impid = str(random.randint(1000000,99999999))
                        logger.fdebug('[IMPORT-CBZ] impid: ' + str(impid))
                        #make sure we only add in those issueid's which don't already have a comicid attached via the cvinfo scan above (this is for reverse-lookup of issueids)
                        issuepopulated = False
                        if cvinfo_CID is None:
                            if issuenotes_id is None:
                                logger.info('[IMPORT-CBZ] No ComicID detected where it should be. Bypassing this metadata entry and going the parsing route [' + comfilename + ']')
                            else:
                                #we need to store the impid here as well so we can look it up.
                                issueid_list.append({'issueid':    issuenotes_id,
                                                     'importinfo': {'impid':       impid,
                                                                    'comicid':     None,
                                                                    'comicname':   comicname,
                                                                    'dynamicname': as_dyninfo['mod_seriesname'],
                                                                    'comicyear':   issueyear,
                                                                    'issuenumber': issue_number,
                                                                    'volume':      issuevolume,
                                                                    'comfilename': comfilename,
                                                                    'comlocation': comlocation.decode(mylar.SYS_ENCODING)}
                                                     })
                                mylar.IMPORT_CID_COUNT +=1
                                issuepopulated = True

                        if issuepopulated == False:
                            if cvscanned_loc == os.path.dirname(comlocation):
                                cv_cid = cvinfo_CID
                                logger.fdebug('[IMPORT-CBZ] CVINFO_COMICID attached : ' + str(cv_cid))
                            else:
                                cv_cid = None
                            import_by_comicids.append({
                                "impid": impid,
                                "comicid": cv_cid,
                                "watchmatch": None,
                                "displayname": mod_series,
                                "comicname": comicname,
                                "dynamicname": as_dyninfo['mod_seriesname'],
                                "comicyear": issueyear,
                                "issuenumber": issue_number,
                                "volume": issuevolume,
                                "issueid": issuenotes_id,
                                "comfilename": comfilename,
                                "comlocation": comlocation.decode(mylar.SYS_ENCODING)
                                               })

                            mylar.IMPORT_CID_COUNT +=1
            else:
                pass            
                #logger.fdebug(i['ComicFilename'] + ' is not in a metatagged format (cbz). Bypassing reading of the metatags')

        if issueinfo is None:
            if i['parsedinfo']['issue_number'] is None:
                if 'annual' in i['parsedinfo']['series_name'].lower():
                    logger.fdebug('Annual detected with no issue number present. Assuming year as issue.')
                    if i['parsedinfo']['issue_year'] is not None:
                        issuenumber = 'Annual ' + str(i['parsedinfo']['issue_year'])
                    else:
                        issuenumber = 'Annual 1'
            else:
                issuenumber = i['parsedinfo']['issue_number']

            if 'annual' in i['parsedinfo']['series_name'].lower():
                mod_series = re.sub('annual', '', i['parsedinfo']['series_name'], flags=re.I).strip()
                logger.fdebug('Annual detected within the series name.')
                if i['parsedinfo']['issue_number'] is not None:
                    issuenumber = 'Annual ' + str(i['parsedinfo']['issue_number'])
                else:
                    if i['parsedinfo']['issue_year'] is not None:
                        issuenumber = 'Annual ' + str(i['parsedinfo']['issue_year'])
                    else:
                        issuenumber = 'Annual 1'
            else:
                mod_series = i['parsedinfo']['series_name']
                issuenumber = i['parsedinfo']['issue_number']


            logger.fdebug('[' + mod_series + '] Adding to the import-queue!')
            isd = filechecker.FileChecker(watchcomic=mod_series.decode('utf-8'))
            is_dyninfo = isd.dynamic_replace(mod_series)
            logger.fdebug('Dynamic-ComicName: ' + is_dyninfo['mod_seriesname'])

            #impid = dispname + '-' + str(result_comyear) + '-' + str(comiss) #com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
            impid = str(random.randint(1000000,99999999))
            logger.fdebug("impid: " + str(impid))
            if cvscanned_loc == os.path.dirname(comlocation):
                cv_cid = cvinfo_CID
                logger.fdebug('CVINFO_COMICID attached : ' + str(cv_cid))
            else:
                cv_cid = None

            if issuevolume is None:
                logger.fdebug('issue volume is : ' + str(issuevolume))
                if i['parsedinfo']['series_volume'] is None:
                    issuevolume = None
                else:
                    if str(i['parsedinfo']['series_volume'].lower()).startswith('v'):
                        issuevolume = i['parsedinfo']['series_volume']
                    else:
                        issuevolume = 'v' + str(i['parsedinfo']['series_volume'])
            else:
                logger.fdebug('issue volume not none : ' + str(issuevolume))
                if not issuevolume.lower().startswith('v'):
                    issuevolume = 'v' + str(issuevolume)

            logger.fdebug('IssueVolume is : ' + str(issuevolume))

            import_by_comicids.append({
                "impid": impid,
                "comicid": cv_cid,
                "issueid": None,
                "watchmatch": None, #watchmatch (should be true/false if it already exists on watchlist)
                "displayname": mod_series,
                "comicname": i['parsedinfo']['series_name'],
                "dynamicname": is_dyninfo['mod_seriesname'].lower(),
                "comicyear": i['parsedinfo']['issue_year'],
                "issuenumber": issuenumber, #issuenumber,
                "volume": issuevolume,
                "comfilename": comfilename,
                "comlocation": comlocation.decode(mylar.SYS_ENCODING)
                                      })
        cnt+=1
    #logger.fdebug('import_by_ids: ' + str(import_by_comicids))

    #reverse lookup all of the gathered IssueID's in order to get the related ComicID
    reverse_issueids = []
    for x in issueid_list:
        reverse_issueids.append(x['issueid'])

    vals = None
    if len(reverse_issueids) > 0:
        mylar.IMPORT_STATUS = 'Now Reverse looking up ' + str(len(reverse_issueids)) + ' IssueIDs to get the ComicIDs'
        vals = mylar.cv.getComic(None, 'import', comicidlist=reverse_issueids)
        #logger.fdebug('vals returned:' + str(vals))

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        #logger.fdebug("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info('You checked off Move Files...so that\'s what I am going to do') 
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                logger.fdebug('Moving files into appropriate directory')
                while (wat < watchfound): 
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    logger.fdebug('ComicLocation: ' + watch_comlocation)
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename'] 
                    logger.fdebug('Orig. Location: ' + orig_comlocation)
                    logger.fdebug('Orig. Filename: ' + orig_filename)
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        logger.fdebug('Renaming files according to configuration details : ' + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
                        nfilename = renameit['nfilename']

                        dst_path = os.path.join(watch_comlocation, nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        logger.fdebug('Renaming files not enabled, keeping original filename(s)')
                        dst_path = os.path.join(watch_comlocation, orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    logger.fdebug('I am going to move ' + orig_comlocation + ' to ' + dst_path)
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info("Failed to move directory - check directories and manually re-run.")
                    wat+=1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    logger.fdebug('ComicID: ' + str(watch_comicid))
                    logger.fdebug('Issue#: ' + str(watch_issue))
                    issuechk = myDB.selectone("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        logger.fdebug('No matching issues for this comic#')
                    else:
                        logger.fdebug('...Existing status: ' + str(issuechk['Status']))
                        control = {"IssueID":   issuechk['IssueID']}
                        values = {"Status":   "Archived"}
                        logger.fdebug('...changing status of ' + str(issuechk['Issue_Number']) + ' to Archived ')
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    wat+=1
            if comicids:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    logger.fdebug('Rescanning.. ' + str(comicids[c]))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"

    if len(import_by_comicids) > 0 or vals is not None:
        #import_comicids['comic_info'] = import_by_comicids
        #if vals:
        #    import_comicids['issueid_info'] = vals
        #else:
        #    import_comicids['issueid_info'] = None
        if vals:
            cvimport_comicids = vals
            import_cv_ids = len(vals)
        else:
            cvimport_comicids = None
            import_cv_ids = 0
    else:
        cvimport_comicids = None
        import_cv_ids = 0
                    
    return {'import_by_comicids':  import_by_comicids, 
            'import_count':        len(import_by_comicids),
            'CV_import_comicids':  cvimport_comicids,
            'import_cv_ids':       import_cv_ids,
            'issueid_list':        issueid_list,
            'failure_list':        failure_list}
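

# --- illustrative sketch (not part of the original scanner) -----------------
# A minimal example of how the dict returned above might be consumed by a
# caller: entries that already carry a ComicID (from a cvinfo file or from
# metadata) can be imported directly, while the rest still need a name-based
# lookup. The sample data and names below are hypothetical.
if __name__ == '__main__':
    scan_results = {'import_by_comicids': [
                        {'impid': '1234567', 'comicid': '12345',
                         'comicname': 'Example Comic', 'issuenumber': '1',
                         'comfilename': 'Example Comic 001 (2013).cbz'},
                        {'impid': '7654321', 'comicid': None,
                         'comicname': 'Another Comic', 'issuenumber': '2',
                         'comfilename': 'Another Comic 002 (2014).cbz'}],
                    'import_count': 2,
                    'CV_import_comicids': None,
                    'import_cv_ids': 0,
                    'issueid_list': [],
                    'failure_list': []}

    ready = [x for x in scan_results['import_by_comicids'] if x['comicid'] is not None]
    needs_lookup = [x for x in scan_results['import_by_comicids'] if x['comicid'] is None]
    print('%d entries already have a ComicID, %d still need a lookup' % (len(ready), len(needs_lookup)))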
Ejemplo n.º 22
0
  
    logger.info(u"Updating complete for: " + comic['ComicName'])

    #move the files...if imported is not empty (meaning it's not from the mass importer.)
    if imported is None or imported == 'None':
        pass
    else:
        if mylar.IMP_MOVE:
            logger.info("Mass import - Move files")
            moveit.movefiles(comicid,comlocation,ogcname)
        else:
            logger.info("Mass import - Moving not Enabled. Setting Archived Status for import.")
            moveit.archivefiles(comicid,ogcname)

    #check for existing files...
    updater.forceRescan(comicid)

    if pullupd is None:
    # let's check the pullist for anything at this time as well since we're here.
    # do this for only Present comics....
        if mylar.AUTOWANT_UPCOMING and lastpubdate == 'Present': #and 'Present' in gcdinfo['resultPublished']:
            print ("latestissue: #" + str(latestiss))
            chkstats = myDB.action("SELECT * FROM issues WHERE ComicID=? AND Issue_Number=?", [comicid,str(latestiss)]).fetchone()
            print chkstats['Status']
            if chkstats['Status'] == 'Skipped' or chkstats['Status'] == 'Wanted' or chkstats['Status'] == 'Snatched':
                logger.info(u"Checking this week's pullist for new issues of " + comic['ComicName'])
                updater.newpullcheck(comic['ComicName'], comicid)

        #here we grab issues that have been marked as wanted above...
  
                results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
Ejemplo n.º 23
0
    def Process_next(self,comicid,issueid,issuenumOG,ml=None):
            annchk = "no"
            extensions = ('.cbr', '.cbz')
            myDB = db.DBConnection()
            comicnzb = myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            issuenzb = myDB.action("SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL", [issueid,comicid]).fetchone()
            print "issueid: " + str(issueid)
            print "issuenumOG: " + str(issuenumOG)
            if issuenzb is None:
                print "chk1"
                issuenzb = myDB.action("SELECT * from annuals WHERE issueid=? and comicid=?", [issueid,comicid]).fetchone()
                print "chk2"
                annchk = "yes"
            print issuenzb
            #issueno = str(issuenum).split('.')[0]
            #new CV API - removed all decimals...here we go AGAIN!
            issuenum = issuenzb['Issue_Number']
            issue_except = 'None'
            if 'au' in issuenum.lower():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AU'
            elif 'ai' in issuenum.lower():
                issuenum = re.sub("[^0-9]", "", issuenum)
                issue_except = ' AI'
            if '.' in issuenum:
                iss_find = issuenum.find('.')
                iss_b4dec = issuenum[:iss_find]
                iss_decval = issuenum[iss_find+1:]
                if int(iss_decval) == 0:
                    iss = iss_b4dec
                    issdec = int(iss_decval)
                    issueno = str(iss)
                    self._log("Issue Number: " + str(issueno), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(issueno))
                else:
                    if len(iss_decval) == 1:
                        iss = iss_b4dec + "." + iss_decval
                        issdec = int(iss_decval) * 10
                    else:
                        iss = iss_b4dec + "." + iss_decval.rstrip('0')
                        issdec = int(iss_decval.rstrip('0')) * 10
                    issueno = iss_b4dec
                    self._log("Issue Number: " + str(iss), logger.DEBUG)
                    logger.fdebug("Issue Number: " + str(iss))
            else:
                iss = issuenum
                issueno = str(iss)
            # issue zero-suppression here
            if mylar.ZERO_LEVEL == "0": 
                zeroadd = ""
            else:
                if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
                elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
                elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"

            logger.fdebug("Zero Suppression set to : " + str(mylar.ZERO_LEVEL_N))

            if len(str(issueno)) > 0:
                if int(issueno) < 10:
                    self._log("issue detected less than 10", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None': 
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                elif int(issueno) >= 10 and int(issueno) < 100:
                    self._log("issue detected greater than 10, but less than 100", logger.DEBUG)
                    if mylar.ZERO_LEVEL_N == "none":
                        zeroadd = ""
                    else:
                        zeroadd = "0"
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                            prettycomiss = str(zeroadd) + str(iss)
                        else:
                            prettycomiss = str(zeroadd) + str(int(issueno))
                    else:
                        prettycomiss = str(zeroadd) + str(iss)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss), logger.DEBUG)
                else:
                    self._log("issue detected greater than 100", logger.DEBUG)
                    if '.' in iss:
                        if int(iss_decval) > 0:
                            issueno = str(iss)
                    prettycomiss = str(issueno)
                    if issue_except != 'None':
                        prettycomiss = str(prettycomiss) + issue_except
                    self._log("Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss), logger.DEBUG)
            else:
                prettycomiss = str(issueno)
                self._log("issue length error - cannot determine length. Defaulting to None:  " + str(prettycomiss), logger.DEBUG)

            if annchk == "yes":
                prettycomiss = "Annual " + str(prettycomiss)
                self._log("Annual detected.")
            logger.fdebug("Pretty Comic Issue is : " + str(prettycomiss))
            issueyear = issuenzb['IssueDate'][:4]
            self._log("Issue Year: " + str(issueyear), logger.DEBUG)
            logger.fdebug("Issue Year : " + str(issueyear))
#            comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
            publisher = comicnzb['ComicPublisher']
            self._log("Publisher: " + publisher, logger.DEBUG)
            logger.fdebug("Publisher: " + str(publisher))
            #we need to un-unicode this to make sure we can write the filenames properly for spec.chars
            series = comicnzb['ComicName'].encode('ascii', 'ignore').strip()
            self._log("Series: " + series, logger.DEBUG)
            logger.fdebug("Series: " + str(series))
            seriesyear = comicnzb['ComicYear']
            self._log("Year: " + seriesyear, logger.DEBUG)
            logger.fdebug("Year: "  + str(seriesyear))
            comlocation = comicnzb['ComicLocation']
            self._log("Comic Location: " + comlocation, logger.DEBUG)
            logger.fdebug("Comic Location: " + str(comlocation))
            comversion = comicnzb['ComicVersion']
            self._log("Comic Version: " + str(comversion), logger.DEBUG)
            logger.fdebug("Comic Version: " + str(comversion))
            if comversion is None:
                comversion = 'None'
            #if comversion is None, remove it so it doesn't populate with 'None'
            if comversion == 'None':
                chunk_f_f = re.sub('\$VolumeN','',mylar.FILE_FORMAT)
                chunk_f = re.compile(r'\s+')
                chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                self._log("No version # found for series - tag will not be available for renaming.", logger.DEBUG)
                logger.fdebug("No version # found for series, removing from filename")
                logger.fdebug("new format is now: " + str(chunk_file_format))
            else:
                chunk_file_format = mylar.FILE_FORMAT

            ofilename = None

            #if meta-tagging is not enabled, we need to declare the check as being fail
            #if meta-tagging is enabled, it gets changed just below to a default of pass
            pcheck = "fail"

            #tag the meta.
            if mylar.ENABLE_META:
                self._log("Metatagging enabled - proceeding...")
                logger.fdebug("Metatagging enabled - proceeding...")
                pcheck = "pass"
                try:
                    import cmtagmylar
                    if ml is None:
                        pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid)
                    else:
                        pcheck = cmtagmylar.run(self.nzb_folder, issueid=issueid, manual="yes", filename=ml['ComicLocation'])

                except ImportError:
                    logger.fdebug("comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/")
                    logger.fdebug("continuing with PostProcessing, but I'm not using metadata.")
                    pcheck = "fail"
                
                if pcheck == "fail":
                    self._log("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
                    logger.fdebug("Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging...")
                elif pcheck == "unrar error":
                    self._log("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
                    logger.error("This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy.")
                    return self.log
                else:
                    otofilename = pcheck
                    self._log("Sucessfully wrote metadata to .cbz - Continuing..")
                    logger.fdebug("Sucessfully wrote metadata to .cbz (" + str(otofilename) + ") - Continuing..")
            #Run Pre-script

            if mylar.ENABLE_PRE_SCRIPTS:
                nzbn = self.nzb_name #original nzb name
                nzbf = self.nzb_folder #original nzb folder
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_pre_scripts(nzbn, nzbf, seriesmetadata )

        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"

            file_values = {'$Series':    series,
                           '$Issue':     prettycomiss,
                           '$Year':      issueyear,
                           '$series':    series.lower(),
                           '$Publisher': publisher,
                           '$publisher': publisher.lower(),
                           '$VolumeY':   'V' + str(seriesyear),
                           '$VolumeN':   comversion
                          }
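            # (illustration / assumption) these $-tokens are substituted into mylar.FILE_FORMAT
            # further below via helpers.replace_all(). Assuming that helper does straight
            # substring replacement, a format of '$Series $Issue ($Year)' with the values above
            # would produce something like 'Example Comic 001 (2013)'.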


            #if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
            if ml is None:

                for root, dirnames, filenames in os.walk(self.nzb_folder):
                    for filename in filenames:
                        if filename.lower().endswith(extensions):
                            ofilename = filename
                            path, ext = os.path.splitext(ofilename)
            else:
                if pcheck == "fail":
                    otofilename = ml['ComicLocation']
                print "otofilename:" + str(otofilename)
                odir, ofilename = os.path.split(otofilename)
                print "ofilename: " + str(ofilename)
                path, ext = os.path.splitext(ofilename)
                print "path: " + str(path)
                print "ext:" + str(ext)

            if ofilename is None:
                logger.error(u"Aborting PostProcessing - the filename doesn't exist in the location given. Make sure that " + str(self.nzb_folder) + " exists and is the correct location.")
                return
            self._log("Original Filename: " + ofilename, logger.DEBUG)
            self._log("Original Extension: " + ext, logger.DEBUG)
            logger.fdebug("Original Filname: " + str(ofilename))
            logger.fdebug("Original Extension: " + str(ext))

            if mylar.FILE_FORMAT == '' or not mylar.RENAME_FILES:
                self._log("Rename Files isn't enabled...keeping original filename.", logger.DEBUG)
                logger.fdebug("Rename Files isn't enabled - keeping original filename.")
                #check if extension is in nzb_name - will screw up otherwise
                if ofilename.lower().endswith(extensions):
                    nfilename = ofilename[:-4]
                else:
                    nfilename = ofilename
            else:
                nfilename = helpers.replace_all(chunk_file_format, file_values)
                if mylar.REPLACE_SPACES:
                    #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                    nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
            nfilename = re.sub('[\,\:\?]', '', nfilename)
            self._log("New Filename: " + nfilename, logger.DEBUG)
            logger.fdebug("New Filename: " + str(nfilename))

            src = os.path.join(self.nzb_folder, ofilename)

            filechecker.validateAndCreateDirectory(comlocation, True)

            if mylar.LOWERCASE_FILENAMES:
                dst = (comlocation + "/" + nfilename + ext).lower()
            else:
                dst = comlocation + "/" + nfilename + ext.lower()    
            self._log("Source:" + src, logger.DEBUG)
            self._log("Destination:" +  dst, logger.DEBUG)
            logger.fdebug("Source: " + str(src))
            logger.fdebug("Destination: " + str(dst))

            if ml is None:
                #non-manual run moving/deleting...
                os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                src = os.path.join(self.nzb_folder, str(nfilename + ext))
                try:
                    shutil.move(src, dst)
                except (OSError, IOError):
                    self._log("Failed to move directory - check directories and manually re-run.", logger.DEBUG)
                    self._log("Post-Processing ABORTED.", logger.DEBUG)
                    return
                #tidyup old path
                try:
                    shutil.rmtree(self.nzb_folder)
                except (OSError, IOError):
                    self._log("Failed to remove temporary directory - check directory and manually re-run.", logger.DEBUG)
                    self._log("Post-Processing ABORTED.", logger.DEBUG)
                    return

                self._log("Removed temporary directory : " + str(self.nzb_folder), logger.DEBUG)
            else:
                #Manual Run, this is the portion.
                logger.fdebug("Renaming " + os.path.join(self.nzb_folder, str(ofilename)) + " ..to.. " + os.path.join(self.nzb_folder,str(nfilename + ext)))
                os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                src = os.path.join(self.nzb_folder, str(nfilename + ext))
                logger.fdebug("Moving " + src + " ... to ... " + dst)
                try:
                    shutil.move(src, dst)
                except (OSError, IOError):
                    logger.fdebug("Failed to move directory - check directories and manually re-run.")
                    logger.fdebug("Post-Processing ABORTED.")
                    return
                logger.fdebug("Successfully moved to : " + dst)
                #tidyup old path
                #try:
                #    os.remove(os.path.join(self.nzb_folder, str(ofilename)))
                #    logger.fdebug("Deleting : " + os.path.join(self.nzb_folder, str(ofilename)))
                #except (OSError, IOError):
                #    logger.fdebug("Failed to remove temporary directory - check directory and manually re-run.")
                #    logger.fdebug("Post-Processing ABORTED.")
                #    return
                #logger.fdebug("Removed temporary directory : " + str(self.nzb_folder))

            #delete entry from nzblog table
            myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
            #update snatched table to change status to Downloaded
            if annchk == "no":
                updater.foundsearch(comicid, issueid, down='True')
            else:
                updater.foundsearch(comicid, issueid, mode='want_ann', down='True')
            #force rescan of files
            updater.forceRescan(comicid)
            logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenumOG) )
            self._log(u"Post Processing SUCCESSFULL! ", logger.DEBUG)
            if ml is not None: 
                return
            else:
                if mylar.PROWL_ENABLED:
                    pushmessage = series + '(' + issueyear + ') - issue #' + issuenumOG
                    logger.info(u"Prowl request")
                    prowl = notifiers.PROWL()
                    prowl.notify(pushmessage,"Download and Postprocessing completed")
    
                if mylar.NMA_ENABLED:
                    nma = notifiers.NMA()
                    nma.notify(series, str(issueyear), str(issuenumOG))

                if mylar.PUSHOVER_ENABLED:
                    pushmessage = series + ' (' + str(issueyear) + ') - issue #' + str(issuenumOG)
                    logger.info(u"Pushover request")
                    pushover = notifiers.PUSHOVER()
                    pushover.notify(pushmessage, "Download and Post-Processing completed")

                if mylar.BOXCAR_ENABLED:
                    boxcar = notifiers.BOXCAR()
                    boxcar.notify(series, str(issueyear), str(issuenumOG))

             
            # retrieve/create the corresponding comic objects

            if mylar.ENABLE_EXTRA_SCRIPTS:
                folderp = str(dst) #folder location after move/rename
                nzbn = self.nzb_name #original nzb name
                filen = str(nfilename + ext) #new filename
                #name, comicyear, comicid , issueid, issueyear, issue, publisher
                #create the dic and send it.
                seriesmeta = []
                seriesmetadata = {}
                seriesmeta.append({
                            'name':                 series,
                            'comicyear':            seriesyear,
                            'comicid':              comicid,
                            'issueid':              issueid,
                            'issueyear':            issueyear,
                            'issue':                issuenum,
                            'publisher':            publisher
                            })
                seriesmetadata['seriesmeta'] = seriesmeta
                self._run_extra_scripts(nzbn, self.nzb_folder, filen, folderp, seriesmetadata)

            return self.log
Ejemplo n.º 24
0
def GCDimport(gcomicid):
    # this is for importing via GCD only and not using CV.
    # used when volume spanning is discovered for a Comic (and can't be added using CV).
    # Issue Counts are wrong (and can't be added).

    # because Comicvine ComicID and GCD ComicID could be identical at some random point, let's distinguish.
    # CV = comicid, GCD = gcomicid :) (ie. CV=2740, GCD=G3719)

    gcdcomicid = gcomicid
    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID": gcdcomicid}

    comic = myDB.action(
        "SELECT ComicName, ComicYear, Total, ComicPublished, ComicImage, ComicLocation FROM comics WHERE ComicID=?",
        [gcomicid],
    ).fetchone()
    ComicName = comic[0]
    ComicYear = comic[1]
    ComicIssues = comic[2]
    comlocation = comic[5]
    # ComicImage = comic[4]
    # print ("Comic:" + str(ComicName))

    newValueDict = {"Status": "Loading"}
    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    # comic = cv.getComic(comicid,'comic')

    if not comic:
        logger.warn("Error fetching comic. ID for : " + gcdcomicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (gcdcomicid), "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    if ComicName.startswith("The "):
        sortname = ComicName[4:]
    else:
        sortname = ComicName

    logger.info(u"Now adding/updating: " + ComicName)
    # --Now that we know ComicName, let's try some scraping
    # --Start
    # gcd will return issue details (most importantly publishing date)
    comicid = gcomicid[1:]
    resultURL = "/series/" + str(comicid) + "/"
    gcdinfo = parseit.GCDdetails(
        comseries=None,
        resultURL=resultURL,
        vari_loop=0,
        ComicID=gcdcomicid,
        TotalIssues=ComicIssues,
        issvariation=None,
        resultPublished=None,
    )
    if gcdinfo == "No Match":
        logger.warn("No matching result found for " + ComicName + " (" + ComicYear + ")")
        updater.no_searchresults(gcomicid)
        nomatch = "true"
        return nomatch
    logger.info(u"Sucessfully retrieved details for " + ComicName)
    # print ("Series Published" + parseit.resultPublished)
    # --End

    ComicImage = gcdinfo["ComicImage"]

    # comic book location on machine
    # setup default location here
    if comlocation is None:
        if ":" in ComicName or "/" in ComicName or "," in ComicName:
            comicdir = ComicName
            if ":" in comicdir:
                comicdir = comicdir.replace(":", "")
            if "/" in comicdir:
                comicdir = comicdir.replace("/", "-")
            if "," in comicdir:
                comicdir = comicdir.replace(",", "")
        else:
            comicdir = ComicName
        comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + ComicYear + ")"
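        # worked example (illustrative): with DESTINATION_DIR '/comics', ComicName 'Example: Comic'
        # and ComicYear '2013', comicdir becomes 'Example Comic' and comlocation
        # '/comics/Example Comic (2013)' (spaces may then be swapped out below if REPLACE_SPACES is on).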
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR)
        # if it doesn't exist - create it (otherwise will bugger up later on)
        if os.path.isdir(str(comlocation)):
            logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
        else:
            # print ("Directory doesn't exist!")
            try:
                os.makedirs(str(comlocation))
                logger.info(u"Directory successfully created at: " + str(comlocation))
            except OSError:
                logger.error(u"Could not create comicdir : " + str(comlocation))

    comicIssues = gcdinfo["totalissues"]

    # let's download the image...
    if os.path.exists(mylar.CACHE_DIR):
        pass
    else:
        # let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))

        except OSError:
            logger.error(u"Could not create cache dir : " + str(mylar.CACHE_DIR))

    coverfile = mylar.CACHE_DIR + "/" + str(gcomicid) + ".jpg"

    urllib.urlretrieve(str(ComicImage), str(coverfile))
    try:
        with open(str(coverfile)) as f:
            ComicImage = "cache/" + str(gcomicid) + ".jpg"
            logger.info(u"Sucessfully retrieved cover for " + str(ComicName))
    except IOError as e:
        logger.error(u"Unable to save cover locally at this time.")

    controlValueDict = {"ComicID": gcomicid}
    newValueDict = {
        "ComicName": ComicName,
        "ComicSortName": sortname,
        "ComicYear": ComicYear,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        "ComicImage": ComicImage,
        # "ComicPublisher":   comic['ComicPublisher'],
        # "ComicPublished":   comicPublished,
        "DateAdded": helpers.today(),
        "Status": "Loading",
    }

    myDB.upsert("comics", newValueDict, controlValueDict)

    logger.info(u"Sucessfully retrieved issue details for " + ComicName)
    n = 0
    iscnt = int(comicIssues)
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    # print ("total issues:" + str(iscnt))
    # ---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + ComicName)
    bb = 0
    while bb <= iscnt:
        # ---NEW.code
        try:
            gcdval = gcdinfo["gcdchoice"][bb]
            # print ("gcdval: " + str(gcdval))
        except IndexError:
            # account for gcd variation here
            if gcdinfo["gcdvariation"] == "gcd":
                # print ("gcd-variation accounted for.")
                issdate = "0000-00-00"
                int_issnum = int(gcdis / 1000)
            break
        if "nn" in str(gcdval["GCDIssue"]):
            # no number detected - GN, TP or the like
            logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
            updater.no_searchresults(comicid)
            return
        elif "." in str(gcdval["GCDIssue"]):
            issst = str(gcdval["GCDIssue"]).find(".")
            issb4dec = str(gcdval["GCDIssue"])[:issst]
            # if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval["GCDIssue"])[issst + 1 :]
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            if int(issaftdec) == 0:
                issaftdec = "00"
            gcd_issue = issb4dec + "." + issaftdec
            gcdis = (int(issb4dec) * 1000) + decisval
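            # worked example (illustrative): GCDIssue '3.5' -> issb4dec '3', decis '5',
            # decisval 50, so gcd_issue becomes '3.50' and gcdis = (3 * 1000) + 50 = 3050.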
        else:
            gcdis = int(str(gcdval["GCDIssue"])) * 1000
            gcd_issue = str(gcdval["GCDIssue"])
        # get the latest issue / date using the date.
        int_issnum = int(gcdis / 1000)
        issdate = str(gcdval["GCDDate"])
        issid = "G" + str(gcdval["IssueID"])
        if gcdval["GCDDate"] > latestdate:
            latestiss = str(gcd_issue)
            latestdate = str(gcdval["GCDDate"])
        # print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
        # ---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()

        # Only change the status & add DateAdded if the issue is not already in the database
        if iss_exists is None:
            newValueDict["DateAdded"] = helpers.today()

        # adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
        if "?" in str(issdate):
            issdate = "0000-00-00"

        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": gcomicid,
            "ComicName": ComicName,
            "Issue_Number": gcd_issue,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum,
        }

        # print ("issueid:" + str(controlValueDict))
        # print ("values:" + str(newValueDict))

        if mylar.AUTOWANT_ALL:
            newValueDict["Status"] = "Wanted"
            # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
            #    newValueDict['Status'] = "Wanted"
        else:
            newValueDict["Status"] = "Skipped"

        if iss_exists:
            # print ("Existing status : " + str(iss_exists['Status']))
            newValueDict["Status"] = iss_exists["Status"]

        myDB.upsert("issues", newValueDict, controlValueDict)
        bb += 1

    #        logger.debug(u"Updating comic cache for " + ComicName)
    #        cache.getThumb(ComicID=issue['issueid'])

    #        logger.debug(u"Updating cache for: " + ComicName)
    #        cache.getThumb(ComicIDcomicid)

    # check for existing files...
    updater.forceRescan(gcomicid)

    controlValueStat = {"ComicID": gcomicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now(),
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    logger.info(u"Updating complete for: " + ComicName)

    # let's check the pullist for anything at this time as well since we're here.
    if mylar.AUTOWANT_UPCOMING:
        logger.info(u"Checking this week's pullist for new issues of " + str(ComicName))
        updater.newpullcheck()

    # here we grab issues that have been marked as wanted above...

    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + ComicName)

        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result["IssueID"])
                if foundNZB == "yes":
                    updater.foundsearch(result["ComicID"], result["IssueID"])
    else:
        logger.info(u"No issues marked as wanted for " + ComicName)

    logger.info(u"Finished grabbing what I could.")
Ejemplo n.º 25
0
def addComictoDB(comicid, mismatch=None):
    # Putting this here to get around the circular import. Will try to use this to update images at later date.
    from mylar import cache

    myDB = db.DBConnection()

    # We need the current minimal info in the database instantly
    # so we don't throw a 500 error when we redirect to the artistPage

    controlValueDict = {"ComicID": comicid}

    dbcomic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [comicid]).fetchone()
    if dbcomic is None:
        newValueDict = {"ComicName": "Comic ID: %s" % (comicid), "Status": "Loading"}
        comlocation = None
    else:
        newValueDict = {"Status": "Loading"}
        comlocation = dbcomic["ComicLocation"]

    myDB.upsert("comics", newValueDict, controlValueDict)

    # we need to lookup the info for the requested ComicID in full now
    comic = cv.getComic(comicid, "comic")
    # comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [comicid]).fetchone()
    if not comic:
        logger.warn("Error fetching comic. ID for : " + comicid)
        if dbcomic is None:
            newValueDict = {"ComicName": "Fetch failed, try refreshing. (%s)" % (comicid), "Status": "Active"}
        else:
            newValueDict = {"Status": "Active"}
        myDB.upsert("comics", newValueDict, controlValueDict)
        return

    if comic["ComicName"].startswith("The "):
        sortname = comic["ComicName"][4:]
    else:
        sortname = comic["ComicName"]

    logger.info(u"Now adding/updating: " + comic["ComicName"])
    # --Now that we know ComicName, let's try some scraping
    # --Start
    # gcd will return issue details (most importantly publishing date)
    if mismatch == "no" or mismatch is None:
        gcdinfo = parseit.GCDScraper(comic["ComicName"], comic["ComicYear"], comic["ComicIssues"], comicid)
        mismatch_com = "no"
        if gcdinfo == "No Match":
            updater.no_searchresults(comicid)
            nomatch = "true"
            logger.info(
                u"There was an error when trying to add " + comic["ComicName"] + " (" + comic["ComicYear"] + ")"
            )
            return nomatch
        else:
            mismatch_com = "yes"
            # print ("gcdinfo:" + str(gcdinfo))

    elif mismatch == "yes":
        CV_EXcomicid = myDB.action("SELECT * from exceptions WHERE ComicID=?", [comicid]).fetchone()
        if CV_EXcomicid["variloop"] is None:
            pass
        else:
            vari_loop = CV_EXcomicid["variloop"]
            NewComicID = CV_EXcomicid["NewComicID"]
            gcomicid = CV_EXcomicid["GComicID"]
            resultURL = "/series/" + str(NewComicID) + "/"
            # print ("variloop" + str(CV_EXcomicid['variloop']))
            # if vari_loop == '99':
            gcdinfo = parseit.GCDdetails(
                comseries=None,
                resultURL=resultURL,
                vari_loop=0,
                ComicID=comicid,
                TotalIssues=0,
                issvariation="no",
                resultPublished=None,
            )

    logger.info(u"Sucessfully retrieved details for " + comic["ComicName"])
    # print ("Series Published" + parseit.resultPublished)

    # comic book location on machine
    # setup default location here

    if comlocation is None:
        if ":" in comic["ComicName"] or "/" in comic["ComicName"] or "," in comic["ComicName"]:
            comicdir = comic["ComicName"]
            if ":" in comicdir:
                comicdir = comicdir.replace(":", "")
            if "/" in comicdir:
                comicdir = comicdir.replace("/", "-")
            if "," in comicdir:
                comicdir = comicdir.replace(",", "")
        else:
            comicdir = comic["ComicName"]

        series = comicdir
        publisher = comic["ComicPublisher"]
        year = comic["ComicYear"]

        # do work to generate folder path

        values = {"$Series": series, "$Publisher": publisher, "$Year": year}

        # print mylar.FOLDER_FORMAT
        # print 'working dir:'
        # print helpers.replace_all(mylar.FOLDER_FORMAT, values)

        if mylar.FOLDER_FORMAT == "":
            comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic["ComicYear"] + ")"
        else:
            comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)

        # comlocation = mylar.DESTINATION_DIR + "/" + comicdir + " (" + comic['ComicYear'] + ")"
        if mylar.DESTINATION_DIR == "":
            logger.error(u"There is no general directory specified - please specify in Config/Post-Processing.")
            return
        if mylar.REPLACE_SPACES:
            # mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            comlocation = comlocation.replace(" ", mylar.REPLACE_CHAR)
        # if it doesn't exist - create it (otherwise will bugger up later on)
        if os.path.isdir(str(comlocation)):
            logger.info(u"Directory (" + str(comlocation) + ") already exists! Continuing...")
        else:
            # print ("Directory doesn't exist!")
            try:
                os.makedirs(str(comlocation))
                logger.info(u"Directory successfully created at: " + str(comlocation))
            except OSError:
                logger.error(u"Could not create comicdir : " + str(comlocation))

    # try to account for CV not updating new issues as fast as GCD
    # seems CV doesn't update total counts
    # comicIssues = gcdinfo['totalissues']
    if gcdinfo["gcdvariation"] == "cv":
        comicIssues = str(int(comic["ComicIssues"]) + 1)
    else:
        comicIssues = comic["ComicIssues"]

    # let's download the image...
    if os.path.exists(mylar.CACHE_DIR):
        pass
    else:
        # let's make the dir.
        try:
            os.makedirs(str(mylar.CACHE_DIR))
            logger.info(u"Cache Directory successfully created at: " + str(mylar.CACHE_DIR))

        except OSError:
            logger.error("Could not create cache dir. Check permissions of cache dir: " + str(mylar.CACHE_DIR))

    coverfile = mylar.CACHE_DIR + "/" + str(comicid) + ".jpg"

    # grab the cover image and cache it locally; fall back gracefully if the fetch fails
    ComicImage = None
    try:
        urllib.urlretrieve(str(comic["ComicImage"]), str(coverfile))
        with open(str(coverfile)) as f:
            ComicImage = "cache/" + str(comicid) + ".jpg"
            logger.info(u"Successfully retrieved cover for " + str(comic["ComicName"]))
    except (IOError, OSError):
        logger.error(u"Unable to save cover locally at this time.")

    controlValueDict = {"ComicID": comicid}
    newValueDict = {
        "ComicName": comic["ComicName"],
        "ComicSortName": sortname,
        "ComicYear": comic["ComicYear"],
        "ComicImage": ComicImage,
        "Total": comicIssues,
        "ComicLocation": comlocation,
        "ComicPublisher": comic["ComicPublisher"],
        "ComicPublished": gcdinfo["resultPublished"],
        "DateAdded": helpers.today(),
        "Status": "Loading",
    }

    myDB.upsert("comics", newValueDict, controlValueDict)

    issued = cv.getComic(comicid, "issue")
    logger.info(u"Sucessfully retrieved issue details for " + comic["ComicName"])
    n = 0
    iscnt = int(comicIssues)
    issid = []
    issnum = []
    issname = []
    issdate = []
    int_issnum = []
    # let's start issue #'s at 0 -- thanks to DC for the new 52 reboot! :)
    latestiss = "0"
    latestdate = "0000-00-00"
    # print ("total issues:" + str(iscnt))
    # ---removed NEW code here---
    logger.info(u"Now adding/updating issues for " + comic["ComicName"])

    # file check to see if issue exists
    logger.info(u"Checking directory for existing issues.")
    # fc = filechecker.listFiles(dir=comlocation, watchcomic=comic['ComicName'])
    # havefiles = 0

    # fccnt = int(fc['comiccount'])
    # logger.info(u"Found " + str(fccnt) + "/" + str(iscnt) + " issues of " + comic['ComicName'] + "...verifying")
    # fcnew = []

    while n <= iscnt:
        # ---NEW.code
        try:
            firstval = issued["issuechoice"][n]
        except IndexError:
            break
        cleanname = helpers.cleanName(firstval["Issue_Name"])
        issid = str(firstval["Issue_ID"])
        issnum = str(firstval["Issue_Number"])
        issname = cleanname
        if "." in str(issnum):
            issn_st = str(issnum).find(".")
            issn_b4dec = str(issnum)[:issn_st]
            # if the length of decimal is only 1 digit, assume it's a tenth
            dec_is = str(issnum)[issn_st + 1 :]
            if len(dec_is) == 1:
                dec_nisval = int(dec_is) * 10
                iss_naftdec = str(dec_nisval)
            if len(dec_is) == 2:
                dec_nisval = int(dec_is)
                iss_naftdec = str(dec_nisval)
            iss_issue = issn_b4dec + "." + iss_naftdec
            issis = (int(issn_b4dec) * 1000) + dec_nisval
        else:
            issis = int(issnum) * 1000
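        # issue numbers are packed as (whole * 1000) + decimal so they stay sortable as integers,
        # e.g. issue "12" -> 12000 and issue "12.5" -> 12050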

        bb = 0
        while bb <= iscnt:
            try:
                gcdval = gcdinfo["gcdchoice"][bb]
            except IndexError:
                # account for gcd variation here
                if gcdinfo["gcdvariation"] == "gcd":
                    # print ("gcd-variation accounted for.")
                    issdate = "0000-00-00"
                    int_issnum = int(issis / 1000)
                break
            if "nn" in str(gcdval["GCDIssue"]):
                # no number detected - GN, TP or the like
                logger.warn(u"Non Series detected (Graphic Novel, etc) - cannot proceed at this time.")
                updater.no_searchresults(comicid)
                return
            elif "." in str(gcdval["GCDIssue"]):
                # print ("g-issue:" + str(gcdval['GCDIssue']))
                issst = str(gcdval["GCDIssue"]).find(".")
                # print ("issst:" + str(issst))
                issb4dec = str(gcdval["GCDIssue"])[:issst]
                # print ("issb4dec:" + str(issb4dec))
                # if the length of decimal is only 1 digit, assume it's a tenth
                decis = str(gcdval["GCDIssue"])[issst + 1 :]
                # print ("decis:" + str(decis))
                if len(decis) == 1:
                    decisval = int(decis) * 10
                    issaftdec = str(decisval)
                if len(decis) == 2:
                    decisval = int(decis)
                    issaftdec = str(decisval)
                gcd_issue = issb4dec + "." + issaftdec
                # print ("gcd_issue:" + str(gcd_issue))
                gcdis = (int(issb4dec) * 1000) + decisval
            else:
                gcdis = int(str(gcdval["GCDIssue"])) * 1000
            if gcdis == issis:
                issdate = str(gcdval["GCDDate"])
                int_issnum = int(gcdis / 1000)
                # get the latest issue / date using the date.
                if gcdval["GCDDate"] > latestdate:
                    latestiss = str(issnum)
                    latestdate = str(gcdval["GCDDate"])
                # a match was found, so stop scanning the GCD list for this issue
                break
            bb += 1
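        # by now the matching GCD entry (if one was found) has supplied the publication date (issdate)
        # and the integer-encoded issue number (int_issnum) for this ComicVine issue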
        # print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
        # ---END.NEW.

        # check if the issue already exists
        iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()

        controlValueDict = {"IssueID": issid}
        newValueDict = {
            "ComicID": comicid,
            "ComicName": comic["ComicName"],
            "IssueName": issname,
            "Issue_Number": issnum,
            "IssueDate": issdate,
            "Int_IssueNumber": int_issnum,
        }

        # only stamp DateAdded when the issue is new to the database
        if iss_exists is None:
            newValueDict["DateAdded"] = helpers.today()
        if mylar.AUTOWANT_ALL:
            newValueDict["Status"] = "Wanted"
            # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
            #    newValueDict['Status'] = "Wanted"
        else:
            newValueDict["Status"] = "Skipped"

        if iss_exists:
            # print ("Existing status : " + str(iss_exists['Status']))
            newValueDict["Status"] = iss_exists["Status"]

        myDB.upsert("issues", newValueDict, controlValueDict)
        n += 1

    #        logger.debug(u"Updating comic cache for " + comic['ComicName'])
    #        cache.getThumb(ComicID=issue['issueid'])

    #        logger.debug(u"Updating cache for: " + comic['ComicName'])
    #        cache.getThumb(ComicIDcomicid)

    # check for existing files...
    updater.forceRescan(comicid)

    controlValueStat = {"ComicID": comicid}
    newValueStat = {
        "Status": "Active",
        "LatestIssue": latestiss,
        "LatestDate": latestdate,
        "LastUpdated": helpers.now(),
    }

    myDB.upsert("comics", newValueStat, controlValueStat)

    logger.info(u"Updating complete for: " + comic["ComicName"])

    # let's check the pullist for anything at this time as well since we're here.
    if mylar.AUTOWANT_UPCOMING:
        logger.info(u"Checking this week's pullist for new issues of " + str(comic["ComicName"]))
        updater.newpullcheck()

    # here we grab issues that have been marked as wanted above...

    results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
    if results:
        logger.info(u"Attempting to grab wanted issues for : " + comic["ComicName"])

        for result in results:
            foundNZB = "none"
            if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
                foundNZB = search.searchforissue(result["IssueID"])
                if foundNZB == "yes":
                    updater.foundsearch(result["ComicID"], result["IssueID"])
    else:
        logger.info(u"No issues marked as wanted for " + comic["ComicName"])

    logger.info(u"Finished grabbing what I could.")
Ejemplo n.º 26
0
def libraryScan(dir=None, append=False, ComicID=None, ComicName=None, cron=None):

    if cron and not mylar.LIBRARYSCAN:
        return
        
    if not dir:
        dir = mylar.COMIC_DIR
    
    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append:
        dir = dir.encode(mylar.SYS_ENCODING)
        
    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(mylar.SYS_ENCODING, 'replace'))
        return

    
    logger.info('Scanning comic directory: %s' % dir.decode(mylar.SYS_ENCODING, 'replace'))

    basedir = dir

    comic_list = []
    comiccnt = 0
    extensions = ('cbr','cbz')
    for r,d,f in os.walk(dir):
        #for directory in d[:]:
        #    if directory.startswith("."):
        #        d.remove(directory)
        for files in f:
            if any(files.lower().endswith('.' + x.lower()) for x in extensions):
                comic = files
                comicpath = os.path.join(r, files)
                comicsize = os.path.getsize(comicpath)
                print "Comic: " + comic
                print "Comic Path: " + comicpath
                print "Comic Size: " + str(comicsize)

                # We need the unicode path to use for logging, inserting into database
                unicode_comic_path = comicpath.decode(mylar.SYS_ENCODING, 'replace')

                comiccnt+=1
                comic_dict = { 'ComicFilename':           comic,
                               'ComicLocation':           comicpath,
                               'ComicSize':               comicsize,
                               'Unicode_ComicLocation':   unicode_comic_path }
                comic_list.append(comic_dict)

        logger.info("I've found a total of " + str(comiccnt) + " comics....analyzing now")
        logger.info("comiclist: " + str(comic_list))
    myDB = db.DBConnection()

    #let's load in the watchlist to see if we have any matches.
    logger.info("loading in the watchlist to see if a series is being watched already...")
    watchlist = myDB.select("SELECT * from comics")
    ComicName = []
    DisplayName = []
    ComicYear = []
    ComicPublisher = []
    ComicTotal = []
    ComicID = []
    ComicLocation = []

    AltName = []
    watchcnt = 0

    watch_kchoice = []
    watchchoice = {}
    import_by_comicids = []
    import_comicids = {}

    for watch in watchlist:
        watchdisplaycomic = re.sub('[\_\#\,\/\:\;\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
        # let's clean up the name, just in case for comparison purposes...
        watchcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', watch['ComicName']).encode('utf-8').strip()
        #watchcomic = re.sub('\s+', ' ', str(watchcomic)).strip()

        if watchcomic.lower().startswith('the '):
            #drop the leading 'The ' from the watchcomic title for proper comparisons.
            watchcomic = watchcomic[4:]

        alt_chk = "no" # alt-checker flag (default to no)
         
        # account for alternate names as well
        if watch['AlternateSearch'] is not None and watch['AlternateSearch'] != 'None':
            altcomic = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\+\'\?\@]', ' ', watch['AlternateSearch']).encode('utf-8').strip()
            #altcomic = re.sub('\s+', ' ', str(altcomic)).strip()
            AltName.append(altcomic)
            alt_chk = "yes"  # alt-checker flag

        ComicName.append(watchcomic)
        DisplayName.append(watchdisplaycomic)
        ComicYear.append(watch['ComicYear'])
        ComicPublisher.append(watch['ComicPublisher'])
        ComicTotal.append(watch['Total'])
        ComicID.append(watch['ComicID'])
        ComicLocation.append(watch['ComicLocation'])
        watchcnt+=1

    logger.info("Successfully loaded " + str(watchcnt) + " series from your watchlist.")

    ripperlist=['digital-',
                'empire',
                'dcp']

    watchfound = 0

    datelist = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
#    datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
#    #search for number as text, and change to numeric
#    for numbs in basnumbs:
#        #print ("numbs:" + str(numbs))
#        if numbs in ComicName.lower():
#            numconv = basnumbs[numbs]
#            #print ("numconv: " + str(numconv))


    for i in comic_list:
        print i['ComicFilename']

        comfilename = i['ComicFilename']
        comlocation = i['ComicLocation']
        #let's clean up the filename for matching purposes

        cfilename = re.sub('[\_\#\,\/\:\;\-\!\$\%\&\+\'\?\@]', ' ', comfilename)
        #cfilename = re.sub('\s', '_', str(cfilename))
        d_filename = re.sub('[\_\#\,\/\;\!\$\%\&\?\@]', ' ', comfilename)
        d_filename = re.sub('[\:\-\+\']', '#', d_filename)

        #versioning - remove it
        subsplit = cfilename.replace('_', ' ').split()
        volno = None
        volyr = None
        for subit in subsplit:
            if subit[0].lower() == 'v':
                vfull = 0
                if subit[1:].isdigit():
                    #if in format v1, v2009 etc...
                    if len(subit) > 3:
                        # if it's greater than 3 in length, then the format is Vyyyy
                        vfull = 1 # add on 1 character length to account for extra space
                    cfilename = re.sub(subit, '', cfilename)
                    d_filename = re.sub(subit, '', d_filename)
                    volno = re.sub("[^0-9]", " ", subit)
                elif subit.lower()[:3] == 'vol':
                    #if in format vol.2013 etc
                    #because the '.' in Vol. gets removed, let's loop thru again after the Vol hit to remove it entirely
                    logger.fdebug('volume indicator detected as version #:' + str(subit))
                    cfilename = re.sub(subit, '', cfilename)
                    cfilename = " ".join(cfilename.split())
                    d_filename = re.sub(subit, '', d_filename)
                    d_filename = " ".join(d_filename.split())
                    volyr = re.sub("[^0-9]", " ", subit).strip()
                    logger.fdebug('volume year set as : ' + str(volyr))
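        # e.g. a token like 'v2' is treated as a volume number (volno) while 'Vol.2013' is treated as a
        # volume year (volyr); in either case the token is stripped from both working filenames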
        cm_cn = 0

        #we need to track the counter to make sure we are comparing the right array parts
        #this takes care of the brackets :)
        m = re.findall('[^()]+', cfilename)
        lenm = len(m)
        logger.fdebug("there are " + str(lenm) + " words.")
        cnt = 0
        yearmatch = "false"
        foundonwatch = "False"
        issue = 999999


        while (cnt < lenm):
            if m[cnt] is None: break
            if m[cnt] == ' ':
                pass
            else:
                logger.fdebug(str(cnt) + ". Bracket Word: " + m[cnt])
                if cnt == 0:
                    comic_andiss = m[cnt]
                    logger.fdebug("Comic: " + comic_andiss)
                    # if it's not in the standard format this will bork.
                    # let's try to accommodate (somehow).
                    # first remove the extension (if any)
                    extensions = ('cbr', 'cbz')
                    if comic_andiss.lower().endswith(extensions):
                        comic_andiss = comic_andiss[:-4]
                        logger.fdebug("removed extension from filename.")
                    #now we have to break up the string regardless of formatting.
                    #let's force the spaces.
                    comic_andiss = re.sub('_', ' ', comic_andiss)
                    cs = comic_andiss.split()
                    cs_len = len(cs)
                    cn = ''
                    ydetected = 'no'
                    idetected = 'no'
                    decimaldetect = 'no'
                    for i in reversed(xrange(len(cs))):
                        #start at the end.
                        logger.fdebug("word: " + str(cs[i]))
                        #assume once we find issue - everything prior is the actual title
                        #if no issue has been detected yet, everything prior is assumed to be part of the title
                        if (cs[i][:-2] == '19' or cs[i][:-2] == '20') and idetected == 'no':
                            logger.fdebug("year detected: " + str(cs[i]))
                            ydetected = 'yes'
                            result_comyear = cs[i]
                        elif (cs[i].isdigit() or '.' in cs[i]) and idetected == 'no':
                            issue = cs[i]
                            logger.fdebug("issue detected : " + str(issue))
                            idetected = 'yes'
                            if '.' in cs[i]:
                                #make sure it's a number on either side of decimal and assume decimal issue.
                                decst = cs[i].find('.')
                                dec_st = cs[i][:decst]
                                dec_en = cs[i][decst+1:]
                                logger.fdebug("st: " + str(dec_st))
                                logger.fdebug("en: " + str(dec_en))
                                if dec_st.isdigit() and dec_en.isdigit():
                                    logger.fdebug("decimal issue detected...adjusting.")
                                    issue = dec_st + "." + dec_en
                                    logger.fdebug("issue detected: " + str(issue))
                                    idetected = 'yes'
                                else:
                                    logger.fdebug("false decimal represent. Chunking to extra word.")
                                    cn = cn + cs[i] + " "
                                    break
                        elif '#' in cs[i] or decimaldetect == 'yes':
                            logger.fdebug("issue detected: " + str(cs[i]))
                            idetected = 'yes'
                        else: cn = cn + cs[i] + " "
                    if ydetected == 'no':
                        #assume no year given in filename...
                        result_comyear = "0000"
                    logger.fdebug("cm?: " + str(cn))
                    if issue != 999999:
                        comiss = issue
                    else:
                        logger.error("Invalid Issue number (none present) for " + comfilename)
                        break
                    cnsplit = cn.split()
                    cname = ''
                    findcn = 0
                    while (findcn < len(cnsplit)):
                        cname = cname + cs[findcn] + " "
                        findcn+=1
                    cname = cname[:len(cname)-1] # drop the end space...
                    print ("assuming name is : " + cname)
                    com_NAME = cname
                    print ("com_NAME : " + com_NAME)
                    yearmatch = "True"
                else:
                    logger.fdebug('checking ' + m[cnt])
                    # we're assuming that the year is in brackets (and it should be damnit)
                    if m[cnt][:-2] == '19' or m[cnt][:-2] == '20':
                        print ("year detected: " + str(m[cnt]))
                        ydetected = 'yes'
                        result_comyear = m[cnt]
                    elif m[cnt][:3].lower() in datelist:
                        logger.fdebug('possible issue date format given - verifying')
                        #if the date of the issue is given as (Jan 2010) or (January 2010) let's adjust.
                        #keeping in mind that ',' and '.' are already stripped from the string
                        if m[cnt][-4:].isdigit():
                            ydetected = 'yes'
                            result_comyear = m[cnt][-4:]
                            logger.fdebug('Valid Issue year of ' + str(result_comyear) + ' detected in format of ' + str(m[cnt]))
            cnt+=1

        displength = len(cname)
        print 'd_filename is : ' + d_filename
        charcount = d_filename.count('#')
        print ('charcount is : ' + str(charcount))
        if charcount > 0:
            print ('entering loop')
            for i,m in enumerate(re.finditer('\#', d_filename)):
                if m.end() <= displength:
                    print comfilename[m.start():m.end()]
                    # find occurrence in c_filename, then replace into d_filename so special characters are brought across
                    newchar = comfilename[m.start():m.end()]
                    print 'newchar:' + str(newchar)
                    d_filename = d_filename[:m.start()] + str(newchar) + d_filename[m.end():]
                    print 'd_filename:' + str(d_filename)

        dispname = d_filename[:displength]
        print ('dispname : ' + dispname)

        splitit = []
        watchcomic_split = []
        logger.fdebug("filename comic and issue: " + comic_andiss)

        #changed this from '' to ' '
        comic_iss_b4 = re.sub('[\-\:\,]', ' ', comic_andiss)
        comic_iss = comic_iss_b4.replace('.',' ')
        comic_iss = re.sub('[\s+]', ' ', comic_iss).strip()
        logger.fdebug("adjusted comic and issue: " + str(comic_iss))
        #remove a leading 'the' from here for proper comparisons.
        if comic_iss.lower().startswith('the '):
            comic_iss = comic_iss[4:]
        splitit = comic_iss.split(None)
        logger.fdebug("adjusting from: " + str(comic_iss_b4) + " to: " + str(comic_iss))
        #here we cycle through the Watchlist looking for a match.
        while (cm_cn < watchcnt):
            #setup the watchlist
            comname = ComicName[cm_cn]
            print ("watch_comic:" + comname)
            comyear = ComicYear[cm_cn]
            compub = ComicPublisher[cm_cn]
            comtotal = ComicTotal[cm_cn]
            comicid = ComicID[cm_cn]
            watch_location = ComicLocation[cm_cn]

            # there shouldn't be an issue in the comic now, so let's just assume it's all gravy.
            splitst = len(splitit)
            watchcomic_split = helpers.cleanName(comname)
            watchcomic_split = re.sub('[\-\:\,\.]', ' ', watchcomic_split).split(None)

            logger.fdebug(str(splitit) + " file series word count: " + str(splitst))
            logger.fdebug(str(watchcomic_split) + " watchlist word count: " + str(len(watchcomic_split)))
            if (splitst) != len(watchcomic_split):
                logger.fdebug("incorrect comic lengths...not a match")
#                if str(splitit[0]).lower() == "the":
#                    logger.fdebug("THE word detected...attempting to adjust pattern matching")
#                    splitit[0] = splitit[4:]
            else:
                logger.fdebug("length match..proceeding")
                n = 0
                scount = 0
                logger.fdebug("search-length: " + str(splitst))
                logger.fdebug("Watchlist-length: " + str(len(watchcomic_split)))
                while ( n <= (splitst)-1 ):
                    logger.fdebug("splitit: " + str(splitit[n]))
                    if n < (splitst) and n < len(watchcomic_split):
                        logger.fdebug(str(n) + " Comparing: " + str(watchcomic_split[n]) + " .to. " + str(splitit[n]))
                        if '+' in watchcomic_split[n]:
                            watchcomic_split[n] = re.sub('\+', '', str(watchcomic_split[n]))
                        if str(watchcomic_split[n].lower()) in str(splitit[n].lower()) and len(watchcomic_split[n]) >= len(splitit[n]):
                            logger.fdebug("word matched on : " + str(splitit[n]))
                            scount+=1
                        #elif ':' in splitit[n] or '-' in splitit[n]:
                        #    splitrep = splitit[n].replace('-', '')
                        #    print ("non-character keyword...skipped on " + splitit[n])
                    elif str(splitit[n]).lower().startswith('v'):
                        logger.fdebug("possible versioning..checking")
                        #we hit a versioning # - account for it
                        if splitit[n][1:].isdigit():
                            comicversion = str(splitit[n])
                            logger.fdebug("version found: " + str(comicversion))
                    else:
                        logger.fdebug("Comic / Issue section")
                        if splitit[n].isdigit():
                            logger.fdebug("issue detected")
                        else:
                            logger.fdebug("non-match for: "+ str(splitit[n]))
                            pass
                    n+=1
                #set the match threshold to 80% (for now)
                # if it's less than 80% consider it a non-match and discard.
                #splitit has to splitit-1 because last position is issue.
                wordcnt = int(scount)
                logger.fdebug("scount:" + str(wordcnt))
                totalcnt = int(splitst)
                logger.fdebug("splitit-len:" + str(totalcnt))
                # multiply before dividing so Python 2 integer division doesn't floor the ratio to 0
                spercent = (wordcnt * 100) / totalcnt
                logger.fdebug("we got " + str(spercent) + " percent.")
                if int(spercent) >= 80:
                    logger.fdebug("it's a go captain... - we matched " + str(spercent) + "%!")
                    logger.fdebug("this should be a match!")
                    logger.fdebug("issue we found for is : " + str(comiss))
                    #set the year to the series we just found ;)
                    result_comyear = comyear
                    #issue comparison now as well
                    logger.info(u"Found " + comname + " (" + str(comyear) + ") issue: " + str(comiss))
                    watchmatch = str(comicid)
                    dispname = DisplayName[cm_cn]
                    foundonwatch = "True"
                    break
                elif int(spercent) < 80:
                    logger.fdebug("failure - we only got " + str(spercent) + "% right!")
            cm_cn+=1

        if foundonwatch == "False":
            watchmatch = None
        #---if it's not a match - send it to the importer.
        n = 0

        if volyr is None:
            if result_comyear is None: 
                result_comyear = '0000' #no year in filename basically.
        else:
            if result_comyear is None:
                result_comyear = volyr
        if volno is None:
            if volyr is None:
                vol_label = None
            else:
                vol_label = volyr
        else:
            vol_label = volno

        print ("adding " + com_NAME + " to the import-queue!")
        impid = com_NAME + "-" + str(result_comyear) + "-" + str(comiss)
        print ("impid: " + str(impid))
        import_by_comicids.append({ 
            "impid"       : impid,
            "watchmatch"  : watchmatch,
            "displayname" : dispname,
            "comicname"   : com_NAME,
            "comicyear"   : result_comyear,
            "volume"      : vol_label,
            "comfilename" : comfilename,
            "comlocation" : comlocation.decode(mylar.SYS_ENCODING)
                                   })

    if len(watch_kchoice) > 0:
        watchchoice['watchlist'] = watch_kchoice
        print ("watchchoice: " + str(watchchoice))

        logger.info("I have found " + str(watchfound) + " out of " + str(comiccnt) + " comics for series that are being watched.")
        wat = 0
        comicids = []

        if watchfound > 0:
            if mylar.IMP_MOVE:
                logger.info("You checked off Move Files...so that's what I'm going to do") 
                #check to see if Move Files is enabled.
                #if not being moved, set the archive bit.
                print("Moving files into appropriate directory")
                while (wat < watchfound): 
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comlocation = watch_the_list['ComicLocation']
                    watch_comicid = watch_the_list['ComicID']
                    watch_comicname = watch_the_list['ComicName']
                    watch_comicyear = watch_the_list['ComicYear']
                    watch_comiciss = watch_the_list['ComicIssue']
                    print ("ComicLocation: " + str(watch_comlocation))
                    orig_comlocation = watch_the_list['OriginalLocation']
                    orig_filename = watch_the_list['OriginalFilename'] 
                    print ("Orig. Location: " + str(orig_comlocation))
                    print ("Orig. Filename: " + str(orig_filename))
                    #before moving check to see if Rename to Mylar structure is enabled.
                    if mylar.IMP_RENAME:
                        print("Renaming files according to configuration details : " + str(mylar.FILE_FORMAT))
                        renameit = helpers.rename_param(watch_comicid, watch_comicname, watch_comicyear, watch_comiciss)
                        nfilename = renameit['nfilename']
                    
                        dst_path = os.path.join(watch_comlocation,nfilename)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)
                    else:
                        print("Renaming files not enabled, keeping original filename(s)")
                        dst_path = os.path.join(watch_comlocation,orig_filename)

                    #os.rename(os.path.join(self.nzb_folder, str(ofilename)), os.path.join(self.nzb_folder,str(nfilename + ext)))
                    #src = os.path.join(, str(nfilename + ext))
                    print ("I'm going to move " + str(orig_comlocation) + " to .." + str(dst_path))
                    try:
                        shutil.move(orig_comlocation, dst_path)
                    except (OSError, IOError):
                        logger.info("Failed to move directory - check directories and manually re-run.")
                    wat+=1
            else:
                # if move files isn't enabled, let's set all found comics to Archive status :)
                while (wat < watchfound):
                    watch_the_list = watchchoice['watchlist'][wat]
                    watch_comicid = watch_the_list['ComicID']
                    watch_issue = watch_the_list['ComicIssue']
                    print ("ComicID: " + str(watch_comicid))
                    print ("Issue#: " + str(watch_issue))
                    issuechk = myDB.selectone("SELECT * from issues where ComicID=? AND INT_IssueNumber=?", [watch_comicid, watch_issue]).fetchone()
                    if issuechk is None:
                        print ("no matching issues for this comic#")
                    else:
                        print("...Existing status: " + str(issuechk['Status']))
                        control = {"IssueID":   issuechk['IssueID']}
                        values = { "Status":   "Archived"}
                        print ("...changing status of " + str(issuechk['Issue_Number']) + " to Archived ")
                        myDB.upsert("issues", values, control)
                        if str(watch_comicid) not in comicids:
                            comicids.append(watch_comicid)                    
                    wat+=1
            if not comicids:
                pass
            else:
                c_upd = len(comicids)
                c = 0
                while (c < c_upd):
                    print ("Rescanning.. " + str(comicids[c]))
                    updater.forceRescan(comicids[c])
                    c += 1
        if not len(import_by_comicids):
            return "Completed"
    if len(import_by_comicids) > 0:
        import_comicids['comic_info'] = import_by_comicids
        print ("import comicids: " + str(import_by_comicids))
        return import_comicids, len(import_by_comicids)
Ejemplo n.º 27
0
def PostProcess(nzb_name, nzb_folder):
        log2screen = ""
        log2screen = log2screen + "Nzb Name:" + nzb_name + "\n"
        log2screen = log2screen + "Nzb Folder:"  + nzb_folder + "\n"
        #lookup nzb_name in nzblog table to get issueid
        myDB = db.DBConnection()
        nzbiss = myDB.action("SELECT * from nzblog WHERE nzbname=?", [nzb_name]).fetchone()
        if nzbiss is None:
            log2screen = log2screen + "Epic failure - could not locate file to rename." + "\n"
            logger.error(u"Unable to locate downloaded file to rename. PostProcessing aborted.")
            return
        else: 
            issueid = nzbiss['IssueID']
        #log2screen = log2screen + "IssueID: " + issueid + "\n"
        #use issueid to get publisher, series, year, issue number
        issuenzb = myDB.action("SELECT * from issues WHERE issueid=?", [issueid]).fetchone()
        comicid = issuenzb['ComicID']
        #log2screen = log2screen + "ComicID: " + comicid + "\n"
        issuenum = issuenzb['Issue_Number']
        issueno = str(issuenum).split('.')[0]
        log2screen = log2screen + "Issue Number: " + str(issueno) + "\n"
        # issue zero-suppression here
        if mylar.ZERO_LEVEL == "0": 
            zeroadd = ""
        else:
            if mylar.ZERO_LEVEL_N  == "none": zeroadd = ""
            elif mylar.ZERO_LEVEL_N == "0x": zeroadd = "0"
            elif mylar.ZERO_LEVEL_N == "00x": zeroadd = "00"


        if issueno.isdigit():
            if int(issueno) < 10:
                log2screen = log2screen + "issue detected less than 10" + "\n"
                prettycomiss = str(zeroadd) + str(int(issueno))
                log2screen = log2screen + "Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss) + "\n"
            elif int(issueno) >= 10 and int(issueno) < 100:
                log2screen = log2screen + "issue detected greater than 10, but less than 100" + "\n"
                if mylar.ZERO_LEVEL_N == "none":
                    zeroadd = ""
                else:
                    zeroadd = "0"
                prettycomiss = str(zeroadd) + str(int(issueno))
                log2screen = log2screen + "Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ".Issue will be set as : " + str(prettycomiss) + "\n"
            else:
                log2screen = log2screen + "issue detected greater than 100" + "\n"
                prettycomiss = str(issueno)
                log2screen = log2screen + "Zero level supplement set to " + str(mylar.ZERO_LEVEL_N) + ". Issue will be set as : " + str(prettycomiss) + "\n"
        else:
            prettycomiss = str(issueno)
            log2screen = log2screen + "Non-numeric issue number - leaving it as : " + str(prettycomiss) + "\n"

        issueyear = issuenzb['IssueDate'][:4]
        log2screen = log2screen + "Issue Year: " + str(issueyear) + "\n"
        comicnzb= myDB.action("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
        publisher = comicnzb['ComicPublisher']
        log2screen = log2screen + "Publisher: " + publisher + "\n"
        series = comicnzb['ComicName']
        log2screen = log2screen + "Series: " + series + "\n"
        seriesyear = comicnzb['ComicYear']
        log2screen = log2screen + "Year: " + seriesyear + "\n"
        comlocation = comicnzb['ComicLocation']
        log2screen = log2screen + "Comic Location: " + comlocation + "\n"
#---move to importer.py
                #get output path format
#        if ':' in series:
#            series = series.replace(':','')
                #do work to generate folder path
#        values = {'$Series':    series,
#              '$Publisher': publisher,
#              '$Year':      seriesyear
#              }
#        comlocation = mylar.DESTINATION_DIR + "/" + helpers.replace_all(mylar.FOLDER_FORMAT, values)
            #last perform space replace
#        if mylar.REPLACE_SPACES:
            #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
#            comlocation = comlocation.replace(' ', mylar.REPLACE_CHAR)
#        log2screen = log2screen + "Final Location: " + comlocation + "\n"
#---
        #rename file and move to new path
        #nfilename = series + " " + issueno + " (" + seriesyear + ")"
        file_values = {'$Series':    series,
                       '$Issue':     prettycomiss,
                       '$Year':      issueyear
                      }
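        # illustrative substitution (assuming a FILE_FORMAT such as '$Series $Issue ($Year)'):
        #   series 'Batman', issue '004', year '2011' -> 'Batman 004 (2011)'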

        extensions = ('.cbr', '.cbz')

        ofilename = None
        for root, dirnames, filenames in os.walk(nzb_folder):
            for filename in filenames:
                if filename.lower().endswith(extensions):
                    ofilename = filename
                    path, ext = os.path.splitext(ofilename)
        if ofilename is None:
            logger.error(u"Could not find a cbr/cbz file to post-process in: " + nzb_folder)
            return log2screen
        log2screen = log2screen + "Original Filename: " + ofilename + "\n"
        log2screen = log2screen + "Original Extension: " + ext + "\n"
        if mylar.FILE_FORMAT == '':
            log2screen = log2screen + "Rename Files isn't enabled...keeping original filename." + "\n"
            #check if extension is in nzb_name - will screw up otherwise
            if ofilename.lower().endswith(extensions):
                nfilename = ofilename[:-4]
            else:
                nfilename = ofilename
        else:
            nfilename = helpers.replace_all(mylar.FILE_FORMAT, file_values)
            if mylar.REPLACE_SPACES:
                #mylar.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                nfilename = nfilename.replace(' ', mylar.REPLACE_CHAR)
        #TODO - sort issue numbering 12.00 should be 12
        log2screen = log2screen + "New Filename: " + nfilename + "\n"

        src = nzb_folder + "/" + ofilename
        dst = comlocation + "/" + nfilename + ext
        log2screen = log2screen + "Source:" + src + "\n"
        log2screen = log2screen + "Destination:" +  dst + "\n"
        os.rename(nzb_folder + "/" + ofilename, nzb_folder + "/" + nfilename + ext)
        src = nzb_folder + "/" + nfilename + ext
        try:
            shutil.move(src, dst)
        except (OSError, IOError):
            log2screen = log2screen + "Failed to move directory - check directories and manually re-run." + "\n"
            log2screen = log2screen + "Post-Processing ABORTED." + "\n"
            return log2screen
        #tidyup old path
        try:
            shutil.rmtree(nzb_folder)
        except (OSError, IOError):
            log2screen = log2screen + "Failed to remove temporary directory - check directory and manually re-run." + "\n"
            log2screen = log2screen + "Post-Processing ABORTED." + "\n"
            return log2screen

        log2screen = log2screen + "Removed temporary directory : " + str(nzb_folder) + "\n"
        #delete entry from nzblog table
        myDB.action('DELETE from nzblog WHERE issueid=?', [issueid])
        #force rescan of files
        updater.forceRescan(comicid)
        logger.info(u"Post-Processing completed for: " + series + " issue: " + str(issuenum) )
        log2screen = log2screen + "Post Processing SUCCESSFULL!" + "\n"
        #print log2screen
        return log2screen
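        # hypothetical invocation from a download client's post-processing hook (names/paths are
        # illustrative only):
        #   PostProcess(nzb_name='Some.Comic.015', nzb_folder='/downloads/Some.Comic.015')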